| text (string, 12-1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
#!/usr/bin/env python

import pysam
import sys
from re import sub
from random import random
from uuid import uuid4

if len(sys.argv) == 2:
    assert sys.argv[1].endswith('.bam')
    inbamfn = sys.argv[1]
    outbamfn = sub('.bam$', '.renamereads.bam', inbamfn)

    inbam = pysam.Samfile(inbamfn, 'rb')
    outbam = pysam.Samfile(outbamfn, 'wb', template=inbam)

    paired = {}

    n = 0
    p = 0
    u = 0
    w = 0
    m = 0

    for read in inbam.fetch(until_eof=True):
        n += 1
        if read.is_paired:
            p += 1
            if read.qname in paired:
                uuid = paired[read.qname]
                del paired[read.qname]
                read.qname = uuid
                outbam.write(read)
                w += 1
                m += 1
            else:
                newname = str(uuid4())
                paired[read.qname] = newname
                read.qname = newname
                outbam.write(read)
                w += 1
        else:
            u += 1
            read.qname = str(uuid4())
            outbam.write(read)
            w += 1

        if n % 1000000 == 0:
            print("Processed", n, "reads:", p, "paired,", u, "unpaired,", w, "written,", m, "mates found.")

    outbam.close()
    inbam.close()
else:
    sys.exit("usage: %s <bam (uses less memory if sorted by readname)>" % sys.argv[0])
| (text above) | adamewing/bamsurgeon | scripts/rename_reads.py | Python | mit | 1,358 | ["pysam"] | 404c9748007142514a723ddf27cc4d9af5a0d7404e7f0ed854f2694315c141b5 |
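The renaming trick above hinges on a dictionary that holds the first mate's freshly assigned UUID until the second mate arrives, so both ends of a pair get the same new name. A minimal sketch of the same pattern using the current pysam names (`AlignmentFile`/`query_name` instead of the deprecated `Samfile`/`qname`; the file names are placeholders):

```python
import pysam
from uuid import uuid4

pending = {}  # original qname -> newly assigned UUID, awaiting the mate

with pysam.AlignmentFile("in.bam", "rb") as inbam, \
     pysam.AlignmentFile("out.bam", "wb", template=inbam) as outbam:
    for read in inbam.fetch(until_eof=True):
        if read.is_paired:
            # pop() hands the mate the same UUID and frees the dict entry
            new_name = pending.pop(read.query_name, None)
            if new_name is None:
                new_name = str(uuid4())
                pending[read.query_name] = new_name
            read.query_name = new_name
        else:
            read.query_name = str(uuid4())
        outbam.write(read)
```

As the original usage string notes, the `pending` dict stays small only if the BAM is sorted by read name, so mates arrive adjacently.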
"""Tests for BrillouinZone class."""
import numpy as np
from spglib import get_stabilized_reciprocal_mesh, relocate_BZ_grid_address
from phonopy.structure.brillouin_zone import BrillouinZone
def test_FCC():
"""Test BrillouinZone with FCC lattice."""
direct_lat = [[0, 2.73, 2.73], [2.73, 0, 2.73], [2.73, 2.73, 0]]
is_shift = [0, 0, 0]
_testBrillouinZone(direct_lat, [4, 4, 4], is_shift)
_testBrillouinZone(direct_lat, [5, 5, 5], is_shift)
def test_Hexagonal():
"""Test BrillouinZone with Hexagonal lattice."""
direct_lat = [
[3.0751691007292523, 0, 0],
[-1.5375845503646262, 2.6631745621644800, 0],
[0, 0, 3.5270080068586522],
]
is_shift = [0, 0, 0]
_testBrillouinZone(direct_lat, [4, 4, 4], is_shift)
_testBrillouinZone(direct_lat, [5, 5, 5], is_shift)
def _testBrillouinZone(direct_lat, mesh, is_shift):
_, grid_address = get_stabilized_reciprocal_mesh(
mesh,
rotations=[
np.eye(3, dtype="intc"),
],
is_shift=is_shift,
)
rec_lat = np.linalg.inv(direct_lat)
bz_grid_address, bz_map = relocate_BZ_grid_address(
grid_address, mesh, rec_lat, is_shift=is_shift
)
qpoints = (grid_address + np.array(is_shift) / 2.0) / mesh
bz = BrillouinZone(rec_lat)
bz.run(qpoints)
sv_all = bz.shortest_qpoints # including BZ boundary duplicates
sv = [v[0] for v in sv_all]
bz_qpoints = (bz_grid_address + np.array(is_shift) / 2.0) / mesh
d2_this = (np.dot(sv, rec_lat.T) ** 2).sum(axis=1)
d2_spglib = (np.dot(bz_qpoints[: np.prod(mesh)], rec_lat.T) ** 2).sum(axis=1)
diff = d2_this - d2_spglib
diff -= np.rint(diff)
# Following both of two tests are necessary.
# Check equivalence of vectors by lattice translation
np.testing.assert_allclose(diff, 0, atol=1e-8)
# Check being in same (hopefull first) Brillouin zone by their lengths
np.testing.assert_allclose(d2_this, d2_spglib, atol=1e-8)
| (text above) | atztogo/phonopy | test/structure/test_brillouin_zone.py | Python | bsd-3-clause | 1,984 | ["phonopy"] | 7ca64cf66a8df6515a020f26a33717da3145aa415e9db8b50a7b5e078442aed7 |
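The two assertions at the end of that test encode a generally useful pair of checks: two fractional q-points are equivalent under a lattice translation exactly when their difference is an integer vector, and they lie in the same Brillouin zone only if their Cartesian lengths also agree. A self-contained numpy illustration of the same idea (the lattice and q-points are invented for the example):

```python
import numpy as np

rec_lat = np.eye(3)                   # toy reciprocal lattice (Cartesian = fractional)
q_a = np.array([0.75, 0.0, 0.0])      # a q-point in fractional coordinates
q_b = np.array([-0.25, 0.0, 0.0])     # the same point shifted by a lattice vector

diff = q_a - q_b
diff -= np.rint(diff)                 # strip off the integer (lattice) part
assert np.allclose(diff, 0)           # equivalent up to a lattice translation

# Their Cartesian squared lengths differ, so only q_b is in the first zone.
d2_a = ((q_a @ rec_lat.T) ** 2).sum()
d2_b = ((q_b @ rec_lat.T) ** 2).sum()
print(d2_a, d2_b)                     # 0.5625 vs 0.0625
```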
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import datetime as dt
import ocw.data_source.dap as dap
from ocw.dataset import Dataset
class TestDap(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.url = 'http://test.opendap.org/opendap/data/ncml/agg/dated/'\
                  'CG2006158_120000h_usfc.nc'
        cls.name = 'foo'
        '''
        The following exception handling should be removed once the opendap
        servers are working fine. It was added to fix the build temporarily.
        More information - https://github.com/apache/climate/pull/419
        '''
        try:
            cls.dataset = dap.load(cls.url, 'CGusfc', name=cls.name)
        # The tests will be skipped if any exception is raised
        except Exception as e:
            raise unittest.SkipTest(e)
        '''
        # The tests fail for this dataset since the dataset does not have a time (1D) variable.
        # The tests fail because dap.py assumes that openDAP datasets necessarily have the
        # three 1D variables lat, lon and time, and lines 53, 54 and 55 are written
        # on the same assumption.
        cls.url2 = 'http://opendap-uat.jpl.nasa.gov/opendap/GeodeticsGravity/'\
            'tellus/L3/mascon/RL05/JPL/CRI/netcdf/CLM4.SCALE_FACTOR.JPL.MSCNv01CRIv01.nc'
        cls.name2 = 'foo2'
        cls.dataset2 = dap.load(cls.url2, 'scale_factor', name=cls.name)'''

    def test_dataset_is_returned(self):
        self.assertTrue(isinstance(self.dataset, Dataset))

    def test_correct_lat_shape(self):
        self.assertEqual(len(self.dataset.lats), 29)

    def test_correct_lon_shape(self):
        self.assertEqual(len(self.dataset.lons), 26)

    def test_correct_time_shape(self):
        self.assertEqual(len(self.dataset.times), 1)

    def test_valid_date_conversion(self):
        start = dt.datetime(2006, 6, 7, 12)
        self.assertTrue(start == self.dataset.times[0])

    def test_custom_dataset_name(self):
        self.assertEqual(self.dataset.name, self.name)

    def test_dataset_origin(self):
        self.assertEqual(self.dataset.origin['source'], 'dap')
        self.assertEqual(self.dataset.origin['url'], self.url)


if __name__ == '__main__':
    unittest.main()
| (text above) | apache/climate | ocw/tests/test_dap.py | Python | apache-2.0 | 3,042 | ["NetCDF"] | ee250711370381989b03c8194fcc6ecd48110e60707bfb09a5f380053db4d759 |
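The `setUpClass` above shows a handy pattern for suites that depend on a flaky external service: do the expensive setup once per class and convert any failure into a skip instead of an error, so an unreachable server does not break the build. A minimal stdlib-only sketch of that pattern (URL and payload check are placeholders):

```python
import unittest
import urllib.request


class TestRemoteResource(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        try:
            # One network fetch shared by every test in the class.
            cls.payload = urllib.request.urlopen(
                "http://example.com/", timeout=5).read()
        except Exception as e:
            # Unreachable server => skip the whole class, don't fail the run.
            raise unittest.SkipTest(e)

    def test_payload_not_empty(self):
        self.assertTrue(cls_payload_nonempty := bool(self.payload))
```

Raising `unittest.SkipTest` from `setUpClass` marks every test in the class as skipped, which keeps intermittent infrastructure failures out of the pass/fail signal.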
#!/Library/Frameworks/Python.framework/Versions/2.7/bin/python
##!/mnt/lustre_fs/users/mjmcc/apps/python2.7/bin/python

# ----------------------------------------
# USAGE:

# ----------------------------------------
# PREAMBLE:

import sys
import numpy as np
from numpy.linalg import *
import MDAnalysis
from distance_functions import *
import scipy.stats

# ----------------------------------------
# VARIABLE DECLARATION

pdb_file = sys.argv[1]
traj_loc = sys.argv[2]
start = int(sys.argv[3])
end = int(sys.argv[4])

shapiro_test = True
nFrames = 2500

zeros = np.zeros
square = np.square
sqrt = np.sqrt
flush = sys.stdout.flush

important = 'protein'

# ----------------------------------------
# SUBROUTINES:

def ffprint(string):
    print '%s' % (string)
    flush()

# ----------------------------------------
# MAIN PROGRAM:

u = MDAnalysis.Universe(pdb_file)
u_important = u.select_atoms(important)
nRes = len(u_important.residues)
ffprint(nRes)

avg_matrix = zeros((nRes, nRes))
std_matrix = zeros((nRes, nRes))
temp_prot_com = zeros((nRes, 3))
if shapiro_test:
    shapiro_array = zeros((nFrames, nRes, 3))

nSteps = 0
count = 0
while start <= end:
    ffprint('Loading trajectory %s' % (start))
    u.load_new('%sproduction.%s/production.%s.dcd' % (traj_loc, start, start))
    nSteps += len(u.trajectory)
    for ts in u.trajectory:
        if ts.frame % 1000 == 0:
            ffprint('Working on timestep %d of trajectory %d' % (ts.frame, start))
        for i in range(nRes):
            temp_prot_com[i] = u_important.residues[i].center_of_mass()
        if shapiro_test and ts.frame % 10 == 0:
            for i in range(nRes):
                shapiro_array[count, i, :] = temp_prot_com[i]
            count += 1
        for i in range(nRes - 1):
            for j in range(i + 1, nRes):
                dist, dist2 = euclid_dist(temp_prot_com[i], temp_prot_com[j])
                avg_matrix[i, j] += dist
                std_matrix[i, j] += dist2
    start += 1

ffprint(nSteps)
avg_matrix /= nSteps
std_matrix /= nSteps
std_matrix = sqrt(std_matrix - square(avg_matrix))

with open('%03d.%03d.avg_distance_matrix.dat' % (int(sys.argv[3]), end), 'w') as f:
    np.savetxt(f, avg_matrix)
with open('%03d.%03d.std_distance_matrix.dat' % (int(sys.argv[3]), end), 'w') as f:
    np.savetxt(f, std_matrix)

if shapiro_test:
    with open('shapiro_wilks_test.dat', 'w') as f:
        for i in range(nRes):
            for j in range(3):
                W, P = scipy.stats.shapiro(shapiro_array[:, i, j])
                f.write('%d %f %f ' % (j, W, P))
            f.write('\n')
| (text above) | rbdavid/Distance_matrix | pro_pro_distance_matrix_calc_shapiro_wilks.py | Python | gpl-3.0 | 2,370 | ["MDAnalysis"] | 996931d4139407e7fe1f202b09eb23b88af8340493685f9d2369246927ee1aa9 |
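The script above accumulates pairwise distances and squared distances over all frames, then recovers the standard deviation as sqrt(E[d^2] - E[d]^2), a single-pass alternative to storing every frame in memory. A small numpy-only sketch of that accumulation trick on synthetic data (all names here are illustrative):

```python
import numpy as np

rng = np.random.default_rng(0)
samples = rng.normal(loc=5.0, scale=2.0, size=10000)

# Single pass: accumulate the sum and the sum of squares, not the raw data.
total = 0.0
total_sq = 0.0
for x in samples:
    total += x
    total_sq += x * x

n = samples.size
mean = total / n
std = np.sqrt(total_sq / n - mean ** 2)  # E[x^2] - E[x]^2
print(mean, std)                          # close to 5.0 and 2.0
```

Worth noting: this formula can lose precision when the variance is tiny relative to the mean; Welford's online algorithm is the numerically safer variant if that matters.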
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: avi_systemconfiguration
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>

short_description: Module for setup of SystemConfiguration Avi RESTful Object
description:
    - This module is used to configure SystemConfiguration object
    - more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
    state:
        description:
            - The state that should be applied on the entity.
        default: present
        choices: ["absent", "present"]
    avi_api_update_method:
        description:
            - Default method for object update is HTTP PUT.
            - Setting to patch will override that behavior to use HTTP PATCH.
        version_added: "2.5"
        default: put
        choices: ["put", "patch"]
    avi_api_patch_op:
        description:
            - Patch operation to use when using avi_api_update_method as patch.
        version_added: "2.5"
        choices: ["add", "replace", "delete"]
    admin_auth_configuration:
        description:
            - Adminauthconfiguration settings for systemconfiguration.
    default_license_tier:
        description:
            - Specifies the default license tier which would be used by new clouds.
            - Enum options - ENTERPRISE_16, ENTERPRISE_18.
            - Field introduced in 17.2.5.
            - Default value when not specified in API or module is interpreted by Avi Controller as ENTERPRISE_18.
        version_added: "2.5"
    dns_configuration:
        description:
            - Dnsconfiguration settings for systemconfiguration.
    dns_virtualservice_refs:
        description:
            - Dns virtualservices hosting fqdn records for applications across avi vantage.
            - If no virtualservices are provided, avi vantage will provide dns services for configured applications.
            - Switching back to avi vantage from dns virtualservices is not allowed.
            - It is a reference to an object of type virtualservice.
    docker_mode:
        description:
            - Boolean flag to set docker_mode.
            - Default value when not specified in API or module is interpreted by Avi Controller as False.
        type: bool
    email_configuration:
        description:
            - Emailconfiguration settings for systemconfiguration.
    global_tenant_config:
        description:
            - Tenantconfiguration settings for systemconfiguration.
    linux_configuration:
        description:
            - Linuxconfiguration settings for systemconfiguration.
    mgmt_ip_access_control:
        description:
            - Configure ip access control for controller to restrict open access.
    ntp_configuration:
        description:
            - Ntpconfiguration settings for systemconfiguration.
    portal_configuration:
        description:
            - Portalconfiguration settings for systemconfiguration.
    proxy_configuration:
        description:
            - Proxyconfiguration settings for systemconfiguration.
    secure_channel_configuration:
        description:
            - Configure secure channel properties.
            - Field introduced in 18.1.4, 18.2.1.
        version_added: "2.9"
    snmp_configuration:
        description:
            - Snmpconfiguration settings for systemconfiguration.
    ssh_ciphers:
        description:
            - Allowed ciphers list for ssh to the management interface on the controller and service engines.
            - If this is not specified, all the default ciphers are allowed.
    ssh_hmacs:
        description:
            - Allowed hmac list for ssh to the management interface on the controller and service engines.
            - If this is not specified, all the default hmacs are allowed.
    url:
        description:
            - Avi controller URL of the object.
    uuid:
        description:
            - Unique object identifier of the object.
    welcome_workflow_complete:
        description:
            - This flag is set once the initial controller setup workflow is complete.
            - Field introduced in 18.2.3.
            - Default value when not specified in API or module is interpreted by Avi Controller as False.
        version_added: "2.9"
        type: bool
extends_documentation_fragment:
    - avi
'''

EXAMPLES = """
- name: Example to create SystemConfiguration object
  avi_systemconfiguration:
    controller: 10.10.25.42
    username: admin
    password: something
    state: present
    name: sample_systemconfiguration
"""

RETURN = '''
obj:
    description: SystemConfiguration (api/systemconfiguration) object
    returned: success, changed
    type: dict
'''

from ansible.module_utils.basic import AnsibleModule
try:
    from ansible.module_utils.network.avi.avi import (
        avi_common_argument_spec, avi_ansible_api, HAS_AVI)
except ImportError:
    HAS_AVI = False


def main():
    argument_specs = dict(
        state=dict(default='present',
                   choices=['absent', 'present']),
        avi_api_update_method=dict(default='put',
                                   choices=['put', 'patch']),
        avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
        admin_auth_configuration=dict(type='dict',),
        default_license_tier=dict(type='str',),
        dns_configuration=dict(type='dict',),
        dns_virtualservice_refs=dict(type='list',),
        docker_mode=dict(type='bool',),
        email_configuration=dict(type='dict',),
        global_tenant_config=dict(type='dict',),
        linux_configuration=dict(type='dict',),
        mgmt_ip_access_control=dict(type='dict',),
        ntp_configuration=dict(type='dict',),
        portal_configuration=dict(type='dict',),
        proxy_configuration=dict(type='dict',),
        secure_channel_configuration=dict(type='dict',),
        snmp_configuration=dict(type='dict',),
        ssh_ciphers=dict(type='list',),
        ssh_hmacs=dict(type='list',),
        url=dict(type='str',),
        uuid=dict(type='str',),
        welcome_workflow_complete=dict(type='bool',),
    )
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    return avi_ansible_api(module, 'systemconfiguration',
                           set([]))


if __name__ == '__main__':
    main()
| (text above) | anryko/ansible | lib/ansible/modules/network/avi/avi_systemconfiguration.py | Python | gpl-3.0 | 7,091 | ["VisIt"] | be0dbd955e2eec3949a639d22fa84f7549051ccabe2b600071aa62c6fe95fbf3 |
# User creation spoke
#
# Copyright (C) 2013-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
import os
from pyanaconda.flags import flags
from pyanaconda.core.i18n import _, CN_
from pyanaconda.core.users import crypt_password, guess_username, check_groupname
from pyanaconda import input_checking
from pyanaconda.core import constants
from pyanaconda.modules.common.constants.services import USERS
from pyanaconda.ui.gui.spokes import NormalSpoke
from pyanaconda.ui.gui import GUIObject
from pyanaconda.ui.categories.user_settings import UserSettingsCategory
from pyanaconda.ui.common import FirstbootSpokeMixIn
from pyanaconda.ui.helpers import InputCheck
from pyanaconda.ui.gui.helpers import GUISpokeInputCheckHandler, GUIDialogInputCheckHandler
from pyanaconda.ui.gui.utils import blockedHandler, set_password_visibility
from pyanaconda.ui.communication import hubQ
from pyanaconda.ui.lib.users import get_user_list, set_user_list
from pyanaconda.core.regexes import GROUPLIST_FANCY_PARSE
from pyanaconda.anaconda_loggers import get_module_logger
log = get_module_logger(__name__)
__all__ = ["UserSpoke", "AdvancedUserDialog"]
class AdvancedUserDialog(GUIObject, GUIDialogInputCheckHandler):
    """
       .. inheritance-diagram:: AdvancedUserDialog
          :parts: 3
    """
    builderObjects = ["advancedUserDialog", "uid", "gid"]
    mainWidgetName = "advancedUserDialog"
    uiFile = "spokes/advanced_user.glade"

    def _validateGroups(self, inputcheck):
        groups_string = self.get_input(inputcheck.input_obj)

        # Pass if the string is empty
        if not groups_string:
            return InputCheck.CHECK_OK

        # Check each group name in the list
        for group in groups_string.split(","):
            group_name = GROUPLIST_FANCY_PARSE.match(group).group('name')
            valid, message = check_groupname(group_name)
            if not valid:
                return message or _("Invalid group name.")

        return InputCheck.CHECK_OK

    def __init__(self, user_spoke):
        GUIObject.__init__(self, user_spoke)
        saveButton = self.builder.get_object("save_button")
        GUIDialogInputCheckHandler.__init__(self, saveButton)
        self._user_spoke = user_spoke

        # Track whether the user has requested a home directory other
        # than the default. This way, if the home directory is left as
        # the default, the default will change if the username changes.
        # Otherwise, once the directory is set it stays that way.
        self._origHome = None

    def _grabObjects(self):
        self._cUid = self.builder.get_object("c_uid")
        self._cGid = self.builder.get_object("c_gid")
        self._tHome = self.builder.get_object("t_home")
        self._tGroups = self.builder.get_object("t_groups")
        self._spinUid = self.builder.get_object("spin_uid")
        self._spinGid = self.builder.get_object("spin_gid")
        self._uid = self.builder.get_object("uid")
        self._gid = self.builder.get_object("gid")

    def initialize(self):
        GUIObject.initialize(self)
        self._grabObjects()

        # Validate the group input box
        self.add_check(self._tGroups, self._validateGroups)

        # Send ready signal to main event loop
        hubQ.send_ready(self.__class__.__name__, False)

    @property
    def user(self):
        """Shortcut to user data from the user spoke."""
        return self._user_spoke.user

    def refresh(self):
        # start by reloading the user data from the user spoke
        if self.user.homedir:
            homedir = self.user.homedir
        elif self.user.name:
            homedir = "/home/" + self.user.name

        self._tHome.set_text(homedir)
        self._origHome = homedir

        self._cUid.set_active(bool(self.user.uid))
        self._cGid.set_active(bool(self.user.gid))
        self._spinUid.update()
        self._spinGid.update()

        self._tGroups.set_text(", ".join(self.user.groups))

    def apply(self):
        # Copy data from the UI back to the user data object
        homedir = self._tHome.get_text()

        # If the user cleared the home directory, revert back to the
        # default
        if not homedir:
            self.user.homedir = None
        # If the user modified the home directory input, save that the
        # home directory has been modified and use the value.
        elif self._origHome != homedir:
            if not os.path.isabs(homedir):
                homedir = "/" + homedir
            self.user.homedir = homedir
        # Otherwise leave the home directory alone. If the home
        # directory is currently the default value, the next call
        # to refresh() will update the input text to reflect
        # changes in the username.

        if self._cUid.get_active():
            self.user.uid = int(self._uid.get_value())
        else:
            self.user.uid = None

        if self._cGid.get_active():
            self.user.gid = int(self._gid.get_value())
        else:
            self.user.gid = None

        # ''.split(',') returns [''] instead of [], which is not what we want
        self.user.groups = [g.strip() for g in self._tGroups.get_text().split(",") if g]

        # Send ready signal to main event loop
        hubQ.send_ready(self.__class__.__name__, False)

    def run(self):
        self.window.show()
        while True:
            rc = self.window.run()

            # OK clicked
            if rc == 1:
                # Input checks pass
                if self.on_ok_clicked():
                    self.apply()
                    break
                # Input checks fail, try again
                else:
                    continue
            # Cancel clicked, window destroyed...
            else:
                break

        self.window.hide()
        return rc

    def on_uid_checkbox_toggled(self, togglebutton, data=None):
        # Set the UID spinner sensitivity based on the UID checkbox
        self._spinUid.set_sensitive(togglebutton.get_active())

    def on_gid_checkbox_toggled(self, togglebutton, data=None):
        # Same as above, for GID
        self._spinGid.set_sensitive(togglebutton.get_active())

    def on_uid_mnemonic_activate(self, widget, group_cycling, user_data=None):
        # If this is the only widget with the mnemonic (group_cycling is False),
        # and the checkbox is not currently toggled, toggle the checkbox and
        # then set the focus to the UID spinner
        if not group_cycling and not widget.get_active():
            widget.set_active(True)
            self._spinUid.grab_focus()
            return True

        # Otherwise just use the default signal handler
        return False

    def on_gid_mnemonic_activate(self, widget, group_cycling, user_data=None):
        # Same as above, but for GID
        if not group_cycling and not widget.get_active():
            widget.set_active(True)
            self._spinGid.grab_focus()
            return True

        return False


class UserSpoke(FirstbootSpokeMixIn, NormalSpoke, GUISpokeInputCheckHandler):
    """
       .. inheritance-diagram:: UserSpoke
          :parts: 3
    """
    builderObjects = ["userCreationWindow"]

    mainWidgetName = "userCreationWindow"
    focusWidgetName = "fullname_entry"
    uiFile = "spokes/user.glade"
    helpFile = "UserSpoke.xml"

    category = UserSettingsCategory

    icon = "avatar-default-symbolic"
    title = CN_("GUI|Spoke", "_User Creation")

    @classmethod
    def should_run(cls, environment, data):
        # the user spoke should always run in the anaconda, and in firstboot only
        # when doing reconfig or if no user has been created in the installation
        users_module = USERS.get_proxy()
        user_list = get_user_list(users_module)

        if environment == constants.ANACONDA_ENVIRON:
            return True
        elif environment == constants.FIRSTBOOT_ENVIRON and data is None:
            # cannot decide, stay in the game and let another call with data
            # available (will come) decide
            return True
        elif environment == constants.FIRSTBOOT_ENVIRON and data and not user_list:
            return True
        else:
            return False

    def __init__(self, *args):
        NormalSpoke.__init__(self, *args)
        GUISpokeInputCheckHandler.__init__(self)
        self._users_module = USERS.get_proxy()

    def initialize(self):
        NormalSpoke.initialize(self)
        self.initialize_start()

        # We consider user creation requested if there was at least one user
        # in the DBus module user list at startup.
        # We also remember how the user was called so that we can clear it
        # in a reasonably safe way & if it was cleared.
        self._user_list = get_user_list(self._users_module, add_default=True)
        self._user_requested = False
        self._requested_user_cleared = False
        # if the user has a name, it's an actual user that has been requested,
        # rather than a default user added by us
        if self.user.name:
            self._user_requested = True

        # gather references to relevant GUI objects

        # entry fields
        self._fullname_entry = self.builder.get_object("fullname_entry")
        self._username_entry = self.builder.get_object("username_entry")
        self._password_entry = self.builder.get_object("password_entry")
        self._password_confirmation_entry = self.builder.get_object("password_confirmation_entry")
        # check boxes
        self._admin_checkbox = self.builder.get_object("admin_checkbox")
        self._password_required_checkbox = self.builder.get_object("password_required_checkbox")
        # advanced user configuration dialog button
        self._advanced_button = self.builder.get_object("advanced_button")
        # password checking status bar & label
        self._password_bar = self.builder.get_object("password_bar")
        self._password_label = self.builder.get_object("password_label")

        # Install the password checks:
        # - Has a password been specified?
        # - If a password has been specified and there is data in the confirm box, do they match?
        # - How strong is the password?
        # - Does the password contain non-ASCII characters?

        # Setup the password checker for password checking
        self._checker = input_checking.PasswordChecker(
            initial_password_content=self.password,
            initial_password_confirmation_content=self.password_confirmation,
            policy=input_checking.get_policy(self.data, "user")
        )
        # configure the checker for password checking
        self.checker.username = self.username
        self.checker.secret_type = constants.SecretType.PASSWORD
        # remove any placeholder texts if either password or confirmation field changes content from initial state
        self.checker.password.changed_from_initial_state.connect(self.remove_placeholder_texts)
        self.checker.password_confirmation.changed_from_initial_state.connect(self.remove_placeholder_texts)
        # connect UI updates to check results
        self.checker.checks_done.connect(self._checks_done)

        # username and full name checks
        self._username_check = input_checking.UsernameCheck()
        self._fullname_check = input_checking.FullnameCheck()
        # an empty username is considered a success so that the user can leave
        # the spoke without filling it in
        self._username_check.success_if_username_empty = True
        # check that the password is not empty
        self._empty_check = input_checking.PasswordEmptyCheck()
        # check that the content of the password field & the confirmation field are the same
        self._confirm_check = input_checking.PasswordConfirmationCheck()
        # check password validity, quality and strength
        self._validity_check = input_checking.PasswordValidityCheck()
        # connect UI updates to validity check results
        self._validity_check.result.password_score_changed.connect(self.set_password_score)
        self._validity_check.result.status_text_changed.connect(self.set_password_status)
        # check if the password contains non-ascii characters
        self._ascii_check = input_checking.PasswordASCIICheck()

        # register the individual checks with the checker in proper order
        # 0) are the username and fullname valid?
        # 1) is the password non-empty?
        # 2) are both entered passwords the same?
        # 3) is the password valid according to the current password checking policy?
        # 4) is the password free of non-ASCII characters?
        self.checker.add_check(self._username_check)
        self.checker.add_check(self._fullname_check)
        self.checker.add_check(self._empty_check)
        self.checker.add_check(self._confirm_check)
        self.checker.add_check(self._validity_check)
        self.checker.add_check(self._ascii_check)

        self.guesser = {
            self.username_entry: True
        }

        # Configure levels for the password bar
        self.password_bar.add_offset_value("low", 2)
        self.password_bar.add_offset_value("medium", 3)
        self.password_bar.add_offset_value("high", 4)

        # Modify the GUI based on the kickstart and policy information
        # This needs to happen after the input checks have been created, since
        # the Gtk signal handlers use the input check variables.
        password_set_message = _("The password was set by kickstart.")
        if self.password_kickstarted:
            self.password_required = True
            self.password_entry.set_placeholder_text(password_set_message)
            self.password_confirmation_entry.set_placeholder_text(password_set_message)
        elif not self.checker.policy.emptyok:
            # Policy is that a non-empty password is required
            self.password_required = True

        if not self.checker.policy.emptyok:
            # User isn't allowed to change whether password is required or not
            self.password_required_checkbox.set_sensitive(False)

        self._advanced_user_dialog = AdvancedUserDialog(self)
        self._advanced_user_dialog.initialize()

        # set the visibility of the password entries
        set_password_visibility(self.password_entry, False)
        set_password_visibility(self.password_confirmation_entry, False)

        # report that we are done
        self.initialize_done()

    @property
    def username_entry(self):
        return self._username_entry

    @property
    def username(self):
        return self.username_entry.get_text()

    @username.setter
    def username(self, new_username):
        self.username_entry.set_text(new_username)

    @property
    def fullname_entry(self):
        return self._fullname_entry

    @property
    def fullname(self):
        return self.fullname_entry.get_text()

    @fullname.setter
    def fullname(self, new_fullname):
        self.fullname_entry.set_text(new_fullname)

    @property
    def password_required_checkbox(self):
        return self._password_required_checkbox

    @property
    def password_required(self):
        return self.password_required_checkbox.get_active()

    @password_required.setter
    def password_required(self, value):
        self.password_required_checkbox.set_active(value)

    @property
    def user(self):
        """The user that is manipulated by the User spoke.

        This user is always the first one in the user list.

        :return: a UserData instance
        """
        return self._user_list[0]

    def refresh(self):
        # user data could have changed in the Users DBus module
        # since the last visit, so reload it from DBus
        #
        # In the case that the user list is empty or a requested user has
        # been cleared from the list in a previous spoke visit, we need to
        # have an empty user instance prepended to the list.
        self._user_list = get_user_list(self._users_module, add_default=True,
                                        add_if_not_empty=self._requested_user_cleared)

        self.username = self.user.name
        self.fullname = self.user.gecos
        self._admin_checkbox.set_active(self.user.has_admin_priviledges())

        # rerun checks so that we have a correct status message, if any
        self.checker.run_checks()

    @property
    def status(self):
        user_list = get_user_list(self._users_module)
        if not user_list:
            return _("No user will be created")
        elif user_list[0].has_admin_priviledges():
            return _("Administrator %s will be created") % user_list[0].name
        else:
            return _("User %s will be created") % user_list[0].name

    @property
    def mandatory(self):
        """Only mandatory if no admin user has been requested."""
        return not self._users_module.CheckAdminUserExists()

    def apply(self):
        # set the password only if the user enters anything to the text entry
        # this should preserve the kickstart based password
        if self.password_required:
            if self.password:
                self.password_kickstarted = False
                self.user.password = crypt_password(self.password)
                self.user.is_crypted = True
                self.remove_placeholder_texts()
        # reset the password when the user unselects it
        else:
            self.remove_placeholder_texts()
            self.user.password = ""
            self.user.is_crypted = False
            self.password_kickstarted = False

        self.user.name = self.username
        self.user.gecos = self.fullname

        # We make it possible to clear users requested from kickstart (or the DBus API)
        # during an interactive installation. This is done by setting their name
        # to "". Then during apply() we will check the user name and if it is
        # equal to "", we will remember that locally and not forward the user which
        # has been cleared to the DBus module, by using the remove_unset flag
        # for the set_user_list function.

        # record if the requested user has been explicitly unset
        self._requested_user_cleared = not self.user.name

        # clear the unset user (if any)
        set_user_list(self._users_module, self._user_list, remove_unset=True)

    @property
    def sensitive(self):
        # Spoke cannot be entered if a user was set in the kickstart and the user
        # policy doesn't allow changes.
        return not (self.completed and flags.automatedInstall
                    and self._user_requested and not self.checker.policy.changesok)

    @property
    def completed(self):
        return bool(get_user_list(self._users_module))

    def password_required_toggled(self, togglebutton=None, data=None):
        """Called by Gtk callback when the "Use password" check
        button is toggled. It will make password entries in/sensitive."""
        password_is_required = togglebutton.get_active()
        self.password_entry.set_sensitive(password_is_required)
        self.password_confirmation_entry.set_sensitive(password_is_required)
        # also disable/enable corresponding password checks
        self._empty_check.skip = not password_is_required
        self._confirm_check.skip = not password_is_required
        self._validity_check.skip = not password_is_required
        self._ascii_check.skip = not password_is_required
        # and rerun the checks
        self.checker.run_checks()

    def on_password_icon_clicked(self, entry, icon_pos, event):
        """Called by Gtk callback when the icon of a password entry is clicked."""
        set_password_visibility(entry, not entry.get_visibility())

    def on_username_set_by_user(self, editable, data=None):
        """Called by Gtk on user-driven changes to the username field.

        This handler is blocked during changes from the username guesser.
        """
        # If the user set a user name, turn off the username guesser.
        # If the user cleared the username, turn it back on.
        if editable.get_text():
            self.guesser = False
        else:
            self.guesser = True

    def on_username_changed(self, editable, data=None):
        """Called by Gtk on all username changes."""
        new_username = editable.get_text()

        # Disable the advanced user dialog button when no username is set
        if editable.get_text():
            self._advanced_button.set_sensitive(True)
        else:
            self._advanced_button.set_sensitive(False)

        # update the username in checker
        self.checker.username = new_username

        # Skip the empty password checks if no username is set,
        # otherwise the user will not be able to leave the
        # spoke if password is not set but policy requires that.
        self._empty_check.skip = not new_username
        self._validity_check.skip = not new_username
        # Re-run the password checks against the new username
        self.checker.run_checks()

    def on_full_name_changed(self, editable, data=None):
        """Called by Gtk callback when the full name field changes."""
        fullname = editable.get_text()
        if self.guesser:
            username = guess_username(fullname)
            with blockedHandler(self.username_entry, self.on_username_set_by_user):
                self.username = username

        self.checker.fullname = fullname

        # rerun the checks
        self.checker.run_checks()

    def on_admin_toggled(self, togglebutton, data=None):
        # Add or remove user admin status based on changes to the admin checkbox
        self.user.set_admin_priviledges(togglebutton.get_active())

    def on_advanced_clicked(self, _button, data=None):
        """Handler for the Advanced... button. It starts the Advanced dialog
        for setting homedir, uid, gid and groups.
        """
        self.user.name = self.username

        self._advanced_user_dialog.refresh()
        with self.main_window.enlightbox(self._advanced_user_dialog.window):
            self._advanced_user_dialog.run()

        self._admin_checkbox.set_active(self.user.has_admin_priviledges())

    def _checks_done(self, error_message):
        """Update the warning with the input validation error from the first
        error message, or clear warnings if all the checks were successful.

        Also appends the "press twice" suffix if compatible with current
        password policy and handles the press-done-twice logic.
        """
        # check if an unwaivable check failed
        unwaivable_checks = [not self._confirm_check.result.success,
                             not self._username_check.result.success,
                             not self._fullname_check.result.success,
                             not self._empty_check.result.success]

        # with emptyok == False the empty password check becomes unwaivable
        # if not self.checker.policy.emptyok:
        #     unwaivable_checks.append(not self._empty_check.result.success)
        unwaivable_check_failed = any(unwaivable_checks)

        # set appropriate status bar message
        if not error_message:
            # all is fine, just clear the message
            self.clear_info()
        elif not self.username and not self.password and not self.password_confirmation:
            # Clear any info message if username and both the password and password
            # confirmation fields are empty.
            # This shortcut is done to make it possible for the user to leave the spoke
            # without inputting any username or password. Separate logic makes sure an
            # empty string is not unexpectedly set as the user password.
            self.clear_info()
        elif self.username and not self.password and not self.password_confirmation:
            # Also clear warnings if a username is set but an empty password is fine.
            self.clear_info()
        else:
            if self.checker.policy.strict or unwaivable_check_failed:
                # just forward the error message
                self.show_warning_message(error_message)
            else:
                # add suffix for the click twice logic
                self.show_warning_message("{} {}".format(error_message,
                                                         _(constants.PASSWORD_DONE_TWICE)))

        # check if the spoke can be exited after the latest round of checks
        self._check_spoke_exit_conditions(unwaivable_check_failed)

    def _check_spoke_exit_conditions(self, unwaivable_check_failed):
        """Check if the user can escape from the root spoke or stay forever!"""
        # reset any waiving in progress
        self.waive_clicks = 0

        # Depending on the policy we allow users to waive the password strength
        # and non-ASCII checks. If the policy is set to strict, the password
        # needs to be strong, but can still contain non-ASCII characters.
        self.can_go_back = False
        self.needs_waiver = True

        # This shortcut is done to make it possible for the user to leave the spoke
        # without inputting anything. Separate logic makes sure an
        # empty string is not unexpectedly set as the user password.
        if not self.username and not self.password and not self.password_confirmation:
            self.can_go_back = True
            self.needs_waiver = False
        elif self.checker.success:
            # if all checks were successful we can always go back to the hub
            self.can_go_back = True
            self.needs_waiver = False
        elif unwaivable_check_failed:
            self.can_go_back = False
        elif not self.password and not self.password_confirmation:
            self.can_go_back = True
            self.needs_waiver = False
        else:
            if self.checker.policy.strict:
                if not self._validity_check.result.success:
                    # a failing validity check in strict
                    # mode prevents us from going back
                    self.can_go_back = False
                elif not self._ascii_check.result.success:
                    # but the ASCII check can still be waived
                    self.can_go_back = True
                    self.needs_waiver = True
                else:
                    self.can_go_back = True
                    self.needs_waiver = False
            else:
                if not self._confirm_check.result.success:
                    self.can_go_back = False
                if not self._validity_check.result.success:
                    self.can_go_back = True
                    self.needs_waiver = True
                elif not self._ascii_check.result.success:
                    self.can_go_back = True
                    self.needs_waiver = True
                else:
                    self.can_go_back = True
                    self.needs_waiver = False

    def on_back_clicked(self, button):
        # the GUI spoke input check handler handles the spoke exit logic for us
        if self.try_to_go_back():
            NormalSpoke.on_back_clicked(self, button)
        else:
            log.info("Return to hub prevented by password checking rules.")
| (text above) | atodorov/anaconda | pyanaconda/ui/gui/spokes/user.py | Python | gpl-2.0 | 28,258 | ["VisIt"] | f2312d48913d37953fb06cf45859e81975725592dde63b64b7303a2f92397bca |
#
# examples for SourceUndulator to be used in ShadowOui
#
import numpy

from syned.storage_ring.electron_beam import ElectronBeam

from shadow4.sources.undulator.s4_undulator import S4Undulator
from shadow4.sources.undulator.s4_undulator_light_source import S4UndulatorLightSource

if __name__ == "__main__":

    from srxraylib.plot.gol import plot, set_qt
    set_qt()

    do_plots = True

    ebeam = ElectronBeam(energy_in_GeV=6.04,
                         energy_spread=0.0,
                         current=0.2,
                         number_of_bunches=400,
                         moment_xx=(400e-6)**2,
                         moment_xxp=0.0,
                         moment_xpxp=(10e-6)**2,
                         moment_yy=(10e-6)**2,
                         moment_yyp=0.0,
                         moment_ypyp=(4e-6)**2)

    und = S4Undulator(
        K_vertical=0.25,             # syned Undulator parameter
        period_length=0.032,         # syned Undulator parameter
        number_of_periods=50,        # syned Undulator parameter
        emin=10490.0,                # Photon energy scan from energy (in eV)
        emax=10510.0,                # Photon energy scan to energy (in eV)
        ng_e=3,                      # Photon energy scan number of points
        maxangle=0.015,              # Maximum radiation semiaperture in RADIANS
        ng_t=100,                    # Number of points in angle theta
        ng_p=11,                     # Number of points in angle phi
        ng_j=20,                     # Number of points in electron trajectory (per period), for internal calculation only
        code_undul_phot="internal",  # internal, pysru, srw
        flag_emittance=0,            # when sampling rays: use emittance (0=No, 1=Yes)
        flag_size=2,                 # when sampling rays: 0=point, 1=Gaussian, 2=FT(Divergences)
    )

    print("gamma: ", ebeam.gamma())
    print("resonance: ", und.resonance_energy(ebeam.gamma()))
    und.set_energy_monochromatic(und.resonance_energy(ebeam.gamma()))
    # sourceundulator._MAXANGLE *= 1.2
    # print(und.info())

    ls = S4UndulatorLightSource(name="", electron_beam=ebeam, undulator_magnetic_structure=und)
    beam = ls.get_beam(user_unit_to_m=1.0, F_COHER=0, NRAYS=15000, SEED=5655452)
    print(ls.info())

    #
    # plot
    #
    if do_plots:
        from srxraylib.plot.gol import plot_image, plot_scatter

        radiation, photon_energy, theta, phi = ls.get_radiation_polar()
        plot_image(radiation[0], 1e6 * theta, phi, aspect='auto', title="intensity",
                   xtitle="theta [urad]", ytitle="phi [rad]")

        radiation_interpolated, photon_energy, vx, vz = ls.get_radiation_interpolated_cartesian()
        plot_image(radiation_interpolated[0], vx, vz, aspect='auto',
                   title="intensity interpolated in cartesian grid", xtitle="vx", ytitle="vy")

        polarization = ls.get_result_polarization()
        plot_image(polarization[0], 1e6 * theta, phi, aspect='auto', title="polarization",
                   xtitle="theta [urad]", ytitle="phi [rad]")

    print("Beam intensity: ", beam.get_column(23).sum())
    print("Beam intensity s-pol: ", beam.get_column(24).sum())
    print("Beam intensity p-pol: ", beam.get_column(25).sum())

    #
    # plot
    #
    if do_plots:
        plot_scatter(1e6 * beam.rays[:, 0], 1e6 * beam.rays[:, 2], title="real space",
                     xtitle="X [um]", ytitle="Z [um]", show=False)
        plot_scatter(1e6 * beam.rays[:, 3], 1e6 * beam.rays[:, 5], title="divergence space",
                     xtitle="X [urad]", ytitle="Z [urad]", show=True)

        plot(ls.get_photon_size_distribution()[0] * 1e6,
             ls.get_photon_size_distribution()[1],
             title="Photon size distribution", xtitle="R [um]", ytitle="Intensity [a.u.]")

    # check the correct size sampling (values must agree for FLAG_SIZE=1!!!)
    x_photon = beam.rays[:, 0]
    z_photon = beam.rays[:, 2]
    R = numpy.sqrt(x_photon**2 + z_photon**2)
    print(">> s_phot, Std R", ls.get_result_photon_size_sigma(), numpy.sqrt((R ** 2).sum() / (R.size - 1)))
    print(">> s_phot, Std X", ls.get_result_photon_size_sigma(), numpy.sqrt((x_photon ** 2).sum() / (x_photon.size - 1)))
    print(">> s_phot, Std Z", ls.get_result_photon_size_sigma(), numpy.sqrt((z_photon ** 2).sum() / (z_photon.size - 1)))
| (text above) | srio/minishadow | examples/sources/example_source_undulator.py | Python | mit | 4,007 | ["Gaussian"] | a5008cb5243dfcc728dddc163babf9b993e4faa993a8a1fbbba62a5ae5667779 |
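The three closing print statements compare the analytic photon-size sigma against the root-mean-square of the sampled ray coordinates; for a zero-mean distribution, sqrt(sum(x^2)/(n-1)) is simply the sample standard deviation. A numpy-only sketch of that sanity check on synthetic rays (sigma and the ray count are invented):

```python
import numpy as np

rng = np.random.default_rng(42)
sigma = 3.5e-6                        # "analytic" size we expect to recover
x = rng.normal(0.0, sigma, 100000)    # zero-mean sampled coordinates

rms = np.sqrt((x ** 2).sum() / (x.size - 1))
print(sigma, rms, np.std(x, ddof=1))  # all three agree closely
```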
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# CREATED:2016-02-01 16:50:04 by Brian McFee <brian.mcfee@nyu.edu>
'''Deprecation utilities'''

import inspect
import warnings


class Deprecated(object):
    '''A dummy class to catch usage of deprecated variable names'''
    def __repr__(self):
        return '<DEPRECATED parameter>'


def rename_kw(old_name, old_value, new_name, new_value, version_deprecated, version_removed):
    '''Handle renamed arguments.

    Parameters
    ----------
    old_name : str
    old_value
        The name and value of the old argument

    new_name : str
    new_value
        The name and value of the new argument

    version_deprecated : str
        The version at which the old name became deprecated

    version_removed : str
        The version at which the old name will be removed

    Returns
    -------
    value
        - `new_value` if `old_value` is of type `Deprecated`
        - `old_value` otherwise

    Warnings
    --------
    A `DeprecationWarning` is issued if `old_value` is not of type `Deprecated`.
    '''
    if isinstance(old_value, Deprecated):
        return new_value
    else:
        stack = inspect.stack()
        dep_func = stack[1]
        caller = stack[2]

        warnings.warn_explicit("{:s}() keyword argument '{:s}' has been renamed to '{:s}' in "
                               "version {:}."
                               "\n\tThis alias will be removed in version "
                               "{:}.".format(dep_func[3],
                                             old_name, new_name,
                                             version_deprecated, version_removed),
                               category=DeprecationWarning,
                               filename=caller[1],
                               lineno=caller[2])

        return old_value
| (text above) | craffel/librosa | librosa/util/deprecation.py | Python | isc | 1,812 | ["Brian"] | cb4b8638797a5a0b2194d5aec5568c809918cf917bc6b3469754a22d2232e3cb |
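The `Deprecated` sentinel lets a function keep accepting a renamed keyword without ambiguity: the old parameter defaults to `Deprecated()`, so `rename_kw` can tell "caller passed the old name" apart from "caller passed nothing". A hedged usage sketch (the `duration` function and the version numbers are invented for illustration; the import path follows this record's file location):

```python
from librosa.util.deprecation import Deprecated, rename_kw


def duration(y, sr=22050, rate=Deprecated()):
    # 'rate' was (hypothetically) renamed to 'sr'; warn if the old name is used.
    sr = rename_kw('rate', rate, 'sr', sr, '0.5.0', '0.6.0')
    return len(y) / float(sr)


duration([0.0] * 44100, sr=44100)    # silent: the new name was used
duration([0.0] * 44100, rate=44100)  # emits a DeprecationWarning, still works
```

Because the sentinel is a dedicated class rather than `None`, callers can still legitimately pass `None` through either keyword without tripping the deprecation path.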
# coding: utf-8

from __future__ import unicode_literals

import unittest
import os

import numpy as np

from pymatgen.phasediagram.entries import PDEntryIO
from pymatgen.phasediagram.pdmaker import PhaseDiagram
from pymatgen.phasediagram.plotter import PDPlotter, uniquelines, \
    triangular_coord, tet_coord

module_dir = os.path.dirname(os.path.abspath(__file__))


class PDPlotterTest(unittest.TestCase):
    def setUp(self):
        (elements, entries) = PDEntryIO.from_csv(os.path.join(module_dir, "pdentries_test.csv"))
        self.pd = PhaseDiagram(entries)
        self.plotter = PDPlotter(self.pd)

    def test_pd_plot_data(self):
        (lines, labels, unstable_entries) = self.plotter.pd_plot_data
        self.assertEqual(len(lines), 22)
        self.assertEqual(len(labels), len(self.pd.stable_entries),
                         "Incorrect number of labels generated!")
        self.assertEqual(len(unstable_entries),
                         len(self.pd.all_entries) - len(self.pd.stable_entries),
                         "Incorrect number of unstable entries generated!")


class UtilityFunctionTest(unittest.TestCase):
    def test_unique_lines(self):
        testdata = [[5, 53, 353], [399, 20, 52], [399, 400, 20], [13, 399, 52],
                    [21, 400, 353], [393, 5, 353], [400, 393, 353],
                    [393, 400, 399], [393, 13, 5], [13, 393, 399],
                    [400, 17, 20], [21, 17, 400]]
        expected_ans = set([(5, 393), (21, 353), (353, 400), (5, 13), (17, 20),
                            (21, 400), (17, 400), (52, 399), (393, 399),
                            (20, 52), (353, 393), (5, 353), (5, 53), (13, 399),
                            (393, 400), (13, 52), (53, 353), (17, 21),
                            (13, 393), (20, 399), (399, 400), (20, 400)])
        self.assertEqual(uniquelines(testdata), expected_ans)

    def test_triangular_coord(self):
        coord = [0.5, 0.5]
        coord = triangular_coord(coord)
        self.assertTrue(np.allclose(coord, [0.75, 0.4330127]))

    def test_tet_coord(self):
        coord = [0.5, 0.5, 0.5]
        coord = tet_coord(coord)
        self.assertTrue(np.allclose(coord, [1., 0.57735027, 0.40824829]))


if __name__ == '__main__':
    unittest.main()
| (text above) | Dioptas/pymatgen | pymatgen/phasediagram/tests/test_plotter.py | Python | mit | 2,182 | ["pymatgen"] | 08d5535e3055445559e407601b7d417fcde80700e2a5ffd5a073fcd79bcbe707 |
# -*- coding: utf-8 -*-
# Copyright 2013-2014 Victor Amin, http://vamin.net/
"""MESS.DB balloon method

This module contains the balloon method class and load function.
"""
from __future__ import print_function
from __future__ import unicode_literals

import binascii
import codecs
import os
import subprocess
import sys
import time

import pybel

from mess.method import AbstractMethod
from mess.utils import setup_dir


class Balloon(AbstractMethod):
    """This method uses balloon to generate 3D structures from 0D strings."""
    # method info
    description = 'Generate 3d structures from InChI with balloon'
    geop = 1
    # program info
    prog_name = 'Balloon'
    prog_version = ''  # set dynamically by property method
    prog_url = 'http://users.abo.fi/mivainio/balloon/'
    prog_citation = ('Mikko J. Vainio and Mark S. Johnson (2007) Generating '
                     'Conformer Ensembles Using a Multiobjective Genetic '
                     'Algorithm. Journal of Chemical Information and '
                     'Modeling, 47, 2462 - 2474.')
    # parameters
    parameters = {'--maxtime': '1024',
                  '--nGenerations': '1024',
                  '--maxFlipDistance': '32',
                  '--distanceDependent': '',
                  '--fullforce': '',
                  '--singleconf': '',
                  '--randomSeed': '#crc32(inchikey)'}

    @property
    def prog_version(self):
        """Get prog_version from call to balloon."""
        try:
            balloon = subprocess.Popen(['balloon'], stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
            return balloon.stdout.read().split()[2]
        except OSError:
            sys.exit('The %s method requires Balloon (%s).' %
                     (self.method_name, self.prog_url))

    def check_dependencies(self):
        """Check that $BALLOON_FORCEFIELD is set."""
        # setting prog_version checks for balloon, so no need here
        if not os.environ.get('BALLOON_FORCEFIELD'):
            sys.exit(('You must set the $BALLOON_FORCEFIELD environment '
                      'variable to the path to MMFF94.mff.'))
        return True

    def map(self, inchikey, inchikey_dir):
        """Generate 3D structures with Balloon."""
        self.inchikey = inchikey
        start = time.time()
        out_dir = os.path.realpath(os.path.join(inchikey_dir, self.method_dir))
        setup_dir(out_dir)
        sdf_out = os.path.realpath(os.path.join(out_dir,
                                                '%s.sdf' % self.inchikey))
        xyz_out = os.path.join(out_dir, '%s.xyz' % self.inchikey)
        messages = []
        if not self.check(xyz_out):
            query = 'SELECT smiles FROM molecule WHERE inchikey=?'
            r = self.db.execute(query, (self.inchikey,)).next()
            # get positive 32-bit integer
            seed = binascii.crc32(inchikey) & 0xffffffff
            try:
                os.remove(sdf_out)
            except OSError:
                pass
            balloon_cmd = ['balloon']
            for k, v in self.parameters.items():
                if k.startswith('#') or v.startswith('#'):
                    continue
                balloon_cmd.append(k)
                if v:
                    balloon_cmd.append(v)
            balloon_cmd.extend(['--randomSeed', str(seed), r.smiles, sdf_out])
            balloon = subprocess.Popen(balloon_cmd, cwd=out_dir,
                                       stdin=subprocess.PIPE,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
            balloon.stdin.write('Y')  # in case balloon asks about overwrite
            messages.append(balloon.stdout.read())
            messages.append(balloon.stderr.read())
            forcefield = b'mmff94s'
            steps = 512
            moldata = {}
            try:
                mol = pybel.readfile('sdf', sdf_out).next()
                mol.write(b'xyz', str(xyz_out))
                for query, values in self.get_insert_moldata_queries(
                        self.inchikey,
                        mol,
                        description='balloon molecule data'):
                    try:
                        moldata[query].append(values)
                    except KeyError:
                        moldata[query] = [values]
            except IOError:
                sdf_bad = os.path.join(out_dir, '%s_bad.sdf' % inchikey)
                try:
                    mol = pybel.readfile('sdf', sdf_bad).next()
                    mol.localopt(forcefield=forcefield, steps=steps)
                    self.log_all.info(('"bad" %s sdf cleaned up '
                                       'with %s forcefield '
                                       'and %i steps'),
                                      self.inchikey,
                                      forcefield,
                                      steps)
                    mol.write(b'xyz', str(xyz_out))
                except IOError:
                    pass
            if self.check(xyz_out):
                if abs(mol.molwt - pybel.readstring('smi',
                                                    r.smiles).molwt) > 0.001:
                    moldata = {}
                    mol = pybel.readstring(b'smi', str(r.smiles))
                    mol.make3D(forcefield, steps)
                    mol.write(b'xyz', str(xyz_out), overwrite=True)
                    self.log_all.info(('%s 3D coordinates generation '
                                       'attempted by '
                                       'Open Babel rule-based algorithm '
                                       '(forcefields=%s steps=%i) instead of '
                                       'balloon due to hydrogen atom '
                                       'mismatch'),
                                      self.inchikey, forcefield, steps)
            else:
                moldata = {}
                mol = pybel.readstring(b'smi', str(r.smiles))
                mol.make3D(forcefield, steps)
                mol.write(b'xyz', str(xyz_out), overwrite=True)
                self.log_all.info(('%s 3D coordinates generation attempted by '
                                   'Open Babel rule-based algorithm '
                                   '(forcefields=%s steps=%i) instead of '
                                   'balloon due to unexpected failure'),
                                  self.inchikey, forcefield, steps)
            if self.check(xyz_out):
                self.log_all.info('%s 3D coordinates generated successfully',
                                  self.inchikey)
            else:
                self.log_all.warning('%s coordinate generation failed',
                                     self.inchikey)
            for query, values in moldata.iteritems():
                for v in values:
                    yield query, v
            yield self.get_timing_query(self.inchikey, start)
        else:
            self.log_console.info('%s skipped', self.inchikey)

    def check(self, xyz_out):
        """Check that a valid xyz file was created.

        Args:
            xyz_out: Path to the xyz file generated by the balloon method.

        Returns:
            True if valid xyz, False otherwise.
        """
        try:
            with codecs.open(xyz_out, encoding='utf-8') as f:
                for i, l in enumerate(f):
                    if i == 0:
                        atoms = l.strip()
                lines = i + 1
                if int(atoms) == lines - 2:
                    return True
                else:
                    return False
        except IOError:
            return False


def load():
    """Load Balloon()."""
    return Balloon()
| (text above) | vamin/MESS.DB | mess/methods/balloon.py | Python | agpl-3.0 | 7,848 | ["Open Babel", "Pybel"] | 0c1cc9035173eacd42d292c75a98a8019a18d96ff03b04c5c58eb5a220e82e75 |
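One detail worth noting in `map()` is the `#crc32(inchikey)` placeholder: parameters starting with `#` are skipped when the command line is built, and the seed is substituted by hand as a CRC32 of the InChIKey masked to a positive 32-bit value, so the same molecule always gets the same "random" seed. A stdlib-only sketch of that deterministic-seed trick (the key below is just an example InChIKey):

```python
import binascii


def deterministic_seed(key):
    # binascii.crc32 can return a signed value on Python 2;
    # masking with 0xffffffff guarantees a positive 32-bit integer.
    return binascii.crc32(key.encode("utf-8")) & 0xffffffff


print(deterministic_seed("BSYNRYMUTXBXSQ-UHFFFAOYSA-N"))  # same key -> same seed
```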
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
This is a resource file that renders a static web page.

To test the script, rename the file to hello.rpy, and move it to any directory,
let's say /var/www/html/.

Now, start your Twisted web server:
    $ twistd -n web --path /var/www/html/

And visit http://127.0.0.1:8080/hello.rpy with a web browser.
"""

from twisted.web import static
import time

now = time.ctime()

d = '''\
<HTML><HEAD><TITLE>Hello Rpy</TITLE>
<H1>Hello World, It is Now %(now)s</H1>
<UL>
''' % vars()

for i in range(10):
    d += "<LI>%(i)s" % vars()

d += '''\
</UL>
</BODY></HTML>
'''

resource = static.Data(d, 'text/html')
| (text above) | EricMuller/mynotes-backend | requirements/twisted/Twisted-17.1.0/docs/web/examples/hello.rpy.py | Python | mit | 685 | ["VisIt"] | 6038ea3eb6c745d21899c06018cefeee669bdf764b56843ed18f892b726cd655 |
""" RemoveLambdas turns lambda into regular functions. """
from pythran.analyses import GlobalDeclarations, ImportedIds
from pythran.passmanager import Transformation
from pythran.tables import MODULES
from pythran.conversion import mangle
import pythran.metadata as metadata
from copy import copy
import gast as ast
class _LambdaRemover(ast.NodeTransformer):
def __init__(self, parent, prefix):
super(_LambdaRemover, self).__init__()
self.prefix = prefix
self.parent = parent
def __getattr__(self, attr):
return getattr(self.parent, attr)
def visit_Lambda(self, node):
if MODULES['functools'] not in self.global_declarations.values():
import_ = ast.Import([ast.alias('functools', mangle('functools'))])
self.imports.append(import_)
functools_module = MODULES['functools']
self.global_declarations[mangle('functools')] = functools_module
self.generic_visit(node)
forged_name = "{0}_lambda{1}".format(
self.prefix,
len(self.lambda_functions))
ii = self.gather(ImportedIds, node)
ii.difference_update(self.lambda_functions) # remove current lambdas
binded_args = [ast.Name(iin, ast.Load(), None, None)
for iin in sorted(ii)]
node.args.args = ([ast.Name(iin, ast.Param(), None, None)
for iin in sorted(ii)] +
node.args.args)
forged_fdef = ast.FunctionDef(
forged_name,
copy(node.args),
[ast.Return(node.body)],
[], None, None)
metadata.add(forged_fdef, metadata.Local())
self.lambda_functions.append(forged_fdef)
self.global_declarations[forged_name] = forged_fdef
proxy_call = ast.Name(forged_name, ast.Load(), None, None)
if binded_args:
return ast.Call(
ast.Attribute(
ast.Name(mangle('functools'), ast.Load(), None, None),
"partial",
ast.Load()
),
[proxy_call] + binded_args,
[])
else:
return proxy_call
class RemoveLambdas(Transformation):
"""
Turns lambda into top-level functions.
>>> import gast as ast
>>> from pythran import passmanager, backend
>>> node = ast.parse("def foo(y): lambda x:y+x")
>>> pm = passmanager.PassManager("test")
>>> _, node = pm.apply(RemoveLambdas, node)
>>> print(pm.dump(backend.Python, node))
import functools as __pythran_import_functools
def foo(y):
__pythran_import_functools.partial(foo_lambda0, y)
def foo_lambda0(y, x):
return (y + x)
"""
def __init__(self):
super(RemoveLambdas, self).__init__(GlobalDeclarations)
def visit_Module(self, node):
self.lambda_functions = list()
self.imports = list()
self.generic_visit(node)
node.body = self.imports + node.body + self.lambda_functions
self.update |= bool(self.imports) or bool(self.lambda_functions)
return node
def visit_FunctionDef(self, node):
lr = _LambdaRemover(self, node.name)
node.body = [lr.visit(n) for n in node.body]
return node
| (text above) | pombredanne/pythran | pythran/transformations/remove_lambdas.py | Python | bsd-3-clause | 3,311 | ["VisIt"] | c4099cacaa6f1c9774b4b5b261b3027d2d7b35c73d8a58f39c8cf6cbc93e14c6 |
#!/usr/bin/python

import os
import math
import re
import numpy as nm
import pandas as pd
import xml.etree.ElementTree
import pickle
import SAP
from Bio import SeqIO
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio.Blast import NCBIXML
import urllib


def similarityScore(seq1, seq2):
    # Source: SAP codebase, 'UtilityFunctions.py'
    assert (len(seq1) == len(seq2))
    seq1LeftBoundary = len(re.search("^(-*)", seq1).groups()[0])
    seq1RightBoundary = len(seq1) - len(re.search("(-*)$", seq1).groups()[0])
    seq2LeftBoundary = len(re.search("^(-*)", seq2).groups()[0])
    seq2RightBoundary = len(seq2) - len(re.search("(-*)$", seq2).groups()[0])
    leftBoundary = max(seq1LeftBoundary, seq2LeftBoundary)
    rightboundary = min(seq1RightBoundary, seq2RightBoundary)
    seq1Trunc = seq1[leftBoundary:rightboundary]
    seq2Trunc = seq2[leftBoundary:rightboundary]
    length = len(seq1Trunc)
    extraGaps = 0
    ident = 0
    for i in range(0, length):
        if seq1Trunc[i] == "-" and seq2Trunc[i] == "-":
            extraGaps += 1
            continue
        if seq1Trunc[i] == seq2Trunc[i]:
            ident += 1
    score = ident / float(length - extraGaps)
    return score


def getPathFromSEQID(SEQID, target):
    # target: The type of file requested,
    # such as a treestatscache file
    return 0


def getBranchSummaryRecurse(node):
    children = node.keys()
    numChildren = len(children)
    if numChildren == 0:
        return {'count': 0, 'depth': 1, 'leaves': [node], 'branchLeafCount': 0}
    summary = {}
    summary['count'] = numChildren - 1
    summary['branchLeafCount'] = 0
    summary['depth'] = 1
    summary['leaves'] = []
    depths = []
    for child in children:
        ret = getBranchSummaryRecurse(node[child])
        summary['leaves'] = summary['leaves'] + ret['leaves']
        summary['count'] = summary['count'] + ret['count']
        depths.append(ret['depth'])
        summary['branchLeafCount'] = ret['branchLeafCount'] + summary['branchLeafCount']
    summary['depth'] = summary['depth'] + max(depths)
    if (all(x == 1 for x in depths) == True):
        summary['branchLeafCount'] = numChildren - 1
    return summary


def getBranchSummary(tree):
    # dTree is a tree stored as a dictionary
    root = tree[tree.keys()[0]]
    summary = {}
    summary['count'] = 1
    summary['branchLeafCount'] = 0
    summary['depth'] = -1
    summary['leaves'] = []
    ret = getBranchSummaryRecurse(root)
    summary['count'] = summary['count'] + ret['count']
    summary['depth'] = summary['depth'] + ret['depth']
    summary['leaves'] = ret['leaves']
    summary['branchLeafCount'] = ret['branchLeafCount']
    return summary


def getAssignmentsFile(SAPdir):
    assignmentsPath = SAPdir + "assignments.csv"
    print(assignmentsPath)
    # assignmentsPath = SAPdir + "assignments-2.csv"
    if (os.path.exists(assignmentsPath) == False):
        print("File not found: ", assignmentsPath)
        return None
    assignments = pd.read_csv(assignmentsPath, header=None)
    assignments.columns = ["File", "Cutoff", "Detail", "ID",
                           "Phylum", "Class", "Order", "Family", "Genus", "Species",
                           "NumHomologues", "MinFreqHomologue", "MinTaxonProb"]
    return assignments


def getTaxonProbabilitiesFile(SAPdir):
    taxonProbsPath = SAPdir + "taxon_probabilities.csv"
    if (os.path.exists(taxonProbsPath) == False):
        print("File not found: ", taxonProbsPath)
        return None
    taxonProbs = pd.read_csv(taxonProbsPath, header=None)
    taxonProbs.columns = ["File", "ID", "BestRank", "BestTaxon", "PosteriorProb"]
    return taxonProbs


def getTreeFile(SAPdir, queryFile, SEQID):
    treePath = SAPdir + "treestatscache/" + queryFile + "_" + SEQID.replace(":", "_").replace("-", "_").replace(" ", "_").replace("|", "_") + ".pickle"
    if (os.path.exists(treePath) == False):
        print("File not found: ", treePath)
        return None
    treeFile = open(treePath, 'rb')
    tree = pickle.load(treeFile)
    treeFile.close()
    tree._removePlaceHolders()
    return tree


def getBlastCacheFile(SAPdir, queryFile, SEQID):
    # MUST be dynamic/parameterized
    blastCachePath = SAPdir + "blastcache/" + queryFile + "_" + SEQID.replace(":", "_").replace("-", "_").replace(" ", "_").replace("|", "_") + ".200_10.0" + ".xml"
    if (os.path.exists(blastCachePath) == False):
        print("File not found: ", blastCachePath)
        return None
    blastCache = xml.etree.ElementTree.parse(blastCachePath).getroot()
    return blastCache


def getHomologCacheFile(SAPdir, queryFile, SEQID):
    homologCachePath = SAPdir + "homologcache/" + queryFile + "_" + SEQID.replace(":", "_").replace("-", "_").replace(" ", "_").replace("|", "_") + ".pickle"
    if (os.path.exists(homologCachePath) == False):
        print("File not found: ", homologCachePath)
        return None
    homologCacheFile = open(homologCachePath, "rb")
    homologCache = pickle.load(homologCacheFile)
    homologCacheFile.close()
    return homologCache


def getHomologFastaFile(SAPdir, queryFile, SEQID):
    homologFastaPath = SAPdir + "homologcache/" + queryFile + "_" + SEQID.replace(":", "_").replace("-", "_").replace(" ", "_").replace("|", "_") + ".fasta"
    if (os.path.exists(homologFastaPath) == False):
        print("File not found: ", homologFastaPath)
        return None
    homologFasta = SeqIO.to_dict(SeqIO.parse(homologFastaPath, "fasta"))
    return homologFasta


def getClonesFile(SAPdir, queryFile, SEQID):
    clonesPath = SAPdir + "html/clones/" + queryFile + "_" + SEQID.replace(":", "_").replace("-", "_").replace(" ", "_").replace("|", "_") + ".html"
    if (os.path.exists(clonesPath) == False):
        print("File not found: ", clonesPath)
        return None
    clones = urllib.urlopen(clonesPath).read()
    return clones


def ensure_dir(file_path):
    # Source: https://stackoverflow.com/questions/273192/how-can-i-create-a-directory-if-it-does-not-exist
    directory = os.path.dirname(file_path)
    if not os.path.exists(directory):
        os.makedirs(directory)


def makeHomologueFastaPath(SAPdir, queryFile, SEQID):
    ensure_dir(SAPdir + "evaluation/")
    homologueFastaPath = SAPdir + "evaluation/" + "homologuesToAnalyze_" + queryFile + "_" + SEQID.replace(":", "_").replace("-", "_").replace(" ", "_").replace("|", "_") + ".fasta"
    return homologueFastaPath


def makeBlastResultsPath(SAPdir, queryFile, SEQID):
    ensure_dir(SAPdir + "evaluation/")
    blastResultsPath = SAPdir + "evaluation/" + "blast_" + queryFile + "_" + SEQID.replace(":", "_").replace("-", "_").replace(" ", "_").replace("|", "_") + ".xml"
    return blastResultsPath


def makeOutputPath(SAPdir, outfile):
outputPath = SAPdir + outfile + ".csv"
return outputPath
def most_common(lst):
# Source: https://stackoverflow.com/a/1518632
return max(set(lst), key=lst.count)
def main():
# Parameters
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-d", "--directory", dest="DIR",
help = "SAP results directory (root)")
parser.add_option("-c", "--cutoff", dest="CUT",
help = "evaluate results at this cutoff")
parser.add_option("-o", "--outfile", dest="OUT",
help = "Output file to hold attributes CSV")
(options, args) = parser.parse_args ()
# SHOULD BE PARAMETERS
#SAPdir = "/media/Wapuilani/evan/repo/SAP_formatter_clean/Simons8/"
SAPdir = options.DIR
cutoff = float (options.CUT)
outfile = options.OUT
if (os.path.isdir(SAPdir) == False):
print ("Directory", SAPdir, "does not exist")
exit(1)
if (SAPdir[len(SAPdir) - 1] != '/'):
SAPdir = SAPdir + "/"
# Get assignments file
assignments = getAssignmentsFile(SAPdir)
assignments = assignments.loc[assignments['Cutoff'] == cutoff]
# Get taxon probabilities file
taxonProbs = getTaxonProbabilitiesFile(SAPdir)
taxonProbsBest = taxonProbs.drop_duplicates(subset="ID")
# Initialize table to hold scenario classification for each query sequence
classified = taxonProbsBest['ID'].copy().to_frame()
classified['SpeciesLevel'] = False
classified['SpeciesCount'] = 0
classified['BranchCount'] = 0
classified['BranchLeafCount'] = 0
classified['BranchInteriorCount'] = 0
classified['TreeDepth'] = 0
classified['SpeciesProbMax'] = 0
classified['SpeciesProbMin'] = 0
classified['HomologuesToAnalyzeCount'] = 0
classified['HomologuesSuspiciousRecordsCount'] = 0
classified['HasHomologuesSuspiciousRecords'] = False
classified['HomologueCount'] = 0
#classified['MinFreqHomologueProb'] = 0
#classified['MinFreqHomologueProbWarning'] = False
#classified['MinTaxonProb'] = 0
#classified['MinTaxonProbWarning'] = False
classified['DatabaseNotExhausted'] = False
# Investigate and classify each sequence assignment
# Note that the number of sequences may by less than the number of query sequences,
# since some queries cannot be assigned given the parameters and database
for index, row in taxonProbsBest.iterrows():
        row['ID'] = re.sub(r" $", "", row['ID'])
        print(row['ID'])
#break
print (assignments.loc[assignments['ID'] == row['ID'], 'Species'])
# Is there a species-level identification (posterior prob >= cutoff)
try:
math.isnan(assignments.loc[assignments['ID'] == row['ID'], 'Species'])
classified.loc[classified['ID'] == row['ID'], 'SpeciesLevel'] = False
except:
classified.loc[classified['ID'] == row['ID'], 'SpeciesLevel'] = True
# Get number of branches and tree depth
tree = getTreeFile (SAPdir, row['File'], row['ID'])
branchSummary = getBranchSummary(tree)
classified.loc[classified['ID'] == row['ID'], 'BranchCount'] = branchSummary['count']
classified.loc[classified['ID'] == row['ID'], 'TreeDepth'] = branchSummary['depth']
classified.loc[classified['ID'] == row['ID'], 'BranchLeafCount'] = branchSummary['branchLeafCount']
classified.loc[classified['ID'] == row['ID'], 'BranchInteriorCount'] = branchSummary['count'] - branchSummary['branchLeafCount']
# Get number of species in tree
speciesCount = len(branchSummary['leaves'])
classified.loc[classified['ID'] == row['ID'], 'SpeciesCount'] = speciesCount
# Get max and min species posterior prob
speciesProbs = tree.getLevelProbs(level="species")[-speciesCount:]
print (speciesProbs)
if (len (speciesProbs) > 0):
species, probs = zip(*speciesProbs)
speciesProbsMax = max(probs)
classified.loc[classified['ID'] == row['ID'], 'SpeciesProbMax'] = speciesProbsMax
speciesProbsMin = min(probs)
classified.loc[classified['ID'] == row['ID'], 'SpeciesProbMin'] = speciesProbsMin
else:
classified.loc[classified['ID'] == row['ID'], 'SpeciesProbMax'] = float('NaN')
classified.loc[classified['ID'] == row['ID'], 'SpeciesProbMin'] = float('NaN')
# Get HomologueCount, MinFreqHomologueProb, MinTaxonProb
classified.loc[classified['ID'] == row['ID'], 'HomologueCount'] = len(assignments.loc[assignments['ID'] == row['ID'], 'NumHomologues'])
#classified.loc[classified['ID'] == row['ID'], 'MinFreqHomologueProb'] = float(assignments.loc[assignments['ID'] == row['ID'], 'MinFreqHomologue'])
#if classified.loc[classified['ID'] == row['ID'], 'MinFreqHomologueProb'].any() > 0.0001:
# classified.loc[classified['ID'] == row['ID'], 'MinFreqHomologueProbWarning'] = True
#classified.loc[classified['ID'] == row['ID'], 'MinTaxonProb'] = float (assignments.loc[assignments['ID'] == row['ID'], 'MinTaxonProb'])
#if classified.loc[classified['ID'] == row['ID'], 'MinTaxonProb'].any() > 0.0001:
# classified.loc[classified['ID'] == row['ID'], 'MinTaxonProbWarning'] = True
# BLAST each species in tree
MislabelledHomologueCount = 0
blastCache = getBlastCacheFile(SAPdir, row['File'], row['ID'])
homologCache = getHomologCacheFile(SAPdir, row['File'], row['ID'])
homologFasta = getHomologFastaFile(SAPdir, row['File'], row['ID'])
homologues = homologCache.homologues
clones = getClonesFile(SAPdir, row['File'], row['ID'])
# Check for warning about database not being exhausted
warning = re.search("database was not exhausted", clones)
if warning is not None:
classified.loc[classified['ID'] == row['ID'], 'DatabaseNotExhausted'] = True
querySequence = homologCache.queryFasta.sequence
homologuesToAnalyze = []
homologuesToAnalyzeFasta = []
homologuesToAnalyzeFastaFile = makeHomologueFastaPath(SAPdir, row['File'], row['ID'])
homologuesToAnalyzeBlastResultsFile = makeBlastResultsPath(SAPdir, row['File'], row['ID'])
for h in homologues:
hFastaHeader = h + "_" + h
hFastaEntry = homologFasta[hFastaHeader]
cloneLine = re.search(hFastaHeader + r"<\/a>:<\/td><td>[.0-9]*<\/td>", clones)
hIdentityScore = re.search(r"[0-9]*\.[0-9]*", cloneLine.group(0)).group(0)
if (float(hIdentityScore) > 0.9):
                homologuesToAnalyze.append(dict({'ID':h, 'FastaEntry':hFastaEntry, 'IdentityScore':hIdentityScore}))
homologuesToAnalyzeFasta.append(hFastaEntry)
classified.loc[classified['ID'] == row['ID'], 'HomologuesToAnalyzeCount'] = len(homologuesToAnalyze)
# If the number of homologues to analyze is greater than 1, Blast to find mislabeled database entries
if (len(homologuesToAnalyze) > 1):
# Write selected homologues to Fasta in order to Blast them
SeqIO.write(homologuesToAnalyzeFasta, homologuesToAnalyzeFastaFile, "fasta")
# Blast them
blastx_cline = NcbiblastnCommandline(query = homologuesToAnalyzeFastaFile,
db="/media/Wapuilani/Databases/FILTER_BLASTDB/nr_mito.SAP.fix.fasta", outfmt=5,
out=homologuesToAnalyzeBlastResultsFile, perc_identity=90)
blastx_cline()
result = open(homologuesToAnalyzeBlastResultsFile, "r")
records = NCBIXML.parse(result)
hasRecords = True
# Investigate Blast results for each selected homologue
item = next(records)
suspiciousHomologueDatabaseRecords = 0
while (hasRecords == True):
genusList = []
queryGenus = ""
count = 0
for alignment in item.alignments:
try:
genus = re.search(r"genus[^,]*,", alignment.title).group(0).replace('genus: ', '').replace(',', '')
except:
genus = ""
genusList.append(genus)
if count == 0:
queryGenus = genus
count = count + 1
consensusGenus = most_common(genusList)
if (consensusGenus != queryGenus):
suspiciousHomologueDatabaseRecords = suspiciousHomologueDatabaseRecords + 1
item = next(records, False)
if (item == False):
hasRecords = False
classified.loc[classified['ID'] == row['ID'], 'HomologuesSuspiciousRecordsCount'] = suspiciousHomologueDatabaseRecords
if suspiciousHomologueDatabaseRecords > 0:
classified.loc[classified['ID'] == row['ID'], 'HasHomologuesSuspiciousRecords'] = True
#print classified.loc[classified['ID'] == row['ID']]
#if (len(homologuesToAnalyze) > 1):
# exit(0)
outputPath = makeOutputPath(SAPdir, outfile)
classified.to_csv(outputPath, index = False)
return 0
if __name__ == "__main__":
main()
|
ekrell/evaluateSAP
|
evaluateSAP.py
|
Python
|
gpl-3.0
| 16,033
|
[
"BLAST"
] |
11395af9e02e18f00072c8b8f4c337fc06afdef7de44d4de813a17c07b7ea27f
|
import os
import gc
import sys
import time
import signal
import traceback
import numpy as np
from gpaw.atom.generator import Generator
from gpaw.atom.configurations import parameters
from gpaw.utilities import devnull
from gpaw import setup_paths
from gpaw import mpi
import gpaw
def equal(x, y, tolerance=0, fail=True, msg=''):
"""Compare x and y."""
if not np.isfinite(x - y) or abs(x - y) > tolerance:
msg = (msg + '%.9g != %.9g (error: |%.9g| > %.9g)' %
(x, y, x - y, tolerance))
if fail:
raise AssertionError(msg)
else:
sys.stderr.write('WARNING: %s\n' % msg)
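# Hedged illustration (values made up): equal(1.0, 1.05, tolerance=0.1)
# passes silently, while equal(1.0, 1.2, tolerance=0.1) raises an
# AssertionError carrying the formatted message built above.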
def gen(symbol, exx=False, name=None, **kwargs):
if mpi.rank == 0:
if 'scalarrel' not in kwargs:
kwargs['scalarrel'] = True
g = Generator(symbol, **kwargs)
g.run(exx=exx, name=name, use_restart_file=False, **parameters[symbol])
mpi.world.barrier()
if setup_paths[0] != '.':
setup_paths.insert(0, '.')
def wrap_pylab(names=[]):
"""Use Agg backend and prevent windows from popping up."""
import matplotlib
matplotlib.use('Agg')
import pylab
def show(names=names):
if names:
name = names.pop(0)
else:
name = 'fig.png'
pylab.savefig(name)
pylab.show = show
tests = [
'ase3k_version.py',
'numpy_core_multiarray_dot.py',
'numpy_zdotc_graphite.py',
'gemm_complex.py',
'lapack.py',
'mpicomm.py',
'eigh.py',
'xc.py',
'gradient.py',
'pbe_pw91.py',
'cg2.py',
'd2Excdn2.py',
'dot.py',
'blas.py',
'gp2.py',
'non_periodic.py',
'erf.py',
'lf.py',
'lxc_fxc.py',
'Gauss.py',
'cluster.py',
'derivatives.py',
'second_derivative.py',
'integral4.py',
'transformations.py',
'occupations.py',
'nabla.py',
'aeatom.py',
'pbc.py',
'atoms_too_close.py',
'poisson.py',
'XC2.py',
'multipoletest.py',
'proton.py',
'vdwradii.py',
'parallel/ut_parallel.py',
'parallel/compare.py',
'ase3k.py',
'laplace.py',
'ds_beta.py',
'gauss_wave.py',
'planewavebasis.py',
'coulomb.py',
'timing.py',
'lcao_density.py',
'gauss_func.py',
'ah.py',
'ylexpand.py',
'wfs_io.py',
'wfs_auto.py',
'xcatom.py',
'parallel/overlap.py',
'symmetry.py',
'pes.py',
'elf.py',
'lebedev.py',
'usesymm.py',
# 'usesymm2.py',
'eed.py',
'partitioning.py',
'mixer.py',
'broydenmixer.py',
'restart.py',
'mgga_restart.py',
'gga_atom.py',
'bee1.py',
'external_potential.py',
'refine.py',
'revPBE.py',
'lcao_largecellforce.py',
'lcao_h2o.py',
'spin_contamination.py',
'lrtddft2.py',
'stdout.py',
'nonselfconsistentLDA.py',
'nonselfconsistent.py',
'ewald.py',
'harmonic.py',
'ut_csh.py',
'ut_rsh.py',
'spinpol.py',
'kptpar.py',
'plt.py',
'parallel/hamiltonian.py',
'restart2.py',
'hydrogen.py',
'H_force.py',
'Cl_minus.py',
'blocked_rmm_diis.py',
'degeneracy.py',
'h2o_xas.py',
'fermilevel.py',
'al_chain.py',
'bulk.py',
'si.py',
'gemm.py',
'gemv.py',
'asewannier.py',
'davidson.py',
'cg.py',
'h2o_xas_recursion.py',
'lrtddft.py',
'spectrum.py',
'lcao_bsse.py',
'lcao_force.py',
'parallel/lcao_hamiltonian.py',
'parallel/lcao_parallel.py',
'parallel/lcao_parallel_kpt.py',
'parallel/fd_parallel.py',
'parallel/fd_parallel_kpt.py',
'gllbatomic.py',
'ne_gllb.py',
'ne_disc.py',
'wannier_ethylene.py',
'CH4.py',
'neb.py',
'complex.py',
'diamond_absorption.py',
'aluminum_EELS.py',
'dump_chi0.py',
'au02_absorption.py',
'rpa_energy_Kr.py',
'bse_aluminum.py',
'bse_diamond.py',
'bse_vs_lrtddft.py',
'hgh_h2o.py',
'apmb.py',
'relax.py',
'muffintinpot.py',
'fixmom.py',
'fermisplit.py',
'be_nltd_ip.py',
'lcao_bulk.py',
'jstm.py',
'simple_stm.py',
'guc_force.py',
'td_na2.py',
'ldos.py',
'exx_coarse.py',
'2Al.py',
'lxc_xcatom.py',
'aedensity.py',
'si_primitive.py',
'restart_band_structure.py',
'IP_oxygen.py',
'atomize.py',
'dipole.py',
'Hubbard_U.py',
'Hubbard_U_Zn.py',
'revPBE_Li.py',
'si_xas.py',
'tpss.py',
'nsc_MGGA.py',
'8Si.py',
'dscf_lcao.py',
'coreeig.py',
'Cu.py',
'diamond_gllb.py',
'exx.py',
'h2o_dks.py',
'nscfsic.py',
'scfsic_h2.py',
'scfsic_n2.py',
'lb94.py',
'aluminum_EELS_lcao.py',
'vdw/quick.py',
'vdw/potential.py',
'vdw/quick_spin.py',
'vdw/ar2.py',
'fd2lcao_restart.py',
# 'eigh_perf.py', # Requires LAPACK 3.2.1 or later
'parallel/parallel_eigh.py',
'parallel/ut_hsops.py',
'parallel/ut_hsblacs.py',
'parallel/ut_invops.py',
'parallel/ut_kptops.py',
'parallel/ut_redist.py',
'parallel/pblas.py',
'parallel/blacsdist.py',
'parallel/scalapack.py',
'parallel/scalapack_diag_simple.py',
'parallel/realspace_blacs.py',
'parallel/lcao_projections.py',
#'dscf_forces.py',
'lrtddft3.py',
'AA_exx_enthalpy.py',
'transport.py',
'constant_electric_field.py',
'stark_shift.py',
'aluminum_testcell.py',
]
exclude = []
if mpi.size > 1:
exclude += ['pes.py',
'nscfsic.py',
'coreeig.py',
'asewannier.py',
'wannier_ethylene.py',
'muffintinpot.py',
'stark_shift.py']
if mpi.size > 2:
exclude += ['neb.py']
if mpi.size < 4:
exclude += ['parallel/pblas.py',
'parallel/scalapack.py',
'parallel/scalapack_diag_simple.py',
'parallel/realspace_blacs.py',
'AA_exx_enthalpy.py',
'bse_aluminum.py',
'bse_diamond.py',
'bse_vs_lrtddft.py']
if mpi.size != 4:
exclude += ['parallel/lcao_parallel.py']
exclude += ['parallel/fd_parallel.py']
if mpi.size == 8:
exclude += ['transport.py']
if mpi.size != 8:
exclude += ['parallel/lcao_parallel_kpt.py']
exclude += ['parallel/fd_parallel_kpt.py']
try:
import scipy
except ImportError:
exclude += ['diamond_absorption.py',
'aluminum_EELS.py',
'aluminum_EELS_lcao.py',
'aluminum_testcell.py',
'au02_absorption.py',
'bse_aluminum.py',
'bse_diamond.py',
'bse_vs_lrtddft.py',
'aeatom.py',
'rpa_energy_Kr.py']
for test in exclude:
if test in tests:
tests.remove(test)
class TestRunner:
def __init__(self, tests, stream=sys.__stdout__, jobs=1,
show_output=False):
if mpi.size > 1:
assert jobs == 1
self.jobs = jobs
self.show_output = show_output
self.tests = tests
self.failed = []
self.garbage = []
if mpi.rank == 0:
self.log = stream
else:
self.log = devnull
self.n = max([len(test) for test in tests])
def run(self):
self.log.write('=' * 77 + '\n')
if not self.show_output:
sys.stdout = devnull
ntests = len(self.tests)
t0 = time.time()
if self.jobs == 1:
self.run_single()
else:
# Run several processes using fork:
self.run_forked()
sys.stdout = sys.__stdout__
self.log.write('=' * 77 + '\n')
self.log.write('Ran %d tests out of %d in %.1f seconds\n' %
(ntests - len(self.tests), ntests, time.time() - t0))
if self.failed:
self.log.write('Tests failed: %d\n' % len(self.failed))
else:
self.log.write('All tests passed!\n')
self.log.write('=' * 77 + '\n')
return self.failed
def run_single(self):
while self.tests:
test = self.tests.pop(0)
try:
self.run_one(test)
except KeyboardInterrupt:
self.tests.append(test)
break
def run_forked(self):
j = 0
pids = {}
while self.tests or j > 0:
if self.tests and j < self.jobs:
test = self.tests.pop(0)
pid = os.fork()
if pid == 0:
exitcode = self.run_one(test)
os._exit(exitcode)
else:
j += 1
pids[pid] = test
else:
try:
while True:
pid, exitcode = os.wait()
if pid in pids:
break
except KeyboardInterrupt:
for pid, test in pids.items():
os.kill(pid, signal.SIGHUP)
self.write_result(test, 'STOPPED', time.time())
self.tests.append(test)
break
if exitcode:
self.failed.append(pids[pid])
del pids[pid]
j -= 1
def run_one(self, test):
if self.jobs == 1:
self.log.write('%*s' % (-self.n, test))
self.log.flush()
t0 = time.time()
filename = gpaw.__path__[0] + '/test/' + test
try:
loc = {}
execfile(filename, loc)
loc.clear()
del loc
self.check_garbage()
except KeyboardInterrupt:
self.write_result(test, 'STOPPED', t0)
raise
except:
failed = True
else:
failed = False
mpi.ibarrier(timeout=60.0) # guard against parallel hangs
me = np.array(failed)
everybody = np.empty(mpi.size, bool)
mpi.world.all_gather(me, everybody)
failed = everybody.any()
if failed:
self.fail(test, np.argwhere(everybody).ravel(), t0)
else:
self.write_result(test, 'OK', t0)
return failed
def check_garbage(self):
gc.collect()
n = len(gc.garbage)
self.garbage += gc.garbage
del gc.garbage[:]
assert n == 0, ('Leak: Uncollectable garbage (%d object%s) %s' %
(n, 's'[:n > 1], self.garbage))
def fail(self, test, ranks, t0):
if mpi.rank in ranks:
if sys.version_info >= (2, 4, 0, 'final', 0):
tb = traceback.format_exc()
else: # Python 2.3! XXX
tb = ''
traceback.print_exc()
else:
tb = ''
if mpi.size == 1:
text = 'FAILED!\n%s\n%s%s' % ('#' * 77, tb, '#' * 77)
self.write_result(test, text, t0)
else:
tbs = {tb: [0]}
for r in range(1, mpi.size):
if mpi.rank == r:
mpi.send_string(tb, 0)
elif mpi.rank == 0:
tb = mpi.receive_string(r)
if tb in tbs:
tbs[tb].append(r)
else:
tbs[tb] = [r]
if mpi.rank == 0:
text = ('FAILED! (rank %s)\n%s' %
(','.join([str(r) for r in ranks]), '#' * 77))
for tb, ranks in tbs.items():
if tb:
text += ('\nRANK %s:\n' %
','.join([str(r) for r in ranks]))
text += '%s%s' % (tb, '#' * 77)
self.write_result(test, text, t0)
self.failed.append(test)
def write_result(self, test, text, t0):
t = time.time() - t0
if self.jobs > 1:
self.log.write('%*s' % (-self.n, test))
self.log.write('%10.3f %s\n' % (t, text))
if __name__ == '__main__':
TestRunner(tests).run()
|
qsnake/gpaw
|
gpaw/test/__init__.py
|
Python
|
gpl-3.0
| 12,064
|
[
"GPAW"
] |
bdf72f67de060c19138c0be692c188c0e7dd02d3f27532bb6b04e9eba1c46503
|
# -*- coding: utf-8 -*-
#
# brunel_alpha_nest.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Random balanced network (alpha synapses) connected with NEST
------------------------------------------------------------------
This script simulates an excitatory and an inhibitory population on
the basis of the network used in [1]_.
In contrast to ``brunel-alpha-numpy.py``, this variant uses NEST's builtin
connection routines to draw the random connections instead of NumPy.
When connecting the network, customary synapse models are used, which
allow for querying the number of created synapses. Using spike
recorders, the average firing rates of the neurons in the populations
are established. The building as well as the simulation time of the
network are recorded.
References
~~~~~~~~~~~~~
.. [1] Brunel N (2000). Dynamics of sparsely connected networks of excitatory and
inhibitory spiking neurons. Journal of Computational Neuroscience 8,
183-208.
See Also
~~~~~~~~~~~~
:doc:`brunel_alpha_numpy`
"""
###############################################################################
# Import all necessary modules for simulation, analysis and plotting. Scipy
# should be imported before nest.
import time
import numpy as np
import scipy.special as sp
import nest
import nest.raster_plot
import matplotlib.pyplot as plt
###############################################################################
# Definition of functions used in this example. First, define the `Lambert W`
# function implemented in SLI. The second function computes the maximum of
# the postsynaptic potential for a synaptic input current of unit amplitude
# (1 pA) using the `Lambert W` function. This function will later be used to
# calibrate the synaptic weights.
def LambertWm1(x):
# Using scipy to mimic the gsl_sf_lambert_Wm1 function.
return sp.lambertw(x, k=-1 if x < 0 else 0).real
def ComputePSPnorm(tauMem, CMem, tauSyn):
a = (tauMem / tauSyn)
b = (1.0 / tauSyn - 1.0 / tauMem)
# time of maximum
t_max = 1.0 / b * (-LambertWm1(-np.exp(-1.0 / a) / a) - 1.0 / a)
# maximum of PSP for current of unit amplitude
return (np.exp(1.0) / (tauSyn * CMem * b) *
((np.exp(-t_max / tauMem) - np.exp(-t_max / tauSyn)) / b -
t_max * np.exp(-t_max / tauSyn)))
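###############################################################################
# Hedged sanity check (added for illustration, not part of the original NEST
# example; the bound is an assumption, not a reference value): with the
# membrane parameters used below (tauMem=20.0 ms, CMem=250.0 pF,
# tauSyn=0.5 ms), the peak PSP per 1 pA of input current should be a small
# positive number well below 1 mV.
assert 0.0 < ComputePSPnorm(20.0, 250.0, 0.5) < 1.0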
nest.ResetKernel()
###############################################################################
# Assigning the current time to a variable in order to determine the build
# time of the network.
startbuild = time.time()
###############################################################################
# Assigning the simulation parameters to variables.
dt = 0.1 # the resolution in ms
simtime = 1000.0 # Simulation time in ms
delay = 1.5 # synaptic delay in ms
###############################################################################
# Definition of the parameters crucial for asynchronous irregular firing of
# the neurons.
g = 5.0 # ratio inhibitory weight/excitatory weight
eta = 2.0 # external rate relative to threshold rate
epsilon = 0.1 # connection probability
###############################################################################
# Definition of the number of neurons in the network and the number of neurons
# recorded from
order = 2500
NE = 4 * order # number of excitatory neurons
NI = 1 * order # number of inhibitory neurons
N_neurons = NE + NI # number of neurons in total
N_rec = 50 # record from 50 neurons
###############################################################################
# Definition of connectivity parameters
CE = int(epsilon * NE) # number of excitatory synapses per neuron
CI = int(epsilon * NI) # number of inhibitory synapses per neuron
C_tot = int(CI + CE) # total number of synapses per neuron
###############################################################################
# Initialization of the parameters of the integrate and fire neuron and the
# synapses. The parameters of the neuron are stored in a dictionary. The
# synaptic currents are normalized such that the amplitude of the PSP is J.
tauSyn = 0.5 # synaptic time constant in ms
tauMem = 20.0 # time constant of membrane potential in ms
CMem = 250.0 # capacitance of membrane in pF
theta = 20.0 # membrane threshold potential in mV
neuron_params = {"C_m": CMem,
"tau_m": tauMem,
"tau_syn_ex": tauSyn,
"tau_syn_in": tauSyn,
"t_ref": 2.0,
"E_L": 0.0,
"V_reset": 0.0,
"V_m": 0.0,
"V_th": theta}
J = 0.1 # postsynaptic amplitude in mV
J_unit = ComputePSPnorm(tauMem, CMem, tauSyn)
J_ex = J / J_unit # amplitude of excitatory postsynaptic current
J_in = -g * J_ex # amplitude of inhibitory postsynaptic current
###############################################################################
# Definition of threshold rate, which is the external rate needed to fix the
# membrane potential around its threshold, the external firing rate and the
# rate of the poisson generator which is multiplied by the in-degree CE and
# converted to Hz by multiplication by 1000.
nu_th = (theta * CMem) / (J_ex * CE * np.exp(1) * tauMem * tauSyn)
nu_ex = eta * nu_th
p_rate = 1000.0 * nu_ex * CE
################################################################################
# Configuration of the simulation kernel by the previously defined time
# resolution used in the simulation. Setting ``print_time`` to `True` prints the
# already processed simulation time as well as its percentage of the total
# simulation time.
nest.SetKernelStatus({"resolution": dt, "print_time": True,
"overwrite_files": True})
print("Building network")
###############################################################################
# Configuration of the model ``iaf_psc_alpha`` and ``poisson_generator`` using
# ``SetDefaults``. This function expects the model to be inserted as a
# string and the parameter to be specified in a dictionary. All instances of
# these models created after this point will have the properties specified
# in the dictionary by default.
nest.SetDefaults("iaf_psc_alpha", neuron_params)
nest.SetDefaults("poisson_generator", {"rate": p_rate})
###############################################################################
# Creation of the nodes using ``Create``. We store the returned handles in
# variables for later reference. Here the excitatory and inhibitory, as well
# as the poisson generator and two spike recorders. The spike recorders will
# later be used to record excitatory and inhibitory spikes.
nodes_ex = nest.Create("iaf_psc_alpha", NE)
nodes_in = nest.Create("iaf_psc_alpha", NI)
noise = nest.Create("poisson_generator")
espikes = nest.Create("spike_recorder")
ispikes = nest.Create("spike_recorder")
###############################################################################
# Configuration of the spike recorders recording excitatory and inhibitory
# spikes by sending parameter dictionaries to ``set``. Setting the property
# `record_to` to *"ascii"* ensures that the spikes will be recorded to a file,
# whose name starts with the string assigned to the property `label`.
espikes.set(label="brunel-py-ex", record_to="ascii")
ispikes.set(label="brunel-py-in", record_to="ascii")
print("Connecting devices")
###############################################################################
# Definition of a synapse using ``CopyModel``, which expects the model name of
# a pre-defined synapse, the name of the customary synapse and an optional
# parameter dictionary. The parameters defined in the dictionary will be the
# default parameter for the customary synapse. Here we define one synapse for
# the excitatory and one for the inhibitory connections giving the
# previously defined weights and equal delays.
nest.CopyModel("static_synapse", "excitatory",
{"weight": J_ex, "delay": delay})
nest.CopyModel("static_synapse", "inhibitory",
{"weight": J_in, "delay": delay})
#################################################################################
# Connecting the previously defined poisson generator to the excitatory and
# inhibitory neurons using the excitatory synapse. Since the poisson
# generator is connected to all neurons in the population the default rule
# (``all_to_all``) of ``Connect`` is used. The synaptic properties are inserted
# via ``syn_spec`` which expects a dictionary when defining multiple variables or
# a string when simply using a pre-defined synapse.
nest.Connect(noise, nodes_ex, syn_spec="excitatory")
nest.Connect(noise, nodes_in, syn_spec="excitatory")
###############################################################################
# Connecting the first ``N_rec`` nodes of the excitatory and inhibitory
# population to the associated spike recorders using excitatory synapses.
# Here the same shortcut for the specification of the synapse as defined
# above is used.
nest.Connect(nodes_ex[:N_rec], espikes, syn_spec="excitatory")
nest.Connect(nodes_in[:N_rec], ispikes, syn_spec="excitatory")
print("Connecting network")
print("Excitatory connections")
###############################################################################
# Connecting the excitatory population to all neurons using the pre-defined
# excitatory synapse. Beforehand, the connection parameters are defined in a
# dictionary. Here we use the connection rule ``fixed_indegree``,
# which requires the definition of the indegree. Since the synapse
# specification is reduced to assigning the pre-defined excitatory synapse it
# suffices to insert a string.
conn_params_ex = {'rule': 'fixed_indegree', 'indegree': CE}
nest.Connect(nodes_ex, nodes_ex + nodes_in, conn_params_ex, "excitatory")
print("Inhibitory connections")
###############################################################################
# Connecting the inhibitory population to all neurons using the pre-defined
# inhibitory synapse. The connection parameter as well as the synapse
# parameter are defined analogously to the connection from the excitatory
# population defined above.
conn_params_in = {'rule': 'fixed_indegree', 'indegree': CI}
nest.Connect(nodes_in, nodes_ex + nodes_in, conn_params_in, "inhibitory")
###############################################################################
# Storage of the time point after the buildup of the network in a variable.
endbuild = time.time()
###############################################################################
# Simulation of the network.
print("Simulating")
nest.Simulate(simtime)
###############################################################################
# Storage of the time point after the simulation of the network in a variable.
endsimulate = time.time()
###############################################################################
# Reading out the total number of spikes received from the spike recorder
# connected to the excitatory population and the inhibitory population.
events_ex = espikes.n_events
events_in = ispikes.n_events
###############################################################################
# Calculation of the average firing rate of the excitatory and the inhibitory
# neurons by dividing the total number of recorded spikes by the number of
# neurons recorded from and the simulation time. The multiplication by 1000.0
# converts the unit 1/ms to 1/s=Hz.
rate_ex = events_ex / simtime * 1000.0 / N_rec
rate_in = events_in / simtime * 1000.0 / N_rec
###############################################################################
# Reading out the number of connections established using the excitatory and
# inhibitory synapse model. The numbers are summed up resulting in the total
# number of synapses.
num_synapses = (nest.GetDefaults("excitatory")["num_connections"] +
nest.GetDefaults("inhibitory")["num_connections"])
###############################################################################
# Establishing the time it took to build and simulate the network by taking
# the difference of the pre-defined time variables.
build_time = endbuild - startbuild
sim_time = endsimulate - endbuild
###############################################################################
# Printing the network properties, firing rates and building times.
print("Brunel network simulation (Python)")
print("Number of neurons : {0}".format(N_neurons))
print("Number of synapses: {0}".format(num_synapses))
print(" Exitatory : {0}".format(int(CE * N_neurons) + N_neurons))
print(" Inhibitory : {0}".format(int(CI * N_neurons)))
print("Excitatory rate : %.2f Hz" % rate_ex)
print("Inhibitory rate : %.2f Hz" % rate_in)
print("Building time : %.2f s" % build_time)
print("Simulation time : %.2f s" % sim_time)
###############################################################################
# Plot a raster of the excitatory neurons and a histogram.
nest.raster_plot.from_device(espikes, hist=True)
plt.show()
|
stinebuu/nest-simulator
|
pynest/examples/brunel_alpha_nest.py
|
Python
|
gpl-2.0
| 13,729
|
[
"NEURON"
] |
3cda375ba464544de2048b7cebc17fdc14a64c08d4889260d2911113e4cbfb72
|
#!/usr/bin/env python
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2012 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
import os
from Biskit.tools import *
time_offset = 40.0 ## (start time)
def get_rst_time( frst ):
"""extract time of last snapshot"""
f = open( frst )
f.readline()
l = f.readline()
f.close()
t = float( l.split()[1] )
return t
def rename_current_files( folder, current_time, exclude=[] ):
"""Rename *crd, *out, *vel, *rst to *_x_ps_.crd/out/vel/rst"""
fs = os.listdir( folder )
for f in fs:
if f[-3:] in ['crd','out','vel','rst' ] and not f in exclude:
ending = f[-3:]
name = stripFilename( f )
new_name = '%s/%s_%ips.%s' % \
(absfile( folder), name, current_time, ending)
print "renaming %s -> %s" % (f, new_name)
os.rename( folder + '/' + f, new_name )
def create_start_From_template( fTemplate, fout ):
"""get start.csh with right rst file"""
pass
def adapt_inp( fold_inp, fnew_inp, current_time, old_total=0 ):
"""Parse inp, adapt step number to time remaining from total"""
dt = 0
sf = open( fold_inp ).read()
sf = sf.replace('\n',',')
sf = sf.replace(',,',',')
sf = sf.replace('\n','')
sf = sf.replace(' ', '')
commands = sf.split(',')
for c in commands:
if c.find('=') != -1:
param = c.split('=')[0]
value = c.split('=')[1]
if param == 'nstlim' and old_total==0:
old_total = int( value )
if param == 'dt':
dt = float( value )
if not( dt and old_total ):
        raise Exception("didn't find dt and/or nstlim option")
new_total = int( ( old_total * dt - current_time ) / dt )
fout = open( fnew_inp, 'w')
for c in commands:
if c.find('=') != -1 :
fout.write(' ')
if c.find('nstlim') != -1:
fout.write('nstlim=%i,\n' % new_total)
else:
fout.write('%s,\n' % c )
else:
fout.write( '%s\n' % c )
fout.close()
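# Hedged worked example (made-up numbers, for illustration only): if the
# original MD used nstlim=500000 and dt=0.002 ps (1000 ps total) and the
# restart happens 400 ps past t0, the rewritten inp gets
# nstlim = (500000*0.002 - 400)/0.002 = 300000 remaining steps.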
###MAIN###
o = cmdDict( {'f':'.', 't0':'40', 'rst':'sim.rst', 'inp':'sim.inp',
'e':'eq.rst'} )
if len( sys.argv ) < 2:
print \
"""
Prepare the restart of a broken Amber MD run. Current *crd etc. are
moved to oldName_TIMEps.* and the nstlim option in the input file
is set to the number of steps remaining to the end of the MD.
am_restartMD.py -f |folder| [ -t0 |time_offset| -tot |nstlim_total|
-rst |rst_file|
-inp |inp_file| -e |exclude_files_from_renaming| ]
tot - needed for 2nd restart, total number of MD steps (w/o restart)
t0 - starting time in ps of this MD
default:
"""
for k,v in o.items():
print "\t%s\t%s" % (k,v)
sys.exit(0)
else:
try:
rst = absfile( o['f'] ) + '/' + o['rst']
except:
rst = absfile( o['rst'] )
t0 = int( o['t0'] )
tcurrent = get_rst_time( rst )
finp = absfile( o['inp'] )
exclude_files = toList( o['e'] )
adapt_inp( finp, finp, tcurrent - t0, int( o.get('tot',0)) )
rename_current_files( absfile( o['f'] ), tcurrent, exclude_files )
|
ostrokach/biskit
|
scripts/Biskit/am_restartMD.py
|
Python
|
gpl-3.0
| 4,056
|
[
"Amber"
] |
ccff5b70c7a7218b534759896e3b59ed79e10d9d4a00dff392345058d88d0951
|
#!/usr/bin/env python3
#
# Copyright (c) 2014, Scott Silver Labs, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
from setuptools import setup, find_packages
from setuptools.command.install import install as _install
import shutil
import subprocess
# Utility function to read the README file.
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
TGT_CONFIG_FILE = '/etc/rstem_ide.conf'
TGT_INSTALL_DIR = '/opt/raspberrystem/ide'
TGT_PYTHON_DOCS_DIR = '/opt/raspberrystem/python.org'
TGT_HTML_SYMLINK = '/opt/raspberrystem/pydoc'
TGT_BIN_SYMLINK = '/usr/local/bin/rstem_ided'
TGT_INITD = '/etc/init.d/rstem_ided'
TGT_OPENBOX_FILE = '/home/pi/.config/openbox/lxde-pi-rc.xml'
TGT_DESKTOP_FILE = '/home/pi/desktop/openbox/rstem.desktop'
outputs = [
TGT_INSTALL_DIR,
TGT_PYTHON_DOCS_DIR,
TGT_CONFIG_FILE,
TGT_BIN_SYMLINK,
TGT_INITD,
]
def _post_install(dir):
# import rstem to find its install path
# NOTE: Require dependency on rstem
import rstem
pydoc_path = os.path.join(os.path.dirname(rstem.__file__), 'pydoc', rstem.__name__)
for dir in [TGT_INSTALL_DIR, TGT_PYTHON_DOCS_DIR]:
print('Removing: ' + dir)
shutil.rmtree(dir, ignore_errors=True)
for dir in [TGT_INSTALL_DIR, TGT_PYTHON_DOCS_DIR]:
print('Installing: ' + dir)
shutil.copytree(os.path.basename(dir), dir)
print('Creating links...')
# API docs symlink - note: TGT_HTML_SYMLINK not considered an output of the
# install because if it is, then on pip uninstall, it will not remove the
# symlink, but instead removes the files linked TO.
try:
os.remove(TGT_HTML_SYMLINK)
except OSError:
pass
print(' symlink {} -->\n {}'.format(TGT_HTML_SYMLINK, pydoc_path))
os.symlink(pydoc_path, TGT_HTML_SYMLINK)
# server binary symlink
try:
os.remove(TGT_BIN_SYMLINK)
except OSError:
pass
dest_bin = os.path.join(TGT_INSTALL_DIR, 'server')
print(' symlink {} -->\n {}'.format(TGT_BIN_SYMLINK, dest_bin))
os.symlink(dest_bin, TGT_BIN_SYMLINK)
os.chmod(TGT_BIN_SYMLINK, 0o4755)
# Copy config file
SRC_CONFIG_FILE = '.' + TGT_CONFIG_FILE
print('Copy config file {} -> {}'.format(SRC_CONFIG_FILE, TGT_CONFIG_FILE))
shutil.copy(SRC_CONFIG_FILE, TGT_CONFIG_FILE)
# Copy and create link for init script
SRC_INITD = '.' + TGT_INITD
print('Copy init.d script {} -> {}'.format(SRC_INITD, TGT_INITD))
shutil.copy(SRC_INITD, TGT_INITD)
os.chmod(TGT_INITD, 0o755)
# symlink is created via postinstall script
# WM rc config file
try:
os.makedirs("/".join(TGT_OPENBOX_FILE.split("/")[:-1]) + "/")
except:
pass
try:
print('Backup {} -> {}'.format(TGT_OPENBOX_FILE, TGT_OPENBOX_FILE + '.old'))
shutil.copy(TGT_OPENBOX_FILE, TGT_OPENBOX_FILE + '.old')
except:
pass
print('Copy {} -> {}'.format("./configfiles/lxde-pi-rc.xml", TGT_OPENBOX_FILE))
shutil.copy("./configfiles/lxde-pi-rc.xml", TGT_OPENBOX_FILE)
# Desktop link
print('Copy {} -> {}'.format("./configfiles/rstem.desktop", TGT_DESKTOP_FILE))
shutil.copy("./configfiles/rstem.desktop", TGT_DESKTOP_FILE)
# Additional post install steps via shell script
from subprocess import call
call('bash ./pkg/postinstall %s rstem' % dir, shell=True)
# Post installation task to setup raspberry pi
class install(_install):
    # Required to force pip to know about our additional files.
def get_outputs(self):
return super().get_outputs() + outputs
def run(self):
super().run()
self.execute(_post_install, (self.install_lib,), msg='Running post install task...')
setup(
name = read('NAME').strip(),
version = read('VERSION').strip(),
author = 'Brian Silverman',
author_email = 'bri@raspberrystem.com',
description = ('RaspberrySTEM IDE'),
license = 'Apache License 2.0',
keywords = ['raspberrystem', 'raspberrypi', 'stem', 'ide'],
url = 'http://www.raspberrystem.com',
long_description = read('README.md'),
# use https://pypi.python.org/pypi?%3Aaction=list_classifiers as help when editing this
classifiers=[
'Development Status :: 4 - Beta',
'Topic :: Education',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
],
cmdclass={'install': install}, # overload install command
)
|
scottsilverlabs/raspberrystem-ide
|
setup.py
|
Python
|
apache-2.0
| 5,092
|
[
"Brian"
] |
9768b110d14dfebfd140ac516b02d8ce1b5cb02fbf9121a6159e997653c6c0e5
|
from __future__ import print_function, division
import os,unittest,numpy as np
from pyscf.nao import mf
class KnowValues(unittest.TestCase):
def test_dft_sv(self):
""" Try to compute the xc potential """
sv = mf(label='water', cd=os.path.dirname(os.path.abspath(__file__)))
vxc = sv.vxc_lil()
if __name__ == "__main__": unittest.main()
|
gkc1000/pyscf
|
pyscf/nao/test/test_0027_dft_sv_nao.py
|
Python
|
apache-2.0
| 360
|
[
"PySCF"
] |
8f9bf037a5eb6fc7435fc7b2e04ab7a3eda75572d788fd8b7dc718580c9828b4
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase
import os
import pytest
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from pyspark.sql import SparkSession
from pyspark.sql.types import ArrayType, DoubleType
from zoo.orca import init_orca_context, stop_orca_context
from zoo.orca.data.pandas import read_csv
from zoo.orca.data import SparkXShards
from zoo.orca.learn.pytorch import Estimator
from zoo.orca.learn.metrics import Accuracy
from zoo.orca.learn.trigger import EveryEpoch
from zoo.orca.learn.optimizers import SGD
from zoo.orca.learn.optimizers.schedule import Default
from zoo.orca import OrcaContext
import tempfile
resource_path = os.path.join(os.path.split(__file__)[0], "../../../resources")
class TestEstimatorForDataFrame(TestCase):
def setUp(self):
""" setup any state tied to the execution of the given method in a
class. setup_method is invoked for every test method of a class.
"""
self.sc = init_orca_context(cores=4)
def to_array_(v):
return v.toArray().tolist()
def flatten_(v):
result = []
for elem in v:
result.extend(elem.toArray().tolist())
return result
self.spark = SparkSession(self.sc)
self.spark.udf.register("to_array", to_array_, ArrayType(DoubleType()))
self.spark.udf.register("flatten", flatten_, ArrayType(DoubleType()))
def tearDown(self):
""" teardown any state that was previously setup with a setup_method
call.
"""
stop_orca_context()
def test_bigdl_pytorch_estimator_dataframe_predict(self):
def loss_func(input, target):
return nn.CrossEntropyLoss().forward(input, target.flatten().long())
class IdentityNet(nn.Module):
def __init__(self):
super().__init__()
# need this line to avoid optimizer raise empty variable list
self.fc1 = nn.Linear(5, 5)
def forward(self, input_):
return input_
model = IdentityNet()
rdd = self.sc.range(0, 100)
df = rdd.map(lambda x: ([float(x)] * 5,
[int(np.random.randint(0, 2,
size=()))])).toDF(["feature", "label"])
with tempfile.TemporaryDirectory() as temp_dir_name:
estimator = Estimator.from_torch(model=model, loss=loss_func,
optimizer=SGD(learningrate_schedule=Default()),
model_dir=temp_dir_name)
result = estimator.predict(df, feature_cols=["feature"])
expr = "sum(cast(feature <> to_array(prediction) as int)) as error"
assert result.selectExpr(expr).first()["error"] == 0
def test_bigdl_pytorch_estimator_dataframe_fit_evaluate(self):
class SimpleModel(nn.Module):
def __init__(self):
super(SimpleModel, self).__init__()
self.fc = nn.Linear(5, 5)
def forward(self, x):
x = self.fc(x)
return F.log_softmax(x, dim=1)
model = SimpleModel()
def loss_func(input, target):
return nn.CrossEntropyLoss().forward(input, target.flatten().long())
rdd = self.sc.range(0, 100)
df = rdd.map(lambda x: ([float(x)] * 5,
[int(np.random.randint(0, 2,
size=()))])).toDF(["feature", "label"])
with tempfile.TemporaryDirectory() as temp_dir_name:
estimator = Estimator.from_torch(model=model, loss=loss_func, metrics=[Accuracy()],
optimizer=SGD(learningrate_schedule=Default()),
model_dir=temp_dir_name)
estimator.fit(data=df, epochs=4, batch_size=2, validation_data=df,
checkpoint_trigger=EveryEpoch(),
feature_cols=["feature"], label_cols=["label"])
eval_result = estimator.evaluate(df, batch_size=2,
feature_cols=["feature"], label_cols=["label"])
assert isinstance(eval_result, dict)
if __name__ == "__main__":
pytest.main([__file__])
|
intel-analytics/analytics-zoo
|
pyzoo/test/zoo/orca/learn/jep/test_pytorch_estimator_for_dataframe.py
|
Python
|
apache-2.0
| 4,995
|
[
"ORCA"
] |
f888206e0c4502373a26ad754e39b4726ccb5d03312470e43964740ca1101a07
|
import struct
class TrrHeader:
"""Container for header info from Gromacs' TRR file"""
# Header format (after tag, positions 24:84)
# > - big endian
# l - input record size (usually 0) ---
# l - energy record size (usually 0) |
# l - box size (9*4 = 36 bytes, usually always) |
# l - virial size (0) |
# l - pressure size (0) frsize =
# l - topology size (0) sum + 84
# l - symbol table size (0) |
# l - X array size (dim*floatSize*natoms, if present) |
# l - V array size (dim*floatSize*natoms, if present) |
# l - F array size (dim*floatSize*natoms, if present) ---
# l - NATOMS
# l - step
# l - nre (number of energy terms?)
# f/d - time (depends on TRR file precision)
# f/d - lambda (depends on TRR file precision)
def __init__(self, header, dim=3):
stuff = struct.unpack('>lllllllllllll', header)
self.inputRec = stuff[0]
self.energy = stuff[1]
self.box = stuff[2]
self.virial = stuff[3]
self.pressure = stuff[4]
self.topology = stuff[5]
self.symboltable = stuff[6]
self.x = stuff[7]
self.v = stuff[8]
self.f = stuff[9]
self.atoms = stuff[10]
self.step = stuff[11]
self.nre = stuff[12]
self.bytesize = sum(stuff[:10])
self.block = dim*self.atoms
self.float = self.box//(dim*dim) or (self.x or self.v or self.f)//self.block
# Set the dtype and the dtype with endiannes
self.dtype = (self.float == 4 and "f") or (self.float == 8 and "d")
if not self.dtype:
raise IOError("Unable to determine precision of TRR file. Float size appears to be {}".format(self.float))
self.dtypeE = '>'+self.dtype
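# Hedged self-test sketch (added for illustration; the field values are
# synthetic, not taken from a real Gromacs trajectory): pack a fake 13-int
# header with struct and check that TrrHeader decodes it. Field order follows
# the unpack above: input record, energy, box, virial, pressure, topology and
# symbol table sizes, the x/v/f array sizes, then natoms, step and nre.
if __name__ == "__main__":
    natoms = 10
    fake = struct.pack('>13l', 0, 0, 36, 0, 0, 0, 0, 4 * 3 * natoms, 0, 0,
                       natoms, 7, 0)
    hdr = TrrHeader(fake)
    # box size 36 = 9 floats of 4 bytes each, so single precision is inferred
    assert hdr.atoms == natoms and hdr.step == 7 and hdr.dtype == "f"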
|
Tsjerk/MartiniTools
|
gmx/trr/header.py
|
Python
|
gpl-2.0
| 2,072
|
[
"Gromacs"
] |
134dbab5a6e3572c67692d177d84cd28b782917f61ad2265308eda75eb47d1d2
|
# -*- coding: utf-8 -*-
"""Module for low-level parsing of nagios-style configuration files."""
import os
import re
import sys
import time
import pynag.Utils
from pynag.Utils import paths
# TODO: Raise more specific errors in this module.
from pynag.Parsers.errors import ParserError
class ConfigFileNotFound(ParserError):
""" This exception is thrown if we cannot locate any nagios.cfg-style config file. """
class Config(object):
""" Parse and write nagios config files """
# Regex for beginning of object definition
# We want everything that matches:
# define <object_type> {
    __beginning_of_object = re.compile(r"^\s*define\s+(\w+)\s*\{?(.*)$")
def __init__(self, cfg_file=None, strict=False):
""" Constructor for :py:class:`pynag.Parsers.config` class
Args:
cfg_file (str): Full path to nagios.cfg. If None, try to
auto-discover location
strict (bool): if True, use stricter parsing which is more prone to
raising exceptions
"""
self.cfg_file = cfg_file # Main configuration file
self.strict = strict # Use strict parsing or not
# If nagios.cfg is not set, lets do some minor autodiscover.
if self.cfg_file is None:
self.cfg_file = self.guess_cfg_file()
self.data = {}
self.maincfg_values = []
self._is_dirty = False
        self.reset() # Initialize misc member variables
def guess_nagios_directory(self):
""" Returns a path to the nagios configuration directory on your system
Use this function for determining the nagios config directory in your
code
Returns:
str. directory containing the nagios.cfg file
Raises:
:py:class:`pynag.Parsers.ConfigFileNotFound` if cannot guess config
file location.
"""
cfg_file = self.guess_cfg_file()
if not cfg_file:
raise ConfigFileNotFound("Could not find nagios.cfg")
return os.path.dirname(cfg_file)
def guess_nagios_binary(self):
""" Returns a path to any nagios binary found on your system
Use this function if you don't want specify path to the nagios binary
in your code and you are confident that it is located in a common
location
        Checked locations are the binary names listed in pynag.Utils.paths.BINARY_NAMES, resolved with `which`.
Returns:
str. Path to the nagios binary
None if could not find a binary in any of those locations
"""
for i in paths.BINARY_NAMES:
command = ['which', i]
code, stdout, stderr = pynag.Utils.runCommand(command=command, shell=False)
if code == 0:
return stdout.splitlines()[0].strip()
return None
def guess_cfg_file(self):
""" Returns a path to any nagios.cfg found on your system
Use this function if you don't want specify path to nagios.cfg in your
code and you are confident that it is located in a common location
        Checked locations are the file paths listed in pynag.Utils.paths.COMMON_CONFIG_FILE_LOCATIONS.
Returns:
str. Path to the nagios.cfg or equivalent file
None if couldn't find a file in any of these locations.
"""
for file_path in paths.COMMON_CONFIG_FILE_LOCATIONS:
if self.isfile(file_path):
return file_path
return None
def reset(self):
""" Reinitializes the data of a parser instance to its default values.
"""
self.cfg_files = [] # List of other configuration files
self.data = {} # dict of every known object definition
self.errors = [] # List of ParserErrors
self.item_list = None
self.item_cache = None
self.maincfg_values = [] # The contents of main nagios.cfg
self._resource_values = [] # The contents of any resource_files
        self.item_apply_cache = {} # This is a performance tweak used by _apply_template
        # This is a pure list of all the key/values in the config files. It
# shouldn't be useful until the items in it are parsed through with the proper
# 'use' relationships
self.pre_object_list = []
self.post_object_list = []
self.object_type_keys = {
'hostgroup': 'hostgroup_name',
'hostextinfo': 'host_name',
'host': 'host_name',
'service': 'name',
'servicegroup': 'servicegroup_name',
'contact': 'contact_name',
'contactgroup': 'contactgroup_name',
'timeperiod': 'timeperiod_name',
'command': 'command_name',
#'service':['host_name','description'],
}
def _has_template(self, target):
""" Determine if an item has a template associated with it
Args:
target (dict): Parsed item as parsed by :py:class:`pynag.Parsers.config`
"""
return 'use' in target
def _get_pid(self):
""" Checks the lock_file var in nagios.cfg and returns the pid from the file
If the pid file does not exist, returns None.
"""
try:
return self.open(self.get_cfg_value('lock_file'), "r").readline().strip()
except Exception:
return None
def _get_hostgroup(self, hostgroup_name):
""" Returns the hostgroup that matches the queried name.
Args:
hostgroup_name: Name of the hostgroup to be returned (string)
Returns:
Hostgroup item with hostgroup_name that matches the queried name.
"""
return self.data['all_hostgroup'].get(hostgroup_name, None)
def _get_key(self, object_type, user_key=None):
""" Return the correct 'key' for an item.
This is mainly a helper method for other methods in this class. It is
used to shorten code repetition.
Args:
object_type: Object type from which to obtain the 'key' (string)
user_key: User defined key. Default None. (string)
Returns:
Correct 'key' for the object type. (string)
"""
        if not user_key and object_type not in self.object_type_keys:
raise ParserError("Unknown key for object type: %s\n" % object_type)
# Use a default key
if not user_key:
user_key = self.object_type_keys[object_type]
return user_key
def _get_item(self, item_name, item_type):
""" Return an item from a list
Creates a cache of items in self.pre_object_list and returns an element
from this cache. Looks for an item with corresponding name and type.
Args:
item_name: Name of the item to be returned (string)
item_type: Type of the item to be returned (string)
Returns:
Item with matching name and type from
:py:attr:`pynag.Parsers.config.item_cache`
"""
# create local cache for performance optimizations. TODO: Rewrite functions that call this function
if not self.item_list:
self.item_list = self.pre_object_list
self.item_cache = {}
for item in self.item_list:
if not "name" in item:
continue
name = item['name']
tmp_item_type = (item['meta']['object_type'])
if not tmp_item_type in self.item_cache:
self.item_cache[tmp_item_type] = {}
self.item_cache[tmp_item_type][name] = item
my_cache = self.item_cache.get(item_type, None)
if not my_cache:
return None
return my_cache.get(item_name, None)
def _apply_template(self, original_item):
""" Apply all attributes of item named parent_name to "original_item".
Applies all of the attributes of parents (from the 'use' field) to item.
Args:
original_item: Item 'use'-ing a parent item. The parent's attributes
will be concretely added to this item.
Returns:
original_item to which have been added all the attributes defined
in parent items.
"""
# TODO: There is space for more performance tweaks here
# If item does not inherit from anyone else, lets just return item as is.
if 'use' not in original_item:
return original_item
object_type = original_item['meta']['object_type']
raw_definition = original_item['meta']['raw_definition']
my_cache = self.item_apply_cache.get(object_type, {})
# Performance tweak, if item has been parsed. Lets not do it again
if raw_definition in my_cache:
return my_cache[raw_definition]
parent_names = original_item['use'].split(',')
parent_items = []
for parent_name in parent_names:
parent_item = self._get_item(parent_name, object_type)
if parent_item is None:
error_string = "Can not find any %s named %s\n" % (object_type, parent_name)
self.errors.append(ParserError(error_string, item=original_item))
continue
try:
# Parent item probably has use flags on its own. So lets apply to parent first
parent_item = self._apply_template(parent_item)
except RuntimeError:
t, e = sys.exc_info()[:2]
self.errors.append(ParserError("Error while parsing item: %s (it might have circular use=)" % str(e),
item=original_item))
parent_items.append(parent_item)
inherited_attributes = original_item['meta']['inherited_attributes']
template_fields = original_item['meta']['template_fields']
for parent_item in parent_items:
for k, v in parent_item.iteritems():
if k in ('use', 'register', 'meta', 'name'):
continue
if k not in inherited_attributes:
inherited_attributes[k] = v
if k not in original_item:
original_item[k] = v
template_fields.append(k)
if 'name' in original_item:
my_cache[raw_definition] = original_item
return original_item
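    # Hedged illustration (toy data, added for clarity): given a parsed
    # template item {'name': 'generic-host', 'check_interval': '5', ...} and a
    # host {'use': 'generic-host', 'host_name': 'web01'}, _apply_template
    # copies check_interval onto the host and records it in
    # meta['inherited_attributes'] and meta['template_fields'].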
def _get_items_in_file(self, filename):
""" Return all items in the given file
        Iterates through all elements in self.data and gathers all the items
defined in the queried filename.
Args:
filename: file from which are defined the items that will be
returned.
Returns:
A list containing all the items in self.data that were defined in
filename
"""
return_list = []
for k in self.data.keys():
for item in self[k]:
if item['meta']['filename'] == filename:
return_list.append(item)
return return_list
def get_new_item(self, object_type, filename):
""" Returns an empty item with all necessary metadata
Creates a new item dict and fills it with usual metadata:
* object_type : object_type (arg)
* filename : filename (arg)
* template_fields = []
* needs_commit = None
* delete_me = None
* defined_attributes = {}
* inherited_attributes = {}
* raw_definition = "define %s {\\n\\n} % object_type"
Args:
object_type: type of the object to be created (string)
filename: Path to which the item will be saved (string)
Returns:
A new item with default metadata
"""
meta = {
'object_type': object_type,
'filename': filename,
'template_fields': [],
'needs_commit': None,
'delete_me': None,
'defined_attributes': {},
'inherited_attributes': {},
'raw_definition': "define %s {\n\n}" % object_type,
}
return {'meta': meta}
def _load_file(self, filename):
""" Parses filename with self.parse_filename and append results in self._pre_object_list
This function is mostly here for backwards compatibility
Args:
filename: the file to be parsed. This is supposed to a nagios object definition file
"""
for i in self.parse_file(filename):
self.pre_object_list.append(i)
def parse_file(self, filename):
""" Parses a nagios object configuration file and returns lists of dictionaries.
This is more or less a wrapper around :py:meth:`config.parse_string`,
so reading documentation there is useful.
Args:
filename: Path to the file to parse (string)
Returns:
A list containing elements parsed by :py:meth:`parse_string`
"""
try:
raw_string = self.open(filename, 'rb').read()
return self.parse_string(raw_string, filename=filename)
except IOError:
t, e = sys.exc_info()[:2]
parser_error = ParserError(e.strerror)
parser_error.filename = e.filename
self.errors.append(parser_error)
return []
def parse_string(self, string, filename='None'):
""" Parses a string, and returns all object definitions in that string
Args:
string: A string containing one or more object definitions
filename (optional): If filename is provided, it will be referenced
when raising exceptions
Examples:
>>> test_string = "define host {\\nhost_name examplehost\\n}\\n"
>>> test_string += "define service {\\nhost_name examplehost\\nservice_description example service\\n}\\n"
>>> c = Config()
>>> result = c.parse_string(test_string)
>>> for i in result: print i.get('host_name'), i.get('service_description', None)
examplehost None
examplehost example service
Returns:
A list of dictionaries, that look like self.data
Raises:
:py:class:`ParserError`
"""
append = ""
current = None
in_definition = {}
tmp_buffer = []
result = []
for sequence_no, line in enumerate(string.splitlines(False)):
line_num = sequence_no + 1
# If previous line ended with backslash, treat this line as a
# continuation of previous line
if append:
line = append + line
append = None
# Cleanup and line skips
line = line.strip()
if line == "":
continue
if line[0] == "#" or line[0] == ';':
continue
# If this line ends with a backslash, continue directly to next line
if line.endswith('\\'):
append = line.strip('\\')
continue
if line.startswith('}'): # end of object definition
if not in_definition:
p = ParserError("Unexpected '}' found outside object definition in line %s" % line_num)
p.filename = filename
p.line_start = line_num
raise p
in_definition = None
current['meta']['line_end'] = line_num
# Looks to me like nagios ignores everything after the '}', so why shouldn't we?
rest = line.split("}", 1)[1]
tmp_buffer.append(line)
try:
current['meta']['raw_definition'] = '\n'.join(tmp_buffer)
except Exception:
raise ParserError("Encountered Unexpected end of object definition in file '%s'." % filename)
result.append(current)
# Destroy the Nagios Object
current = None
continue
elif line.startswith('define'): # beginning of object definition
if in_definition:
msg = "Unexpected 'define' in {filename} on line {line_num}. was expecting '}}'."
msg = msg.format(**locals())
self.errors.append(ParserError(msg, item=current))
m = self.__beginning_of_object.search(line)
tmp_buffer = [line]
object_type = m.groups()[0]
if self.strict and object_type not in self.object_type_keys.keys():
raise ParserError(
"Don't know any object definition of type '%s'. it is not in a list of known object definitions." % object_type)
current = self.get_new_item(object_type, filename)
current['meta']['line_start'] = line_num
# Start off an object
in_definition = True
# Looks to me like nagios ignores everything after the '{', so why shouldn't we?
rest = m.groups()[1]
continue
else: # In the middle of an object definition
tmp_buffer.append(' ' + line)
# save whatever's left in the buffer for the next iteration
if not in_definition:
append = line
continue
# this is an attribute inside an object definition
if in_definition:
#(key, value) = line.split(None, 1)
tmp = line.split(None, 1)
if len(tmp) > 1:
(key, value) = tmp
else:
key = tmp[0]
value = ""
# Strip out in-line comments
if value.find(";") != -1:
value = value.split(";", 1)[0]
# Clean info
key = key.strip()
value = value.strip()
# Rename some old values that may be in the configuration
# This can probably be removed in the future to increase performance
if (current['meta']['object_type'] == 'service') and key == 'description':
key = 'service_description'
# Special hack for timeperiods as they are not consistent with other objects
# We will treat whole line as a key with an empty value
if (current['meta']['object_type'] == 'timeperiod') and key not in ('timeperiod_name', 'alias'):
key = line
value = ''
current[key] = value
current['meta']['defined_attributes'][key] = value
# Something is wrong in the config
else:
raise ParserError("Error: Unexpected token in file '%s'" % filename)
# Something is wrong in the config
if in_definition:
raise ParserError("Error: Unexpected EOF in file '%s'" % filename)
return result
def _locate_item(self, item):
""" This is a helper function for anyone who wishes to modify objects.
It takes "item", locates the file which is configured in, and locates
exactly the lines which contain that definition.
Returns: (tuple)
(everything_before, object_definition, everything_after, filename):
* everything_before (list of lines): Every line in filename before object was defined
* everything_after (list of lines): Every line in "filename" after object was defined
* object_definition (list of lines): Every line used to define our item in "filename"
* filename (string): file in which the object was written to
Raises:
:py:class:`ValueError` if object was not found in "filename"
"""
if "filename" in item['meta']:
filename = item['meta']['filename']
else:
raise ValueError("item does not have a filename")
# Look for our item, store it as my_item
for i in self.parse_file(filename):
if self.compareObjects(item, i):
my_item = i
break
else:
raise ValueError("We could not find object in %s\n%s" % (filename, item))
# Caller of this method expects to be returned
# several lists that describe the lines in our file.
# The splitting logic starts here.
my_file = self.open(filename)
all_lines = my_file.readlines()
my_file.close()
start = my_item['meta']['line_start'] - 1
end = my_item['meta']['line_end']
everything_before = all_lines[:start]
object_definition = all_lines[start:end]
everything_after = all_lines[end:]
# If there happen to be line continuations in the object we will edit
# We will remove them from object_definition
object_definition = self._clean_backslashes(object_definition)
return everything_before, object_definition, everything_after, filename
def _clean_backslashes(self, list_of_strings):
""" Returns list_of_strings with all all strings joined that ended with backslashes
Args:
list_of_strings: List of strings to join
Returns:
Another list of strings, with lines ending in a backslash joined to the line that follows.
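Example (illustrative): the two input lines "members larry, \" and
"curly, moe" (each ending with a newline) come back joined as the
single line "members larry, curly, moe".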
"""
tmp_buffer = ''
result = []
for i in list_of_strings:
if i.endswith('\\\n'):
tmp_buffer += i.strip('\\\n')
else:
result.append(tmp_buffer + i)
tmp_buffer = ''
return result
def _modify_object(self, item, field_name=None, new_value=None, new_field_name=None, new_item=None,
make_comments=False):
""" Locates "item" and changes the line which contains field_name.
Helper function for object_* functions. Locates "item" and changes the
line which contains field_name. If new_value and new_field_name are both
None, the attribute is removed.
Args:
item(dict): The item to be modified
field_name(str): The field_name to modify (if any)
new_field_name(str): If set, field_name will be renamed
new_value(str): If set the value of field_name will be changed
new_item(str): If set, whole object will be replaced with this
string
make_comments: If set, put pynag-branded comments where changes
have been made
Returns:
True on success
Raises:
:py:class:`ValueError` if object or field_name is not found
:py:class:`IOError` if save is unsuccessful.
"""
if item is None:
return
if field_name is None and new_item is None:
raise ValueError("either field_name or new_item must be set")
if '\n' in str(new_value):
raise ValueError("Invalid character \\n used as an attribute value.")
everything_before, object_definition, everything_after, filename = self._locate_item(item)
if new_item is not None:
# We have instructions on how to write the new object, so we don't need to parse it
object_definition = [new_item]
else:
change = None
value = None
i = 0
for i in range(len(object_definition)):
tmp = object_definition[i].split(None, 1)
if len(tmp) == 0:
continue
# Hack for timeperiods, they don't work like other objects
elif item['meta']['object_type'] == 'timeperiod' and field_name not in ('alias', 'timeperiod_name'):
tmp = [object_definition[i]]
# we can't change timeperiod, so we fake a field rename
if new_value is not None:
new_field_name = new_value
new_value = None
value = ''
elif len(tmp) == 1:
value = ''
else:
value = tmp[1]
k = tmp[0].strip()
if k == field_name:
# Attribute was found, lets change this line
if new_field_name is None and new_value is None:
# We take it that we are supposed to remove this attribute
change = object_definition.pop(i)
break
elif new_field_name:
# Field name has changed
k = new_field_name
if new_value is not None:
# value has changed
value = new_value
# Here we do the actual change
change = "\t%-30s%s\n" % (k, value)
if item['meta']['object_type'] == 'timeperiod' and field_name not in ('alias', 'timeperiod_name'):
change = "\t%s\n" % new_field_name
object_definition[i] = change
break
if not change and new_value is not None:
# Attribute was not found. Lets add it
change = "\t%-30s%s\n" % (field_name, new_value)
object_definition.insert(i, change)
# Lets put a banner in front of our item
if make_comments:
comment = '# Edited by PyNag on %s\n' % time.ctime()
if len(everything_before) > 0:
last_line_before = everything_before[-1]
if last_line_before.startswith('# Edited by PyNag on'):
everything_before.pop() # remove this line
object_definition.insert(0, comment)
# Here we overwrite the config-file, hoping not to ruin anything
str_buffer = "%s%s%s" % (''.join(everything_before), ''.join(object_definition), ''.join(everything_after))
self.write(filename, str_buffer)
return True
def open(self, filename, *args, **kwargs):
""" Wrapper around global open()
Simply calls global open(filename, *args, **kwargs) and passes all arguments
as they are received. See global open() function for more details.
"""
return open(filename, *args, **kwargs)
@pynag.Utils.synchronized(pynag.Utils.rlock)
def write(self, filename, string):
""" Wrapper around open(filename).write()
Writes string to filename and closes the file handler. File handler is
opened in `'w'` mode.
Args:
filename: File where *string* will be written. This is the path to
the file. (string)
string: String to be written to file. (string)
Returns:
Return code as returned by :py:meth:`os.write`
"""
fh = self.open(filename, 'w')
return_code = fh.write(string)
fh.flush()
# os.fsync(fh)
fh.close()
self._is_dirty = True
return return_code
def item_rewrite(self, item, str_new_item):
""" Completely rewrites item with string provided.
Args:
item: Item that is to be rewritten
str_new_item: str representation of the new item
..
In the following line, every "\\n" is actually a simple line break
This is only a little patch for the generated documentation.
Examples::
item_rewrite( item, "define service {\\n name example-service \\n register 0 \\n }\\n" )
Returns:
True on success
Raises:
:py:class:`ValueError` if object is not found
:py:class:`IOError` if save fails
"""
return self._modify_object(item=item, new_item=str_new_item)
def item_remove(self, item):
""" Delete one specific item from its configuration files
Args:
item: Item to be removed from its configuration file
Examples::
item_remove( item )
Returns:
True on success
Raises:
:py:class:`ValueError` if object is not found
:py:class:`IOError` if save fails
"""
return self._modify_object(item=item, new_item="")
def item_edit_field(self, item, field_name, new_value):
""" Modifies one field of a (currently existing) object.
Changes are immediate (i.e. there is no commit)
Args:
item: Item to be modified. Its field `field_name` will be set to
`new_value`.
field_name: Name of the field that will be modified. (str)
new_value: Value to which the field `field_name` will be set. (str)
Example usage::
edit_object( item, field_name="host_name", new_value="examplehost.example.com") # doctest: +SKIP
Returns:
True on success
Raises:
:py:class:`ValueError` if object is not found
:py:class:`IOError` if save fails
"""
return self._modify_object(item, field_name=field_name, new_value=new_value)
def item_remove_field(self, item, field_name):
""" Removes one field of a (currently existing) object.
Changes are immediate (i.e. there is no commit)
Args:
item: Item to remove field from.
field_name: Field to remove. (string)
Example usage::
item_remove_field( item, field_name="contactgroups" )
Returns:
True on success
Raises:
:py:class:`ValueError` if object is not found
:py:class:`IOError` if save fails
"""
return self._modify_object(item=item, field_name=field_name, new_value=None, new_field_name=None)
def item_rename_field(self, item, old_field_name, new_field_name):
""" Renames a field of a (currently existing) item.
Changes are immediate (i.e. there is no commit).
Args:
item: Item to modify.
old_field_name: Name of the field that will have its name changed. (string)
new_field_name: New name given to `old_field_name` (string)
Example usage::
item_rename_field(item, old_field_name="normal_check_interval", new_field_name="check_interval")
Returns:
True on success
Raises:
:py:class:`ValueError` if object is not found
:py:class:`IOError` if save fails
"""
return self._modify_object(item=item, field_name=old_field_name, new_field_name=new_field_name)
def item_add(self, item, filename):
""" Adds a new object to a specified config file.
Args:
item: Item to be created
filename: Filename that we are supposed to write the new item to.
This is the path to the file. (string)
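Example usage (illustrative path and hostname)::
item = c.get_new_item('host', '/etc/nagios/conf.d/examplehost.cfg')  # doctest: +SKIP
item['host_name'] = 'examplehost'  # doctest: +SKIP
c.item_add(item, item['meta']['filename'])  # doctest: +SKIP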
Returns:
True on success
Raises:
:py:class:`IOError` on failed save
"""
if not 'meta' in item:
item['meta'] = {}
item['meta']['filename'] = filename
# Create directory if it does not already exist
dirname = os.path.dirname(filename)
if not self.isdir(dirname):
os.makedirs(dirname)
str_buffer = self.print_conf(item)
fh = self.open(filename, 'a')
fh.write(str_buffer)
fh.close()
return True
def edit_object(self, item, field_name, new_value):
""" Modifies a (currently existing) item.
Changes are immediate (i.e. there is no commit)
Args:
item: Item to modify.
field_name: Field that will be updated.
new_value: Updated value of field `field_name`
Example Usage:
edit_object( item, field_name="host_name", new_value="examplehost.example.com")
Returns:
True on success
.. WARNING::
THIS FUNCTION IS DEPRECATED. USE item_edit_field() instead
"""
return self.item_edit_field(item=item, field_name=field_name, new_value=new_value)
def compareObjects(self, item1, item2):
""" Compares two items. Returns true if they are equal
Compares every key: value pair for both items. If anything is different,
the items will not be considered equal.
Args:
item1, item2: Items to be compared.
Returns:
True -- Items are equal
False -- Items are not equal
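Example (minimal sketch using an empty item compared with itself)::
>>> c = Config()
>>> item = c.get_new_item('host', 'filename.cfg')
>>> c.compareObjects(item, item)
True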
"""
keys1 = item1['meta']['defined_attributes'].keys()
keys2 = item2['meta']['defined_attributes'].keys()
keys1.sort()
keys2.sort()
result = True
if keys1 != keys2:
return False
for key in keys1:
if key == 'meta':
continue
key1 = item1[key]
key2 = item2[key]
# For our purpose, 30 is equal to 30.000
if key == 'check_interval':
key1 = int(float(key1))
key2 = int(float(key2))
if str(key1) != str(key2):
result = False
if result is False:
return False
return True
def edit_service(self, target_host, service_description, field_name, new_value):
""" Edit a service's attributes
Takes a host, service_description pair to identify the service to modify
and sets its field `field_name` to `new_value`.
Args:
target_host: name of the host to which the service is attached to. (string)
service_description: Service description of the service to modify. (string)
field_name: Field to modify. (string)
new_value: Value to which the `field_name` field will be updated (string)
Returns:
True on success
Raises:
:py:class:`ParserError` if the service is not found
"""
original_object = self.get_service(target_host, service_description)
if original_object is None:
raise ParserError("Service not found")
return self.edit_object(original_object, field_name, new_value)
def _get_list(self, item, key):
""" Return a comma list from an item
Args:
item: Item from which to select value. (string)
key: Field name of the value to select and return as a list. (string)
Example::
_get_list(Foo_object, host_name)
define service {
service_description Foo
host_name larry,curly,moe
}
returns
['larry','curly','moe']
Returns:
A list of the item's values of `key`
Raises:
:py:class:`ParserError` if item is not a dict
"""
if not isinstance(item, dict):
raise ParserError("%s is not a dictionary\n" % item)
# return []
if not key in item:
return []
return_list = []
if item[key].find(",") != -1:
for name in item[key].split(","):
return_list.append(name)
else:
return_list.append(item[key])
# Alphabetize
return_list.sort()
return return_list
def delete_object(self, object_type, object_name, user_key=None):
""" Delete object from configuration files
Args:
object_type: Type of the object to delete from configuration files.
object_name: Name of the object to delete from configuration files.
user_key: user_key to pass to :py:meth:`get_object`
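Example usage (illustrative object name)::
delete_object('host', 'examplehost')  # doctest: +SKIP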
Returns:
True on success.
"""
item = self.get_object(object_type=object_type, object_name=object_name, user_key=user_key)
return self.item_remove(item)
def delete_service(self, service_description, host_name):
""" Delete service from configuration files
Args:
service_description: service_description field value of the object
to delete from configuration files.
host_name: host_name field value of the object to delete from
configuration files.
Returns:
True on success.
"""
item = self.get_service(host_name, service_description)
return self.item_remove(item)
def delete_host(self, object_name, user_key=None):
""" Delete a host from its configuration files
Args:
object_name: object_name field value of the object to delete from
configuration files.
user_key: user_key to pass to :py:meth:`get_object`
Returns:
True on success.
"""
return self.delete_object('host', object_name, user_key=user_key)
def delete_hostgroup(self, object_name, user_key=None):
""" Delete a hostgroup from its configuration files
Args:
object_name: object_name field value of the object to delete from
configuration files.
user_key: user_key to pass to :py:meth:`get_object`
Returns:
True on success.
"""
return self.delete_object('hostgroup', object_name, user_key=user_key)
def get_object(self, object_type, object_name, user_key=None):
""" Return a complete object dictionary
Args:
object_type: Type of the object to return. (string)
object_name: object_name field value of the object to return.
user_key: User defined key. Default None. (string)
Returns:
The item found to match all the criteria.
None if object is not found
"""
object_key = self._get_key(object_type, user_key)
for item in self.data['all_%s' % object_type]:
if item.get(object_key, None) == object_name:
return item
return None
def get_host(self, object_name, user_key=None):
""" Return a host object
Args:
object_name: object_name field value of the host to return.
user_key: user_key to pass to :py:meth:`get_object`
Returns:
The item found to match all the criteria.
"""
return self.get_object('host', object_name, user_key=user_key)
def get_servicegroup(self, object_name, user_key=None):
""" Return a Servicegroup object
Args:
object_name: object_name field value of the servicegroup to return.
user_key: user_key to pass to :py:meth:`get_object`
Returns:
The item found to match all the criteria.
"""
return self.get_object('servicegroup', object_name, user_key=user_key)
def get_contact(self, object_name, user_key=None):
""" Return a Contact object
Args:
object_name: object_name field value of the contact to return.
user_key: user_key to pass to :py:meth:`get_object`
Returns:
The item found to match all the criteria.
"""
return self.get_object('contact', object_name, user_key=user_key)
def get_contactgroup(self, object_name, user_key=None):
""" Return a Contactgroup object
Args:
object_name: object_name field value of the contactgroup to return.
user_key: user_key to pass to :py:meth:`get_object`
Returns:
The item found to match all the criteria.
"""
return self.get_object('contactgroup', object_name, user_key=user_key)
def get_timeperiod(self, object_name, user_key=None):
""" Return a Timeperiod object
Args:
object_name: object_name field value of the timeperiod to return.
user_key: user_key to pass to :py:meth:`get_object`
Returns:
The item found to match all the criteria.
"""
return self.get_object('timeperiod', object_name, user_key=user_key)
def get_command(self, object_name, user_key=None):
""" Return a Command object
Args:
object_name: object_name field value of the command to return.
user_key: user_key to pass to :py:meth:`get_object`
Returns:
The item found to match all the criteria.
"""
return self.get_object('command', object_name, user_key=user_key)
def get_hostgroup(self, object_name, user_key=None):
""" Return a hostgroup object
Args:
object_name: object_name field value of the hostgroup to return.
user_key: user_key to pass to :py:meth:`get_object`
Returns:
The item found to match all the criteria.
"""
return self.get_object('hostgroup', object_name, user_key=user_key)
def get_servicedependency(self, object_name, user_key=None):
""" Return a servicedependency object
Args:
object_name: object_name field value of the servicedependency to return.
user_key: user_key to pass to :py:meth:`get_object`
Returns:
The item found to match all the criteria.
"""
return self.get_object('servicedependency', object_name, user_key=user_key)
def get_hostdependency(self, object_name, user_key=None):
""" Return a hostdependency object
Args:
object_name: object_name field value of the hostdependency to return.
user_key: user_key to pass to :py:meth:`get_object`
Returns:
The item found to match all the criteria.
"""
return self.get_object('hostdependency', object_name, user_key=user_key)
def get_service(self, target_host, service_description):
""" Return a service object
Args:
target_host: host_name field of the service to be returned. This is
the host to which is attached the service.
service_description: service_description field of the service to be
returned.
Returns:
The item found to match all the criteria.
"""
for item in self.data['all_service']:
if item.get('service_description') == service_description and item.get('host_name') == target_host:
return item
return None
def _append_use(self, source_item, name):
""" Append attributes to source_item that are inherited via 'use' attribute'
Args:
source_item: item (dict) to apply the inheritance upon
name: obsolete (discovered automatically via source_item['use']).
Here for compatibility.
Returns:
Source Item with appended attributes.
Raises:
:py:class:`ParserError` on recursion errors
"""
# Remove the 'use' key
if "use" in source_item:
del source_item['use']
for possible_item in self.pre_object_list:
if "name" in possible_item:
# Start appending to the item
for k, v in possible_item.iteritems():
try:
if k == 'use':
source_item = self._append_use(source_item, v)
except Exception:
raise ParserError("Recursion error on %s %s" % (source_item, v))
# Only add the item if it doesn't already exist
if not k in source_item:
source_item[k] = v
return source_item
def _post_parse(self):
""" Creates a few optimization tweaks and easy access lists in self.data
Creates :py:attr:`config.item_apply_cache` and fills the all_object
item lists in self.data.
"""
self.item_list = None
self.item_apply_cache = {} # This is performance tweak used by _apply_template
for raw_item in self.pre_object_list:
# Performance tweak, make sure hashmap exists for this object_type
object_type = raw_item['meta']['object_type']
if not object_type in self.item_apply_cache:
self.item_apply_cache[object_type] = {}
# Tweak ends
if "use" in raw_item:
raw_item = self._apply_template(raw_item)
self.post_object_list.append(raw_item)
# Add the items to the class lists.
for list_item in self.post_object_list:
type_list_name = "all_%s" % list_item['meta']['object_type']
if not type_list_name in self.data:
self.data[type_list_name] = []
self.data[type_list_name].append(list_item)
def commit(self):
""" Write any changes that have been made to it's appropriate file """
# Loops through ALL items
for k in self.data.keys():
for item in self[k]:
# If the object needs committing, commit it!
if item['meta']['needs_commit']:
# Create file contents as an empty string
file_contents = ""
# find any other items that may share this config file
extra_items = self._get_items_in_file(item['meta']['filename'])
if len(extra_items) > 0:
for commit_item in extra_items:
# Ignore items that are already set to be deleted
if commit_item['meta']['delete_me']:
continue
# Make sure we aren't adding this thing twice
if item != commit_item:
file_contents += self.print_conf(commit_item)
# This is the actual item that needs committing
if not item['meta']['delete_me']:
file_contents += self.print_conf(item)
# Write the file
filename = item['meta']['filename']
self.write(filename, file_contents)
# Recreate the item entry without the commit flag
self.data[k].remove(item)
item['meta']['needs_commit'] = None
self.data[k].append(item)
def flag_all_commit(self):
""" Flag every item in the configuration to be committed
This should probably only be used for debugging purposes
"""
for object_type in self.data.keys():
for item in self.data[object_type]:
item['meta']['needs_commit'] = True
def print_conf(self, item):
""" Return a string that can be used in a configuration file
Args:
item: Item to be dumped as a string.
Returns:
String representation of item.
"""
object_type = item['meta']['object_type']
output = "define %s {\n" % object_type
for k, v in item.iteritems():
if v is None:
# Skip entries with No value
continue
if k != 'meta':
if k not in item['meta']['template_fields']:
output += "\t %-30s %-30s\n" % (k, v)
output += "}\n\n"
return output
def _load_static_file(self, filename=None):
""" Load a general config file (like nagios.cfg) that has key=value config file format. Ignore comments
Arguments:
filename: name of file to parse, if none nagios.cfg will be used
Returns:
a [ (key,value), (key,value) ] list
"""
result = []
if not filename:
filename = self.cfg_file
for line in self.open(filename).readlines():
# Strip out new line characters
line = line.strip()
# Skip blank lines
if line == "":
continue
# Skip comments
if line[0] == "#" or line[0] == ';':
continue
tmp = line.split("=", 1)
if len(tmp) < 2:
continue
key, value = tmp
key = key.strip()
value = value.strip()
result.append((key, value))
return result
def _edit_static_file(self, attribute, new_value, filename=None, old_value=None, append=False):
""" Modify a general config file (like nagios.cfg) that has a key=value config file format.
Arguments:
filename: Name of config file that will be edited (i.e. nagios.cfg)
attribute: name of attribute to edit (i.e. check_external_commands)
new_value: new value for the said attribute (i.e. "1"). None deletes
the line.
old_value: Useful if multiple attributes exist (i.e. cfg_dir) and
you want to replace a specific one.
append: If true, do not overwrite current setting. Instead append
this at the end. Use this with settings that are repeated like
cfg_file.
Examples::
_edit_static_file(filename='/etc/nagios/nagios.cfg', attribute='check_external_commands', new_value='1')
_edit_static_file(filename='/etc/nagios/nagios.cfg', attribute='cfg_dir', new_value='/etc/nagios/okconfig', append=True)
"""
if filename is None:
filename = self.cfg_file
# For some specific attributes, append should be implied
if attribute in ('cfg_file', 'cfg_dir', 'broker_module'):
append = True
# If/when we make a change, new_line is what will be written
new_line = '%s=%s\n' % (attribute, new_value)
# new_value=None means line should be removed
if new_value is None:
new_line = ''
write_buffer = self.open(filename).readlines()
is_dirty = False # dirty if we make any changes
for i, line in enumerate(write_buffer):
# Strip out new line characters
line = line.strip()
# Skip blank lines
if line == "":
continue
# Skip comments
if line[0] == "#" or line[0] == ';':
continue
key, value = line.split("=", 1)
key = key.strip()
value = value.strip()
# If key does not match, we are not interested in this line
if key != attribute:
continue
# If old_value was specified and it matches, we don't have to look any further
elif value == old_value:
write_buffer[i] = new_line
is_dirty = True
break
# if current value is the same as new_value, no need to make changes
elif value == new_value:
return False
# Special so cfg_dir matches despite double-slashes, etc
elif attribute == 'cfg_dir' and new_value and os.path.normpath(value) == os.path.normpath(new_value):
return False
# We are not appending, and no old value was specified:
elif append is False and not old_value:
write_buffer[i] = new_line
is_dirty = True
break
if is_dirty is False and new_value is not None:
# If we get here, it means we read the whole file,
# and we have not yet made any changes, So we assume
# We should append to the file
write_buffer.append(new_line)
is_dirty = True
# When we get down here, it is time to write changes to file
if is_dirty is True:
str_buffer = ''.join(write_buffer)
self.write(filename, str_buffer)
return True
else:
return False
def needs_reload(self):
""" Checks if the Nagios service needs a reload.
Returns:
True if Nagios service needs reload of cfg files
False if reload not needed or Nagios is not running
"""
if not self.maincfg_values:
self.reset()
self.parse_maincfg()
new_timestamps = self.get_timestamps()
object_cache_file = self.get_cfg_value('object_cache_file')
if self._get_pid() is None:
return False
if not object_cache_file:
return True
if not self.isfile(object_cache_file):
return True
object_cache_timestamp = new_timestamps.get(object_cache_file, 0)
for k, v in new_timestamps.items():
if not v or int(v) > object_cache_timestamp:
return True
return False
def needs_reparse(self):
""" Checks if the Nagios configuration needs to be reparsed.
Returns:
True if any Nagios configuration file has changed since last parse()
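Example usage (illustrative)::
c = Config()  # doctest: +SKIP
c.parse()  # doctest: +SKIP
c.needs_reparse()  # False until a config file changes on disk  # doctest: +SKIP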
"""
# If Parse has never been run:
if self.data == {}:
return True
# If previous save operation has forced a reparse
if self._is_dirty is True:
return True
# If we get here, we check the timestamps of the configs
new_timestamps = self.get_timestamps()
if len(new_timestamps) != len(self.timestamps):
return True
for k, v in new_timestamps.items():
if self.timestamps.get(k, None) != v:
return True
return False
@pynag.Utils.synchronized(pynag.Utils.rlock)
def parse_maincfg(self):
""" Parses your main configuration (nagios.cfg) and stores it as key/value pairs in self.maincfg_values
This function is mainly used by config.parse() which also parses your
whole configuration set.
Raises:
py:class:`ConfigFileNotFound`
"""
# If nagios.cfg is not set, lets do some minor autodiscover.
if self.cfg_file is None:
raise ConfigFileNotFound('Could not find nagios.cfg')
self.maincfg_values = self._load_static_file(self.cfg_file)
@pynag.Utils.synchronized(pynag.Utils.rlock)
def parse(self):
""" Parse all objects in your nagios configuration
This functions starts by loading up your nagios.cfg ( parse_maincfg() )
then moving on to your object configuration files (as defined via
cfg_file and cfg_dir) and and your resource_file as well.
Returns:
None
Raises:
:py:class:`IOError` if unable to read any file due to permission
problems
"""
# reset
self.reset()
self.parse_maincfg()
self.cfg_files = self.get_cfg_files()
# When parsing config, we will softly fail if permission denied
# comes on resource files. If later someone tries to get them via
# get_resource, we will fail hard
try:
self._resource_values = self.get_resources()
except IOError:
t, e = sys.exc_info()[:2]
self.errors.append(str(e))
self.timestamps = self.get_timestamps()
# This loads every object definition into self.pre_object_list
for cfg_file in self.cfg_files:
self._load_file(cfg_file)
self._post_parse()
self._is_dirty = False
def get_resource(self, resource_name):
""" Get a single resource value which can be located in any resource.cfg file
Arguments:
resource_name: Name as it appears in resource file (i.e. $USER1$)
Returns:
String value of the resource value.
Raises:
:py:class:`KeyError` if resource is not found
:py:class:`ParserError` if resource is not found and you do not have
permissions
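Example usage (illustrative; the value depends on your resource.cfg)::
get_resource('$USER1$')  # doctest: +SKIP
# might return e.g. '/usr/lib/nagios/plugins'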
"""
resources = self.get_resources()
for k, v in resources:
if k == resource_name:
return v
def get_timestamps(self):
""" Returns hash map of all nagios related files and their timestamps"""
files = {}
files[self.cfg_file] = None
for k, v in self.maincfg_values:
if k in ('resource_file', 'lock_file', 'object_cache_file'):
files[v] = None
for i in self.get_cfg_files():
files[i] = None
# Now let's get the timestamp of every file
for k, v in files.items():
if not self.isfile(k):
continue
files[k] = self.stat(k).st_mtime
return files
def isfile(self, *args, **kwargs):
""" Wrapper around os.path.isfile """
return os.path.isfile(*args, **kwargs)
def isdir(self, *args, **kwargs):
""" Wrapper around os.path.isdir """
return os.path.isdir(*args, **kwargs)
def islink(self, *args, **kwargs):
""" Wrapper around os.path.islink """
return os.path.islink(*args, **kwargs)
def readlink(self, *args, **kwargs):
""" Wrapper around os.readlink """
return os.readlink(*args, **kwargs)
def stat(self, *args, **kwargs):
""" Wrapper around os.stat """
return os.stat(*args, **kwargs)
def remove(self, *args, **kwargs):
""" Wrapper around os.remove """
return os.remove(*args, **kwargs)
def access(self, *args, **kwargs):
""" Wrapper around os.access """
return os.access(*args, **kwargs)
def listdir(self, *args, **kwargs):
""" Wrapper around os.listdir """
return os.listdir(*args, **kwargs)
def exists(self, *args, **kwargs):
""" Wrapper around os.path.exists """
return os.path.exists(*args, **kwargs)
def get_resources(self):
"""Returns a list of every private resources from nagios.cfg"""
resources = []
for config_object, config_value in self.maincfg_values:
if config_object == 'resource_file' and self.isfile(config_value):
resources += self._load_static_file(config_value)
return resources
def extended_parse(self):
""" This parse is used after the initial parse() command is run.
It is only needed if you want extended meta information about hosts or other objects
"""
# Do the initial parsing
self.parse()
# First, cycle through the hosts, and append hostgroup information
index = 0
for host in self.data['all_host']:
if host.get("register", None) == "0":
continue
if not "host_name" in host:
continue
if not "hostgroup_list" in self.data['all_host'][index]['meta']:
self.data['all_host'][index]['meta']['hostgroup_list'] = []
# Append any hostgroups that are directly listed in the host definition
if "hostgroups" in host:
for hostgroup_name in self._get_list(host, 'hostgroups'):
if not "hostgroup_list" in self.data['all_host'][index]['meta']:
self.data['all_host'][index]['meta']['hostgroup_list'] = []
if hostgroup_name not in self.data['all_host'][index]['meta']['hostgroup_list']:
self.data['all_host'][index]['meta']['hostgroup_list'].append(hostgroup_name)
# Append any services which reference this host
service_list = []
for service in self.data['all_service']:
if service.get("register", None) == "0":
continue
if not "service_description" in service:
continue
if host['host_name'] in self._get_active_hosts(service):
service_list.append(service['service_description'])
self.data['all_host'][index]['meta']['service_list'] = service_list
# Increment count
index += 1
# Loop through all hostgroups, appending them to their respective hosts
for hostgroup in self.data['all_hostgroup']:
for member in self._get_list(hostgroup, 'members'):
index = 0
for host in self.data['all_host']:
if not "host_name" in host:
continue
# Only process members that match
if host['host_name'] == member:
# Create the meta var if it doesn't exist
if not "hostgroup_list" in self.data['all_host'][index]['meta']:
self.data['all_host'][index]['meta']['hostgroup_list'] = []
if hostgroup['hostgroup_name'] not in self.data['all_host'][index]['meta']['hostgroup_list']:
self.data['all_host'][index]['meta']['hostgroup_list'].append(hostgroup['hostgroup_name'])
# Increment count
index += 1
# Expand service membership
index = 0
for service in self.data['all_service']:
# Find a list of hosts to negate from the final list
self.data['all_service'][index]['meta']['service_members'] = self._get_active_hosts(service)
# Increment count
index += 1
def _get_active_hosts(self, item):
""" Given an object, return a list of active hosts.
This will exclude hosts that are negated with a "!"
Args:
item: Item to obtain active hosts from.
Returns:
List of all the active hosts for `item`
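Example (illustrative)::
define service {
host_name   host1,host2,!host2
}
yields ['host1'], because host2 is negated by the "!" prefix.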
"""
# First, generate the negation list
negate_hosts = []
# Hostgroups
if "hostgroup_name" in item:
for hostgroup_name in self._get_list(item, 'hostgroup_name'):
if hostgroup_name[0] == "!":
hostgroup_obj = self.get_hostgroup(hostgroup_name[1:])
negate_hosts.extend(self._get_list(hostgroup_obj, 'members'))
# Host Names
if "host_name" in item:
for host_name in self._get_list(item, 'host_name'):
if host_name[0] == "!":
negate_hosts.append(host_name[1:])
# Now get hosts that are actually listed
active_hosts = []
# Hostgroups
if "hostgroup_name" in item:
for hostgroup_name in self._get_list(item, 'hostgroup_name'):
if hostgroup_name[0] != "!":
active_hosts.extend(self._get_list(self.get_hostgroup(hostgroup_name), 'members'))
# Host Names
if "host_name" in item:
for host_name in self._get_list(item, 'host_name'):
if host_name[0] != "!":
active_hosts.append(host_name)
# Combine the lists
return_hosts = []
for active_host in active_hosts:
if active_host not in negate_hosts:
return_hosts.append(active_host)
return return_hosts
def get_cfg_dirs(self):
""" Parses the main config file for configuration directories
Returns:
List of all cfg directories used in this configuration
Example::
print(get_cfg_dirs())
['/etc/nagios/hosts','/etc/nagios/objects',...]
"""
cfg_dirs = []
for config_object, config_value in self.maincfg_values:
if config_object == "cfg_dir":
cfg_dirs.append(config_value)
return cfg_dirs
def get_cfg_files(self):
""" Return a list of all cfg files used in this configuration
Filenames are normalised so that if nagios.cfg specifies relative
filenames we will convert it to fully qualified filename before returning.
Returns:
List of all configurations files used in the configuration.
Example:
print(get_cfg_files())
['/etc/nagios/hosts/host1.cfg','/etc/nagios/hosts/host2.cfg',...]
"""
cfg_files = []
for config_object, config_value in self.maincfg_values:
# Add cfg_file objects to cfg file list
if config_object == "cfg_file":
config_value = self.abspath(config_value)
if self.isfile(config_value):
cfg_files.append(config_value)
# Parse all files in a cfg directory
if config_object == "cfg_dir":
config_value = self.abspath(config_value)
directories = []
raw_file_list = []
directories.append(config_value)
# Walk through every subdirectory and add to our list
while directories:
current_directory = directories.pop(0)
# Nagios doesn't care if cfg_dir exists or not, so why should we?
if not self.isdir(current_directory):
continue
for item in self.listdir(current_directory):
# Append full path to file
item = "%s" % (os.path.join(current_directory, item.strip()))
if self.islink(item):
item = os.readlink(item)
if self.isdir(item):
directories.append(item)
if raw_file_list.count(item) < 1:
raw_file_list.append(item)
for raw_file in raw_file_list:
if raw_file.endswith('.cfg'):
if self.exists(raw_file) and not self.isdir(raw_file):
# Nagios doesn't care if cfg_file exists or not, so we will not throw errors
cfg_files.append(raw_file)
return cfg_files
def abspath(self, path):
""" Return the absolute path of a given relative path.
The current working directory is assumed to be the dirname of nagios.cfg
Args:
path: relative path to be transformed into absolute path. (string)
Returns:
Absolute path of given relative path.
Example:
>>> c = Config(cfg_file="/etc/nagios/nagios.cfg")
>>> c.abspath('nagios.cfg')
'/etc/nagios/nagios.cfg'
>>> c.abspath('/etc/nagios/nagios.cfg')
'/etc/nagios/nagios.cfg'
"""
if not isinstance(path, str):
return ValueError("Path must be a string got %s instead" % type(path))
if path.startswith('/'):
return path
nagiosdir = os.path.dirname(self.cfg_file)
normpath = os.path.abspath(os.path.join(nagiosdir, path))
return normpath
def get_cfg_value(self, key):
""" Returns one specific value from your nagios.cfg file,
None if value is not found.
Arguments:
key: what attribute to fetch from nagios.cfg (example: "command_file" )
Returns:
String of the first value found for the requested key.
Example:
>>> c = Config() # doctest: +SKIP
>>> log_file = c.get_cfg_value('log_file') # doctest: +SKIP
# Should return something like "/var/log/nagios/nagios.log"
"""
if not self.maincfg_values:
self.parse_maincfg()
for k, v in self.maincfg_values:
if k == key:
return v
return None
def get_object_types(self):
""" Returns a list of all discovered object types """
return map(lambda x: re.sub("all_", "", x), self.data.keys())
def cleanup(self):
""" Remove configuration files that have no configuration items """
for filename in self.cfg_files:
if not self.parse_file(filename): # parse_file returns empty list on empty files
self.remove(filename)
# If nagios.cfg specifies this file directly via cfg_file directive then...
for k, v in self.maincfg_values:
if k == 'cfg_file' and v == filename:
self._edit_static_file(k, old_value=v, new_value=None)
def __setitem__(self, key, item):
self.data[key] = item
def __getitem__(self, key):
return self.data[key]
|
llange/pynag
|
pynag/Parsers/config_parser.py
|
Python
|
gpl-2.0
| 69,210
|
[
"MOE"
] |
adae88b9bd61a12b8861efe01d005a67c7576aff076a4e9297c52ac1506bba25
|
import requests
import os
import json
from flask import request, render_template, send_from_directory
from bs4 import BeautifulSoup
from jobcert import app, db, models
import job_posting
from parser import Parser
@app.route('/')
def index():
jobsposting_support_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'jobposting-support.json')
with open(jobsposting_support_file) as f:
jobsposting_support_json = f.read()
jobsposting_support_data = json.loads(jobsposting_support_json)
jobsposting_support_data = sorted(jobsposting_support_data, key=lambda k: k['name'])
return render_template('index.html', menu_item="tools", jobposting_support = jobsposting_support_data)
@app.route('/about')
def report():
return render_template('report.html', menu_item="report")
@app.route('/api')
def api():
return render_template('api.html', menu_item="api")
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
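# Example requests against this endpoint (illustrative URL):
# GET /check?url=https://example.org/job-posting.html
# POST /check with a form field "html" containing the raw posting markup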
@app.route('/check', methods=['GET', 'POST'])
def check():
#get html
error = False
html = None
url = None
if request.method == 'POST':
html = request.values['html']
if request.method == 'GET':
url = request.values['url']
try:
html = requests.get(url).content
except requests.exceptions.ConnectionError:
error = "Sorry, that URL does not exist"
except requests.exceptions.MissingSchema:
error = "Sorry, that is not a valid URL"
except requests.exceptions.InvalidSchema:
error = "Sorry, that is not a valid URL"
except requests.exceptions.HTTPError:
error = "Sorry, something went wrong"
except requests.exceptions.Timeout:
error = "Sorry, there was a timeout when trying to visit that URL"
#parse
parser = Parser()
if error == False:
parser.parse(html)
#save results
log = models.Log()
log.populate_from_parser(url, parser)
db.session.add(log)
db.session.commit()
return render_template('check.html', menu_item="tools", parser=parser, error=error, url=url)
|
Doteveryone/BetterJobAdverts
|
jobcert/views.py
|
Python
|
agpl-3.0
| 2,177
|
[
"VisIt"
] |
4401f5043f79a4d873e9002d8c456c8e9ccf2ec8c29fcd4b4469ece901768d47
|
#
# Copyright 2014 CIRAD
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/> or
# write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
#!/usr/bin/python
import optparse, os, shutil, subprocess, sys, tempfile, fileinput, ConfigParser, operator, time, multiprocessing
def stop_err( msg ):
sys.stderr.write( "%s\n" % msg )
sys.exit()
def run_job (cmd_line, ERROR):
# print cmd_line
try:
tmp = tempfile.NamedTemporaryFile().name
# print tmp
error = open(tmp, 'w')
proc = subprocess.Popen( args=cmd_line, shell=True, stderr=error)
returncode = proc.wait()
error.close()
error = open( tmp, 'rb' )
stderr = ''
buffsize = 1048576
try:
while True:
stderr += error.read( buffsize )
if not stderr or len( stderr ) % buffsize != 0:
break
except OverflowError:
pass
error.close()
os.remove(tmp)
if returncode != 0:
raise Exception, stderr
except Exception, e:
stop_err( ERROR + str( e ) )
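# Example (illustrative): run_job('ls -l', 'Error listing directory: ')
# runs the command in a shell, captures stderr in a temporary file and
# aborts via stop_err() with the given message prefix if the command fails.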
def __main__():
#Parse Command Line
parser = optparse.OptionParser()
# Wrapper options.
parser.add_option( '', '--blast', dest='blast', default='not_filled', help='The blast file (in the default blast output format)')
parser.add_option( '', '--ident', dest='ident', default='90', help='The minimal identity percentage, [default: %default]')
parser.add_option( '', '--max_hit', dest='max_hit', default='1', help='The maximal hit number, [default: %default]')
parser.add_option( '', '--seq', dest='seq', default='100', help='The number of sequences to process per chunk, [default: %default]')
parser.add_option( '', '--thread', dest='thread', default='1', help='The number of jobs to run in parallel (integer), [default: %default]')
parser.add_option( '', '--out', dest='out', default='not_filled', help='An id for intermediate output')
(options, args) = parser.parse_args()
ScriptPath = os.path.dirname(sys.argv[0])
loca_programs = ConfigParser.RawConfigParser()
loca_programs.read(ScriptPath+'/loca_programs.conf')
proc = int(options.thread)
if options.blast == 'not_filled':
mot = 'Please provide an argument for --blast'
sys.exit(mot)
if options.out == 'not_filled':
mot = 'Please provide an argument for --out'
sys.exit(mot)
file = open (options.blast)
j = 0
liste_job = []
liste_temp = []
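# Split the concatenated BLASTN output into chunks of --seq records each
# (<out>_File0.temp, <out>_File<seq>.temp, ...) and queue one
# blat_results_analyzer_v3.pl job per chunk. Illustrative: with --seq 100,
# records 0-99 land in <out>_File0.temp and records 100-199 in
# <out>_File100.temp.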
for line in file:
if line.split() != []:
if line.split()[0] == 'BLASTN':
if j == 0:
outfile = open(options.out+'_File'+str(j)+'.temp','w')
outfile.write(line)
liste_job.append('%s %s/blat_results_analyzer_v3.pl --blat %s --identity %s --nombre %s > %s' % (loca_programs.get('Programs','perl'), ScriptPath, options.out+'_File'+str(j)+'.temp', options.ident, options.max_hit, options.out+'.bra'+str(j)))
liste_temp.append(str(j))
elif j%int(options.seq) == 0:
outfile.close()
liste_job.append('%s %s/blat_results_analyzer_v3.pl --blat %s --identity %s --nombre %s > %s' % (loca_programs.get('Programs','perl'), ScriptPath, options.out+'_File'+str(j)+'.temp', options.ident, options.max_hit, options.out+'.bra'+str(j)))
liste_temp.append(str(j))
outfile = open(options.out+'_File'+str(j)+'.temp','w')
outfile.write(line)
else:
outfile.write(line)
j += 1
else:
outfile.write(line)
else:
outfile.write(line)
outfile.close()
liste_process = []
for n in liste_job:
t = multiprocessing.Process(target=run_job, args=(n, 'Error launching blat_results_analyzer_v3.pl: ',))
liste_process.append(t)
if len(liste_process) == proc:
# Starts threads
for process in liste_process:
process.start()
# This blocks the calling thread until the thread whose join() method is called is terminated.
for process in liste_process:
process.join()
#the processes are done
liste_process = []
if liste_process:
# Starts threads
for process in liste_process:
process.start()
# This blocks the calling thread until the thread whose join() method is called is terminated.
for process in liste_process:
process.join()
#the processes are done
liste_process = []
os.system('cat '+options.out+'.bra* > '+options.out+'_final')
for n in liste_temp:
os.remove(options.out+'_File'+n+'.temp')
os.remove(options.out+'.bra'+n)
doublon = set()
dico = {}
file = open(options.out+'_final')
for line in file:
data = line.split()
if data:
if data[0] in dico:
doublon.add(data[0])
else:
dico[data[0]] = line
file.close()
for n in dico:
if not(n in doublon):
print('\t'.join(dico[n].split()))
os.remove(options.out+'_final')
if __name__ == "__main__": __main__()
|
SouthGreenPlatform/scaffhunter
|
bin/BLAT_gros.py
|
Python
|
gpl-3.0
| 5,210
|
[
"BLAST"
] |
3137fc02a0909838fa7c3a2f7c92f2d556ff5346058b4b14b129aae4ccf12842
|
# -*- coding: utf-8 -*-
# General Django settings for mysite project.
import os
import sys
import django.conf.global_settings as DEFAULT_SETTINGS
import logging
import mysite.pipelinefiles as pipelinefiles
import mysite.utils as utils
from celery.schedules import crontab
try:
import psycopg2
except ImportError:
# If psycopg2 is not installed, expect psycopg2cffi, which can be used with
# PyP. Make sure psycopg2cffi runs in compatibility mode so that it can be
# imported as psycopg2.
try:
from psycopg2cffi import compat
compat.register()
except ImportError:
raise ImportError("Need either psycopg2 or psycopg2cffi")
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Make Django root folder available
PROJECT_ROOT = utils.relative('..', '..')
# Add all subdirectories of project, applications and lib to sys.path
for subdirectory in ('projects', 'applications', 'lib'):
full_path = os.path.join(PROJECT_ROOT, subdirectory)
sys.path.insert(0, full_path)
# A list of people who get code error notifications. They will get an email
# if DEBUG=False and a view raises an exception.
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
# At the moment CATMAID doesn't support internationalization and all strings are
# expected to be in English.
LANGUAGE_CODE = 'en-gb'
# A tuple in the same format as ADMINS of people who get broken-link
# notifications when SEND_BROKEN_LINKS_EMAILS=True.
MANAGERS = ADMINS
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
# For API tokens. Disable if not using HTTPS:
'catmaid.middleware.AuthenticationHeaderExtensionMiddleware',
'catmaid.middleware.CsrfBypassTokenAuthenticationMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'catmaid.middleware.AnonymousAuthenticationMiddleware',
'catmaid.middleware.AjaxExceptionMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
# Instead of 'django.contrib.admin', in order to disable the automatic
# auto-discovery, which would interfere with django-adminplus.
'django.contrib.admin.apps.SimpleAdminConfig',
'django.contrib.staticfiles',
'django.contrib.gis',
'taggit',
'adminplus',
'guardian',
'catmaid',
'pgcompat',
'performancetests',
'pipeline',
'rest_framework',
'rest_framework.authtoken',
'rest_framework_swagger',
'channels',
'allauth',
'allauth.account',
'allauth.socialaccount',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(asctime)s %(message)s'
},
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
}
},
'loggers': {
'catmaid': {
'handlers': ['console'],
'level': 'INFO',
'propagate': True,
},
'catmaid.frontend': {
'handlers': ['console'],
'level': 'INFO',
'propagate': True,
},
'celery': {
'handlers': ['console'],
'level': 'INFO',
'propagate': True,
},
},
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [ # Extra folders
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.template.context_processors.request', # Needed for allauth
'django.contrib.messages.context_processors.messages'
],
}
},
]
ACCOUNT_EMAIL_VERIFICATION = 'none'
# Redirect to start page after a login
LOGIN_REDIRECT_URL = 'catmaid:home'
# The URL where requests are redirected after login
LOGIN_URL = '/accounts/login'
AUTHENTICATION_BACKENDS = (
# Default: allow login through regular Django users
'django.contrib.auth.backends.ModelBackend',
# Needed for object level permissions.
'guardian.backends.ObjectPermissionBackend',
# For API tokens. Disable if not using HTTPS:
'rest_framework.authentication.TokenAuthentication',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
# If a request is authenticated through an API token permissions are
# required, endpoints that require write/annotate permissions also
# need to have the TokenAnnotate permission. This is enforced also
# for admin accounts.
REQUIRE_EXTRA_TOKEN_PERMISSIONS = True
# Main ASGI router for CATMAID
ASGI_APPLICATION = "mysite.routing.application"
# Project ID of a dummy project that will keep all ontologies and
# classifications that are shared between multiple projects (and are
# thereby project independent).
ONTOLOGY_DUMMY_PROJECT_ID = -1
# Store datetimes as UTC by default. If stored datetimes have a timezone or
# offset, interpret it.
USE_TZ = True
# The current site in the django_site database table. This is used so that
# applications can hook into specific site(s) and a single database can manage
# content of multiple sites.
SITE_ID = 1
# Defines which type of spatial query should be used for treenodes. The
# available options are 'classic', 'postgis2d' and 'postgis3d'. Additionally,
# cache tables can be populated, which makes it possible to use the following node
# providers: cached_json, cached_json_text and cached_msgpack. If multiple are
# provided, node providers are asked one after the other until a result is
# returned. Entries can either be node provider names or tuples of
# the form (name, options) to provide options for a particular node provider.
NODE_PROVIDERS = [
'postgis3d'
]
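# A sketch of the tuple form mentioned above (the option key shown is
# hypothetical and only illustrates the (name, options) shape, it is not a
# documented CATMAID option):
# NODE_PROVIDERS = [
#     ('cached_msgpack', {'enabled': True}),
#     'postgis3d',
# ]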
# By default, prepared statements are disabled. If connection pooling is used,
# enabling prepared statements can further improve performance.
PREPARED_STATEMENTS = False
# History tables are created and populated by default. They keep track of every
# change in all CATMAID tables plus some additional ones. If this is not
# wanted, history tables can be disabled by setting HISTORY_TRACKING to False.
# Note that the tables will still exist, but will no longer be populated.
HISTORY_TRACKING = True
# Default user profile settings
PROFILE_INDEPENDENT_ONTOLOGY_WORKSPACE_IS_DEFAULT = False
PROFILE_SHOW_TEXT_LABEL_TOOL = False
PROFILE_SHOW_TAGGING_TOOL = False
PROFILE_SHOW_CROPPING_TOOL = False
PROFILE_SHOW_SEGMENTATION_TOOL = False
PROFILE_SHOW_TRACING_TOOL = False
PROFILE_SHOW_ONTOLOGY_TOOL = False
PROFILE_SHOW_ROI_TOOL = False
# Defines if a cropped image of a ROI should be created
# automatically when the ROI is created. If set to False
# such an image will be created when requested.
ROI_AUTO_CREATE_IMAGE = False
# A limit on the size of the result returned by a single spatial query. This
# determines the maximum number of nodes shown in the tracing overlay, so has
# severe worst-case performance implications for the database, web server, and
# client. Note that this is not a direct limit on the number of nodes in the
# result; that will be between 1x and 2x this value.
NODE_LIST_MAXIMUM_COUNT = 3500
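# With the value above, a single spatial query may therefore return between
# 3500 and 7000 nodes.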
# Default importer tile width, tile height and tile source type
IMPORTER_DEFAULT_DATA_SOURCE = 'filesystem'
IMPORTER_DEFAULT_TILE_WIDTH = 512
IMPORTER_DEFAULT_TILE_HEIGHT = 512
IMPORTER_DEFAULT_TILE_SOURCE_TYPE = 1
IMPORTER_DEFAULT_IMAGE_BASE = ''
# Some tools and widgets create files (e.g. cropping, ROIs, NeuroHDF5 and
# treenode export). These files will be created in a folder for each tool
# relative to the path defined in Django's MEDIA_ROOT variable. These are
# the default sub-folders, all of them need to be writable:
MEDIA_HDF5_SUBDIRECTORY = 'hdf5'
MEDIA_CROPPING_SUBDIRECTORY = 'cropping'
MEDIA_ROI_SUBDIRECTORY = 'roi'
MEDIA_TREENODE_SUBDIRECTORY = 'treenode_archives'
MEDIA_EXPORT_SUBDIRECTORY = 'export'
MEDIA_CACHE_SUBDIRECTORY = 'cache'
# Cropping output extension
CROPPING_OUTPUT_FILE_EXTENSION = "tiff"
CROPPING_OUTPUT_FILE_PREFIX = "crop_"
CROPPING_VERIFY_CERTIFICATES = True
# The maximum allowed size in Bytes for generated files. The cropping tool, for
# instance, uses this to cancel a request if the generated file grows larger
# than this. This defaults to 50 megabytes (52428800 bytes).
GENERATED_FILES_MAXIMUM_SIZE = 52428800
# The maximum allowed size in bytes for files uploaded for import as skeletons.
# The default is 5 megabytes.
IMPORTED_SKELETON_FILE_MAXIMUM_SIZE = 5242880
# The maximum allowed image size for imported images. The default is 3MB.
IMPORTED_IMAGE_FILE_MAXIMUM_SIZE = 3145728
# The maximum allowed body data size. The value below amounts to 80 MiB
# (10 * 8 * 1024**2 bytes).
DATA_UPLOAD_MAX_MEMORY_SIZE = 10 * 8 * 1024**2
# Specifies if user registration is allowed
USER_REGISTRATION_ALLOWED = False
# Whether newly registered users need to confirm their email address.
USER_REGISTRATION_EMAIL_CONFIRMATION_REQUIRED = False
USER_REGISTRATION_EMAIL_WELCOME_EMAIL = True
USER_REGISTRATION_EMAIL_REPLY_TO = None
USER_REGISTRATION_EMAIL_CONFIRMATION_EMAIL_TEXT = """{% autoescape off %}Hi {{ user.first_name }},
Please click on the following link to confirm your CATMAID registration:
https://{{ domain }}{% url 'catmaid:activate' uidb64=uid token=token %}
{% endautoescape %}
"""
USER_REGISTRATION_EMAIL_WELCOME_EMAIL_TEXT = """Hi {{ user.first_name }},
here I send you the details about your new CATMAID login. You can find
the training website here:
https://{{ domain }}{% url 'catmaid:home' %}
CATMAID supports the Chrome and Firefox browsers; other browsers might or
might not work. Once the actual website is loaded and you see the
CATMAID logo, you can sign in using the CATMAID username and password
you provided during registration.
Right next to your name there is a question mark icon, which you can use
to toggle some context specific help to get you started.
You should also see the visible projects listed in the main part of the
page. Clicking on either an image or link will open the main viewer at some
central location. Clicking the neuron-like structure in the top toolbar
of that viewer will open the Tracing Tool, which gives access to tracing
data and related tools.
Note that by default you will be in a "select" interaction mode, i.e.
you click on neurons in the 2D view, but you won't create and modify
data. If you want to trace neurons or change existing data, you would
need to switch to tracing mode, which you can do by clicking the leftmost
icon in the second toolbar from the top (the one connecting dots).
Don't hesitate to ask, if you run into problems or have questions and
comments.
Best,
The CATMAID admins
"""
USER_REGISTRATION_CONFIRM_TERMS = False
USER_REGISTRATION_CONFIRM_TERMS_TEXT = """By joining this CATMAID server you accept the following terms and conditions:
<ol>
<li>The provider of both this service and all displayed data is not responsible
for any data loss.</li>
</ol>
"""
# A new user's default groups
NEW_USER_DEFAULT_GROUPS = []
# Whether the creation of new users should cause the creation of a group with
# the same name. This group is used to manage edit permissions on the user's
# data.
NEW_USER_CREATE_USER_GROUP = True
# While pickle can cause security problems [1], we allow it for now and trust
# that the Celery server will only accept connections from CATMAID. To improve
# security, this should be changed though, see also [2].
# [1] http://docs.celeryproject.org/en/latest/userguide/security.html#serializers
# [2] https://github.com/catmaid/CATMAID/issues/630
CELERY_ACCEPT_CONTENT = ['pickle']
CELERY_TASK_SERIALIZER = 'pickle'
# The Celery beat schedule is defined in UTC by default. This can be changed
# using the following two variables:
# CELERY_TIMEZONE = 'America/New_York'
# CELERY_ENABLE_UTC = False
# The default set of periodic tasks
CELERY_BEAT_SCHEDULE = {
# Clean cropped stack directory every night at 23:30.
'daily-crop-data-cleanup': {
'task': 'catmaid.tasks.cleanup_cropped_stacks',
'schedule': crontab(hour=23, minute=30)
},
# Update project statistics every night at 23:45.
'daily-project-stats-summary-update': {
'task': 'catmaid.tasks.update_project_statistics_from_scratch',
'schedule': crontab(hour=23, minute=45)
},
'daily-inactive-user-update': {
'task': 'catmaid.tasks.deactivate_inactive_users',
'schedule': crontab(hour=00, minute=00)
},
}
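# As an illustration only (not part of the default schedule), further periodic
# tasks could be appended in settings.py using the same pattern, e.g.:
# CELERY_BEAT_SCHEDULE['weekly-project-stats-rebuild'] = {
#     'task': 'catmaid.tasks.update_project_statistics_from_scratch',
#     'schedule': crontab(day_of_week=1, hour=1, minute=0),
# }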
# We use django-pipeline to compress and reference JavaScript and CSS files. To
# make Pipeline integrate with staticfiles (and therefore collectstatic calls)
# the STATICFILES_STORAGE variable has to be set to:
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
# Adding PipelineFinder as asset discovery mechanism allows staticfiles to also
# discover files that were generated by Pipeline.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'pipeline.finders.PipelineFinder',
)
PIPELINE = {
# Use CSSMin as django-pipeline's CSS compressor
'CSS_COMPRESSOR': 'pipeline.compressors.cssmin.CSSMinCompressor',
    # Use no JS compressor for now
'JS_COMPRESSOR': None,
# Don't wrap JS files into anonymous functions. Our code isn't ready for
# this, yet.
'DISABLE_WRAPPER': True,
# All static files that are run through pipeline
'STYLESHEETS': pipelinefiles.STYLESHEETS,
'JAVASCRIPT': pipelinefiles.JAVASCRIPT
}
# Make a list of files that should be included directly (bypassing pipeline)
# and a list of pipeline identifiers for all others.
NON_COMPRESSED_FILE_IDS = list(pipelinefiles.non_pipeline_js)
NON_COMPRESSED_FILES = list(pipelinefiles.non_pipeline_js.values())
COPY_ONLY_FILE_IDS = set(pipelinefiles.copy_only_files)
STYLESHEET_IDS = list(pipelinefiles.STYLESHEETS)
COMPRESSED_FILE_IDS = [key for key in pipelinefiles.JAVASCRIPT \
if key not in NON_COMPRESSED_FILE_IDS \
and key not in COPY_ONLY_FILE_IDS]
INSTALLED_EXTENSIONS = tuple(pipelinefiles.installed_extensions)
# Make Git based version of CATMAID available as a settings field
VERSION = utils.get_version()
# Janelia rendering service. To activate add the following lines to your
# settings.py file:
# MIDDLEWARE += ('catmaid.middleware.JaneliaRenderMiddleware',)
# JANELIA_RENDER_SERVICE_URL = 'http://renderer.int.janelia.org:8080/render-ws/v1'
# JANELIA_RENDER_DEFAULT_STACK_RESOLUTION = (4,4,35)
# JANELIA_RENDER_STACK_TILE_WIDTH = 1024
# JANELIA_RENDER_STACK_TILE_HEIGHT = 1024
# DVID auto-discovery. To activate add the following lines to your settings.py
# file:
# MIDDLEWARE += ('catmaid.middleware.DVIDMiddleware',)
# DVID_URL = 'http://emdata2.int.janelia.org:7000'
# DVID_FORMAT = 'jpg:80'
# DVID_SHOW_NONDISPLAYABLE_REPOS = True
# In order to make Django work with the unmanaged models from djsopnet in tests,
# we use a custom testing runner to detect when running in a testing
# environment. The custom PostgreSQL database wrapper uses this flag to change
# its behavior.
TEST_RUNNER = 'custom_testrunner.TestSuiteRunner'
# By default, front end tests are disabled.
FRONT_END_TESTS_ENABLED = False
# By default GUI tests are disabled. Enable them by setting GUI_TESTS_ENABLED to
# True (done during CI).
GUI_TESTS_ENABLED = False
GUI_TESTS_REMOTE = False
# To simplify configuration for performance test CATMAID instances, the SCM URL
# used to create commit links is defined here. The {version} placeholder is
# replaced with the commit name.
PERFORMANCETEST_SCM_URL = "https://github.com/catmaid/CATMAID/commit/{version}"
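# For example, commit "abc123" would yield:
# https://github.com/catmaid/CATMAID/commit/abc123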
# This setting allows the WSGI back-end to serve static files. Using this in
# production is highly discouraged, as it is very inefficient and potentially
# insecure. It is used only to simplify continuous integration.
SERVE_STATIC = False
# Additional static files can be loaded by CATMAID if they are placed in the
# folder defined by STATIC_EXTENSION_ROOT. These files are not respected by
# Pipeline to allow updating them without running collectstatic. To use this
# feature, your webserver has to resolve the STATIC_EXTENSION_URL to this
# folder.
STATIC_EXTENSION_URL = "/staticext/"
STATIC_EXTENSION_ROOT = "/tmp"
STATIC_EXTENSION_FILES = []
# Default cookie suffix, should be customized if multiple CATMAID instances run
# on the same server, e.g. with:
# hashlib.md5(CATMAID_URL.encode('utf-8')).hexdigest()
COOKIE_SUFFIX = 'catmaid'
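# A minimal sketch of that customization (assumes CATMAID_URL is defined
# elsewhere in this settings module):
# import hashlib
# COOKIE_SUFFIX = hashlib.md5(CATMAID_URL.encode('utf-8')).hexdigest()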
# The CATMAID web client sends lists by sending each list element in its own
# field. Django allows 1000 fields by default. To allow large neuron lists, we
# need to disable this check for now.
DATA_UPLOAD_MAX_NUMBER_FIELDS = None
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
# If no authentication is possible, use guardian's anonymous user
'UNAUTHENTICATED_USER': 'guardian.utils.get_anonymous_user',
'VIEW_DESCRIPTION_FUNCTION': 'custom_rest_swagger_googledoc.get_googledocstring',
# Parser classes priority-wise for Swagger
'DEFAULT_PARSER_CLASSES': [
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser',
'rest_framework.parsers.JSONParser',
],
'DEFAULT_SCHEMA_CLASS': 'custom_swagger_schema.CustomSchema',
'URL_FORMAT_OVERRIDE': None,
}
SWAGGER_SETTINGS = {
'DOC_EXPANSION': 'list',
'APIS_SORTER': 'alpha'
}
# Needed for NRRD export
CATMAID_FULL_URL = ""
CATMAID_HTTP_AUTH_USER = None
CATMAID_HTTP_AUTH_PASS = None
# Whether or not to create default data views in the initial migration. This is
# mainly useful for setups using the JaneliaRender or DVID middleware.
CREATE_DEFAULT_DATAVIEWS = True
# NBLAST support
NBLAST_ALL_BY_ALL_MIN_SIZE = 10
MAX_PARALLEL_ASYNC_WORKERS = 1
# Intersection grid settings, dimensions in project coordinates (nm)
DEFAULT_CACHE_GRID_CELL_WIDTH = 25000
DEFAULT_CACHE_GRID_CELL_HEIGHT = 25000
DEFAULT_CACHE_GRID_CELL_DEPTH = 40
# Whether Postgres should emit "catmaid.spatial-update" events on changes of
# spatial data (e.g. inserts, updates and deletions of treenodes, connectors and
# connector links).
SPATIAL_UPDATE_NOTIFICATIONS = False
# On startup, the default client instance settings can be populated based on a
# JSON string, representing a list of objects with a "key" field and a "value"
# field. These settings will only be applied if they don't exist already.
INSTANCE_CLIENT_SETTINGS = None
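# A minimal sketch of the expected shape (the setting key shown is
# hypothetical):
# INSTANCE_CLIENT_SETTINGS = '[{"key": "example-setting", "value": true}]'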
# Whether the instance client settings set up in INSTANCE_CLIENT_SETTINGS
# should be reset every time CATMAID starts. Otherwise, they will only be
# applied if they don't exist already.
FORCE_CLIENT_SETTINGS = False
# If enabled, logged-in users will only see other users and groups if they share
# a project token.
PROJECT_TOKEN_USER_VISIBILITY = False
|
catmaid/CATMAID
|
django/projects/mysite/settings_base.py
|
Python
|
gpl-3.0
| 19,870
|
[
"NEURON"
] |
0e3e82784678eb6cdf24444a1ca6b50e3f6d746c21354db113cd2fed11a4a446
|
"""
This example shows the reconstruction of a binary image from a reduced set of
tomographic projections. Synthetic binary data of size 128x128 are generated,
and 40 projections along regularly-spaced angles are computed.
Different versions of the BP-tomo algorithm are compared:
- the uncoupled version: interactions between neighboring spins are used
only for four directions. For the other angles, we just impose that the
sum of magnetizations corresponds to the tomographic measurement. This
is the fastest method (the difference is even greater for a large image
size).
- the coupled version: spins are coupled on all lines, and an Ising chain is
solved for each line at each iteration. Note that different
values of the coupling factor J have to be used for the coupled and
uncoupled versions!
- the mean field version: pixels pass the same value to all factors to which
they belong.
All versions are supposed to give similar results. The mean field version
gives better results for a large number of measurements, where the mean field
approximation is very good.
You may play with the parameters `L` (linear size of the image), `n_dir` (number
of angles) and `n_pts` (the size of structures is proportional to
1/sqrt(n_pts)).
"""
print(__doc__)
import numpy as np
from scipy import sparse
from bptomo.bp_reconstruction import BP_step, BP_step_asym, BP_step_mf, \
_initialize_field, _calc_hatf_mf, _calc_hatf
from bptomo.build_projection_operator import build_projection_operator
from bptomo.util import generate_synthetic_data
import matplotlib.pyplot as plt
from time import time
def generate_data(L, n_dir, sigma=1, n_pts=100):
"""
Parameters
----------
L: int
linear size of the image
n_dir: int
number of angles
sigma: float
        absolute intensity of Gaussian noise added to the projections
n_pts: int
Parameter used to tune the size of structures in the image.
"""
# Generate synthetic binary data (pixels values in {-1, 1})
im = generate_synthetic_data(L, n_pts=n_pts)
im -= 0.5
im *= 2
X, Y = np.ogrid[:L, :L]
mask = ((X - L/2)**2 + (Y - L/2)**2 <= (L/2)**2)
im[~mask] = 0 # we only consider pixels inside a central circle
# Build projection data with noise
op = build_projection_operator(L, n_dir, mask=mask)
y = (op * im[mask][:, np.newaxis]).ravel()
# Add some noise
np.random.seed(0)
y += sigma*np.random.randn(*y.shape)
# lil sparse format is needed to retrieve indices efficiently
op = sparse.lil_matrix(op)
return im, mask, y, op
L, n_dir, sigma, n_pts = 128, 40, 1, 100
im, mask, y, op = generate_data(L, n_dir, sigma, n_pts)
n_iter = 18
# ------------------ Uncoupled lines -------------------------------------
# Prepare fields
sums_uncoupled = [] # total magnetization
h_m_to_px = _initialize_field(y, L, op) # measure to pixel
h_px_to_m, first_sum = _calc_hatf(h_m_to_px) # pixel to measure
h_ext = np.zeros_like(y) # external field
t0 = time()
for i in range(n_iter):
print "iteration %d / %d" %(i + 1, n_iter)
h_m_to_px, h_px_to_m, h_sum, h_ext = BP_step_asym(h_m_to_px, h_px_to_m,
y, op, L, hext=h_ext, J=2)
sums_uncoupled.append(h_sum)
t1 = time()
print "uncoupled lines: reconstruction done in %f s" %(t1 - t0)
# Compute segmentation error from ground truth
err_uncoupled = [((sumi > 0) != (im > 0)[mask]).sum() for sumi in
                 sums_uncoupled]
print("number of errors vs. iteration: ")
print(err_uncoupled)
res = np.zeros_like(im)
res[mask] = sums_uncoupled[-1]
# Plot result
plt.figure(figsize=(12, 4))
plt.subplot(131)
plt.imshow(im, cmap='gray')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(res, vmin=-10, vmax=10,
interpolation='nearest')
plt.axis('off')
plt.title('local magnetization')
plt.subplot(133)
plt.semilogy(err_uncoupled, 'o', ms=8)
plt.xlabel('$n$', fontsize=18)
plt.title('uncoupled lines: # of errors')
plt.show()
# ------------------ Coupled lines -------------------------------------
# Prepare fields
sums_coupled = [] # total magnetization
h_m_to_px = _initialize_field(y, L, op) # measure to pixel
h_px_to_m, first_sum = _calc_hatf(h_m_to_px) # pixel to measure
h_ext = np.zeros_like(y) # external field
t0 = time()
for i in range(n_iter):
print "iteration %d / %d" %(i + 1, n_iter)
h_m_to_px, h_px_to_m, h_sum, h_ext = BP_step(h_m_to_px, h_px_to_m,
y, op, L, hext=h_ext)
sums_coupled.append(h_sum)
t1 = time()
print "coupled lines: reconstruction done in %f s" %(t1 - t0)
# Compute segmentation error from ground truth
err_coupled = [((sumi > 0) != (im > 0)[mask]).sum() for sumi in sums_coupled]
print("number of errors vs. iteration: ")
print(err_coupled)
res = np.zeros_like(im)
res[mask] = sums_coupled[-1]
plt.figure(figsize=(12, 4))
plt.subplot(131)
plt.imshow(im, cmap='gray')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(res, vmin=-10, vmax=10,
interpolation='nearest')
plt.axis('off')
plt.title('local magnetization')
plt.subplot(133)
plt.semilogy(err_coupled, 'o', ms=8)
plt.xlabel('$n$', fontsize=18)
plt.title('coupled lines: # of errors')
plt.show()
# ------------------ Mean field -------------------------------------
# Prepare fields
sums_mf = [] # total magnetization
h_m_to_px = _initialize_field(y, L, op) # measure to pixel
h_px_to_m, first_sum = _calc_hatf(h_m_to_px) # pixel to measure
h_sum = _calc_hatf_mf(h_m_to_px) # initial mean-field magnetization
h_ext = np.zeros_like(y) # external field
t0 = time()
for i in range(n_iter):
print "iteration %d / %d" %(i + 1, n_iter)
h_m_to_px, h_sum, h_ext = BP_step_mf(h_m_to_px, h_sum, y, op, L, hext=h_ext)
sums_mf.append(h_sum)
t1 = time()
print "coupled lines: reconstruction done in %f s" %(t1 - t0)
# Compute segmentation error from ground truth
err_mf = [((sumi > 0) != (im > 0)[mask]).sum() for sumi in sums_mf]
print("number of errors vs. iteration: ")
print(err_mf)
res = np.zeros_like(im)
res[mask] = sums_mf[-1]
plt.figure(figsize=(12, 4))
plt.subplot(131)
plt.imshow(im, cmap='gray')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(res, vmin=-10, vmax=10,
interpolation='nearest')
plt.axis('off')
plt.title('local magnetization')
plt.subplot(133)
plt.semilogy(err_mf, 'o', ms=8)
plt.xlabel('$n$', fontsize=18)
plt.title('mean field: # of errors')
plt.show()
|
eddam/bp-for-tomo
|
demo_bp_flavors.py
|
Python
|
bsd-3-clause
| 6,604
|
[
"Gaussian"
] |
d361a8b6a21e34b45fddbb29309f951f932e0442038271c956f95ab0dc94460e
|
# Copyright 2011 Rackspace
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import fixtures
import mock
import mox
import netaddr
from oslo.config import cfg
from oslo import messaging
import six
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova import ipv6
from nova.network import floating_ips
from nova.network import linux_net
from nova.network import manager as network_manager
from nova.network import model as net_model
from nova import objects
from nova.objects import quotas as quotas_obj
from nova.objects import virtual_interface as vif_obj
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import quota
from nova import test
from nova.tests import fake_instance
from nova.tests import fake_ldap
from nova.tests import fake_network
from nova.tests import matchers
from nova.tests.objects import test_fixed_ip
from nova.tests.objects import test_floating_ip
from nova.tests.objects import test_network
from nova.tests.objects import test_service
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
HOST = "testhost"
FAKEUUID = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
fake_inst = fake_instance.fake_db_instance
networks = [{'id': 0,
'uuid': FAKEUUID,
'label': 'test0',
'injected': False,
'multi_host': False,
'cidr': '192.168.0.0/24',
'cidr_v6': '2001:db8::/64',
'gateway_v6': '2001:db8::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa0',
'bridge_interface': 'fake_fa0',
'gateway': '192.168.0.1',
'dhcp_server': '192.168.0.1',
'broadcast': '192.168.0.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.0.2',
'vpn_public_port': '22',
'vpn_private_address': '10.0.0.2'},
{'id': 1,
'uuid': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'label': 'test1',
'injected': False,
'multi_host': False,
'cidr': '192.168.1.0/24',
'cidr_v6': '2001:db9::/64',
'gateway_v6': '2001:db9::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa1',
'bridge_interface': 'fake_fa1',
'gateway': '192.168.1.1',
'dhcp_server': '192.168.1.1',
'broadcast': '192.168.1.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.1.2',
'vpn_public_port': '22',
'vpn_private_address': '10.0.0.2'}]
fixed_ips = [{'id': 0,
'network_id': 0,
'address': '192.168.0.100',
'instance_uuid': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []},
{'id': 0,
'network_id': 1,
'address': '192.168.1.100',
'instance_uuid': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []},
{'id': 0,
'network_id': 1,
'address': '2001:db9:0:1::10',
'instance_uuid': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []}]
flavor = {'id': 0,
'rxtx_cap': 3}
floating_ip_fields = {'id': 0,
'address': '192.168.10.100',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': 0,
'project_id': None,
'auto_assigned': False}
vifs = [{'id': 0,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:00',
'uuid': '00000000-0000-0000-0000-0000000000000000',
'network_id': 0,
'instance_uuid': 0},
{'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:01',
'uuid': '00000000-0000-0000-0000-0000000000000001',
'network_id': 1,
'instance_uuid': 0},
{'id': 2,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:02',
'uuid': '00000000-0000-0000-0000-0000000000000002',
'network_id': 2,
'instance_uuid': 0}]
class FlatNetworkTestCase(test.TestCase):
def setUp(self):
super(FlatNetworkTestCase, self).setUp()
self.tempdir = self.useFixture(fixtures.TempDir()).path
self.flags(log_dir=self.tempdir)
self.flags(use_local=True, group='conductor')
self.network = network_manager.FlatManager(host=HOST)
self.network.instance_dns_domain = ''
self.network.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=False)
def test_get_instance_nw_info(self):
fake_get_instance_nw_info = fake_network.fake_get_instance_nw_info
nw_info = fake_get_instance_nw_info(self.stubs, 0, 2)
self.assertFalse(nw_info)
nw_info = fake_get_instance_nw_info(self.stubs, 1, 2)
for i, vif in enumerate(nw_info):
nid = i + 1
check = {'bridge': 'fake_br%d' % nid,
'cidr': '192.168.%s.0/24' % nid,
'cidr_v6': '2001:db8:0:%x::/64' % nid,
'id': '00000000-0000-0000-0000-00000000000000%02d' % nid,
'multi_host': False,
'injected': False,
'bridge_interface': None,
'vlan': None,
'broadcast': '192.168.%d.255' % nid,
'dhcp_server': '192.168.1.1',
'dns': ['192.168.%d.3' % nid, '192.168.%d.4' % nid],
'gateway': '192.168.%d.1' % nid,
'gateway_v6': '2001:db8:0:1::1',
'label': 'test%d' % nid,
'mac': 'DE:AD:BE:EF:00:%02x' % nid,
'rxtx_cap': 30,
'vif_type': net_model.VIF_TYPE_BRIDGE,
'vif_devname': None,
'vif_uuid':
'00000000-0000-0000-0000-00000000000000%02d' % nid,
'ovs_interfaceid': None,
'qbh_params': None,
'qbg_params': None,
'should_create_vlan': False,
'should_create_bridge': False,
'ip': '192.168.%d.%03d' % (nid, nid + 99),
'ip_v6': '2001:db8:0:1::%x' % nid,
'netmask': '255.255.255.0',
'netmask_v6': 64,
'physical_network': None,
}
network = vif['network']
net_v4 = vif['network']['subnets'][0]
net_v6 = vif['network']['subnets'][1]
vif_dict = dict(bridge=network['bridge'],
cidr=net_v4['cidr'],
cidr_v6=net_v6['cidr'],
id=vif['id'],
multi_host=network.get_meta('multi_host', False),
injected=network.get_meta('injected', False),
bridge_interface=
network.get_meta('bridge_interface'),
vlan=network.get_meta('vlan'),
broadcast=str(net_v4.as_netaddr().broadcast),
dhcp_server=network.get_meta('dhcp_server',
net_v4['gateway']['address']),
dns=[ip['address'] for ip in net_v4['dns']],
gateway=net_v4['gateway']['address'],
gateway_v6=net_v6['gateway']['address'],
label=network['label'],
mac=vif['address'],
rxtx_cap=vif.get_meta('rxtx_cap'),
vif_type=vif['type'],
vif_devname=vif.get('devname'),
vif_uuid=vif['id'],
ovs_interfaceid=vif.get('ovs_interfaceid'),
qbh_params=vif.get('qbh_params'),
qbg_params=vif.get('qbg_params'),
should_create_vlan=
network.get_meta('should_create_vlan', False),
should_create_bridge=
network.get_meta('should_create_bridge',
False),
ip=net_v4['ips'][i]['address'],
ip_v6=net_v6['ips'][i]['address'],
netmask=str(net_v4.as_netaddr().netmask),
netmask_v6=net_v6.as_netaddr()._prefixlen,
physical_network=
network.get_meta('physical_network', None))
self.assertThat(vif_dict, matchers.DictMatches(check))
def test_validate_networks(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100'),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'192.168.0.100')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[1])
ip['network'] = dict(test_network.fake_network,
**networks[1])
ip['instance_uuid'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(ip)
ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[0])
ip['network'] = dict(test_network.fake_network,
**networks[0])
ip['instance_uuid'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(ip)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_valid_fixed_ipv6(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'2001:db9:0:1::10')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **networks[1])])
ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[2])
ip['network'] = dict(test_network.fake_network,
**networks[1])
ip['instance_uuid'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(ip)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_reserved(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
nets = self.network.create_networks(context_admin, 'fake',
'192.168.0.0/24', False, 1,
256, None, None, None, None, None)
self.assertEqual(1, len(nets))
network = nets[0]
self.assertEqual(4, db.network_count_reserved_ips(context_admin,
network['id']))
def test_validate_reserved_start_end(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
nets = self.network.create_networks(context_admin, 'fake',
'192.168.0.0/24', False, 1,
256, dhcp_server='192.168.0.11',
allowed_start='192.168.0.10',
allowed_end='192.168.0.245')
self.assertEqual(1, len(nets))
network = nets[0]
        # gateway defaults to the start of the allowed range (allowed_start)
self.assertEqual('192.168.0.10', network['gateway'])
# vpn_server doesn't conflict with dhcp_start
self.assertEqual('192.168.0.12', network['vpn_private_address'])
# dhcp_start doesn't conflict with dhcp_server
self.assertEqual('192.168.0.13', network['dhcp_start'])
# NOTE(vish): 10 from the beginning, 10 from the end, and
# 1 for the gateway, 1 for the dhcp server,
# 1 for the vpn server
self.assertEqual(23, db.network_count_reserved_ips(context_admin,
network['id']))
def test_validate_reserved_start_out_of_range(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.AddressOutOfRange,
self.network.create_networks,
context_admin, 'fake', '192.168.0.0/24', False,
1, 256, allowed_start='192.168.1.10')
def test_validate_reserved_end_invalid(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.InvalidAddress,
self.network.create_networks,
context_admin, 'fake', '192.168.0.0/24', False,
1, 256, allowed_end='invalid')
def test_validate_cidr_invalid(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.InvalidCidr,
self.network.create_networks,
context_admin, 'fake', 'invalid', False,
1, 256)
def test_validate_non_int_size(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.InvalidIntValue,
self.network.create_networks,
context_admin, 'fake', '192.168.0.0/24', False,
1, 'invalid')
def test_validate_networks_none_requested_networks(self):
self.network.validate_networks(self.context, None)
def test_validate_networks_empty_requested_networks(self):
requested_networks = []
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_invalid_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100.1'),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'192.168.0.100.1')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks, self.context,
requested_networks)
def test_validate_networks_empty_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
''),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks,
self.context, requested_networks)
def test_validate_networks_none_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
None),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
None)]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
@mock.patch('nova.objects.quotas.Quotas.reserve')
def test_add_fixed_ip_instance_using_id_without_vpn(self, reserve):
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
lambda *a, **kw: None)
self.mox.StubOutWithMock(db, 'network_get')
self.mox.StubOutWithMock(db, 'network_update')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
fixed = dict(test_fixed_ip.fake_fixed_ip,
address='192.168.0.101')
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
instance_uuid=mox.IgnoreArg(),
host=None).AndReturn(fixed)
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
db.instance_get_by_uuid(self.context,
mox.IgnoreArg(), use_slave=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(inst)
db.network_get(mox.IgnoreArg(),
mox.IgnoreArg(),
project_only=mox.IgnoreArg()
).AndReturn(dict(test_network.fake_network,
**networks[0]))
db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
networks[0]['id'])
exp_project, exp_user = quotas_obj.ids_from_instance(self.context,
inst)
reserve.assert_called_once_with(self.context, fixed_ips=1,
project_id=exp_project,
user_id=exp_user)
@mock.patch('nova.objects.quotas.Quotas.reserve')
def test_add_fixed_ip_instance_using_uuid_without_vpn(self, reserve):
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
lambda *a, **kw: None)
self.mox.StubOutWithMock(db, 'network_get_by_uuid')
self.mox.StubOutWithMock(db, 'network_update')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
fixed = dict(test_fixed_ip.fake_fixed_ip,
address='192.168.0.101')
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
instance_uuid=mox.IgnoreArg(),
host=None).AndReturn(fixed)
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
db.instance_get_by_uuid(self.context,
mox.IgnoreArg(), use_slave=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(inst)
db.network_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()
).AndReturn(dict(test_network.fake_network,
**networks[0]))
db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
networks[0]['uuid'])
exp_project, exp_user = quotas_obj.ids_from_instance(self.context,
inst)
reserve.assert_called_once_with(self.context, fixed_ips=1,
project_id=exp_project,
user_id=exp_user)
def test_mini_dns_driver(self):
zone1 = "example.org"
zone2 = "example.com"
driver = self.network.instance_dns_manager
driver.create_entry("hostone", "10.0.0.1", "A", zone1)
driver.create_entry("hosttwo", "10.0.0.2", "A", zone1)
driver.create_entry("hostthree", "10.0.0.3", "A", zone1)
driver.create_entry("hostfour", "10.0.0.4", "A", zone1)
driver.create_entry("hostfive", "10.0.0.5", "A", zone2)
driver.delete_entry("hostone", zone1)
driver.modify_address("hostfour", "10.0.0.1", zone1)
driver.modify_address("hostthree", "10.0.0.1", zone1)
names = driver.get_entries_by_address("10.0.0.1", zone1)
self.assertEqual(len(names), 2)
self.assertIn('hostthree', names)
self.assertIn('hostfour', names)
names = driver.get_entries_by_address("10.0.0.5", zone2)
self.assertEqual(len(names), 1)
self.assertIn('hostfive', names)
addresses = driver.get_entries_by_name("hosttwo", zone1)
self.assertEqual(len(addresses), 1)
self.assertIn('10.0.0.2', addresses)
self.assertRaises(exception.InvalidInput,
driver.create_entry,
"hostname",
"10.10.10.10",
"invalidtype",
zone1)
def test_mini_dns_driver_with_mixed_case(self):
zone1 = "example.org"
driver = self.network.instance_dns_manager
driver.create_entry("HostTen", "10.0.0.10", "A", zone1)
addresses = driver.get_entries_by_address("10.0.0.10", zone1)
self.assertEqual(len(addresses), 1)
for n in addresses:
driver.delete_entry(n, zone1)
addresses = driver.get_entries_by_address("10.0.0.10", zone1)
self.assertEqual(len(addresses), 0)
@mock.patch('nova.objects.quotas.Quotas.reserve')
def test_instance_dns(self, reserve):
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
lambda *a, **kw: None)
fixedip = dict(test_fixed_ip.fake_fixed_ip,
address='192.168.0.101')
self.mox.StubOutWithMock(db, 'network_get_by_uuid')
self.mox.StubOutWithMock(db, 'network_update')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
instance_uuid=mox.IgnoreArg(),
host=None
).AndReturn(fixedip)
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
db.instance_get_by_uuid(self.context,
mox.IgnoreArg(), use_slave=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(inst)
db.network_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()
).AndReturn(dict(test_network.fake_network,
**networks[0]))
db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
networks[0]['uuid'])
instance_manager = self.network.instance_dns_manager
addresses = instance_manager.get_entries_by_name(HOST,
self.network.instance_dns_domain)
self.assertEqual(len(addresses), 1)
self.assertEqual(addresses[0], fixedip['address'])
addresses = instance_manager.get_entries_by_name(FAKEUUID,
self.network.instance_dns_domain)
self.assertEqual(len(addresses), 1)
self.assertEqual(addresses[0], fixedip['address'])
exp_project, exp_user = quotas_obj.ids_from_instance(self.context,
inst)
reserve.assert_called_once_with(self.context, fixed_ips=1,
project_id=exp_project,
user_id=exp_user)
def test_allocate_floating_ip(self):
self.assertIsNone(self.network.allocate_floating_ip(self.context,
1, None))
def test_deallocate_floating_ip(self):
self.assertIsNone(self.network.deallocate_floating_ip(self.context,
1, None))
def test_associate_floating_ip(self):
self.assertIsNone(self.network.associate_floating_ip(self.context,
None, None))
def test_disassociate_floating_ip(self):
self.assertIsNone(self.network.disassociate_floating_ip(self.context,
None, None))
def test_get_networks_by_uuids_ordering(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
res = self.network._get_networks_by_uuids(self.context,
requested_networks)
self.assertEqual(res[0]['id'], 1)
self.assertEqual(res[1]['id'], 0)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.quotas.Quotas.reserve')
@mock.patch('nova.objects.quotas.ids_from_instance')
def test_allocate_calculates_quota_auth(self, util_method, reserve,
get_by_uuid):
inst = objects.Instance()
inst['uuid'] = 'nosuch'
get_by_uuid.return_value = inst
reserve.side_effect = exception.OverQuota(overs='testing')
util_method.return_value = ('foo', 'bar')
self.assertRaises(exception.FixedIpLimitExceeded,
self.network.allocate_fixed_ip,
self.context, 123, {'uuid': 'nosuch'})
util_method.assert_called_once_with(self.context, inst)
@mock.patch('nova.objects.fixed_ip.FixedIP.get_by_address')
@mock.patch('nova.objects.quotas.Quotas.reserve')
@mock.patch('nova.objects.quotas.ids_from_instance')
def test_deallocate_calculates_quota_auth(self, util_method, reserve,
get_by_address):
inst = objects.Instance(uuid='fake-uuid')
fip = objects.FixedIP(instance_uuid='fake-uuid',
virtual_interface_id=1)
get_by_address.return_value = fip
util_method.return_value = ('foo', 'bar')
# This will fail right after the reserve call when it tries
# to look up the fake instance we created above
self.assertRaises(exception.InstanceNotFound,
self.network.deallocate_fixed_ip,
self.context, '1.2.3.4', instance=inst)
util_method.assert_called_once_with(self.context, inst)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
def test_allocate_fixed_ip_passes_string_address(self, mock_associate,
mock_get):
mock_associate.side_effect = test.TestingException
instance = objects.Instance(context=self.context)
instance.create()
mock_get.return_value = instance
self.assertRaises(test.TestingException,
self.network.allocate_fixed_ip,
self.context, instance.uuid,
{'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
address=netaddr.IPAddress('1.2.3.4'))
mock_associate.assert_called_once_with(self.context,
'1.2.3.4',
instance.uuid,
1)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.virtual_interface.VirtualInterface'
'.get_by_instance_and_network')
@mock.patch('nova.objects.fixed_ip.FixedIP.disassociate')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
@mock.patch('nova.objects.fixed_ip.FixedIP.save')
def test_allocate_fixed_ip_cleanup(self,
mock_fixedip_save,
mock_fixedip_associate,
mock_fixedip_disassociate,
mock_vif_get,
mock_instance_get):
address = netaddr.IPAddress('1.2.3.4')
fip = objects.FixedIP(instance_uuid='fake-uuid',
address=address,
virtual_interface_id=1)
mock_fixedip_associate.return_value = fip
instance = objects.Instance(context=self.context)
instance.create()
mock_instance_get.return_value = instance
mock_vif_get.return_value = vif_obj.VirtualInterface(
instance_uuid='fake-uuid', id=1)
with contextlib.nested(
mock.patch.object(self.network, '_setup_network_on_host'),
mock.patch.object(self.network, 'instance_dns_manager'),
mock.patch.object(self.network,
'_do_trigger_security_group_members_refresh_for_instance')
) as (mock_setup_network, mock_dns_manager, mock_ignored):
mock_setup_network.side_effect = test.TestingException
self.assertRaises(test.TestingException,
self.network.allocate_fixed_ip,
self.context, instance.uuid,
{'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
address=address)
mock_dns_manager.delete_entry.assert_has_calls([
mock.call(instance.display_name, ''),
mock.call(instance.uuid, '')
])
mock_fixedip_disassociate.assert_called_once_with(self.context)
class FlatDHCPNetworkTestCase(test.TestCase):
def setUp(self):
super(FlatDHCPNetworkTestCase, self).setUp()
self.useFixture(test.SampleNetworks())
self.flags(use_local=True, group='conductor')
self.network = network_manager.FlatDHCPManager(host=HOST)
self.network.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=False)
self.context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
@mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id')
@mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host')
@mock.patch('nova.network.linux_net.iptables_manager._apply')
def test_init_host_iptables_defer_apply(self, iptable_apply,
floating_get_by_host,
fixed_get_by_id):
def get_by_id(context, fixed_ip_id, **kwargs):
net = objects.Network(bridge='testbridge',
cidr='192.168.1.0/24')
if fixed_ip_id == 1:
return objects.FixedIP(address='192.168.1.4',
network=net)
elif fixed_ip_id == 2:
return objects.FixedIP(address='192.168.1.5',
network=net)
def fake_apply():
fake_apply.count += 1
fake_apply.count = 0
ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1)
float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2)
float1._context = ctxt
float2._context = ctxt
iptable_apply.side_effect = fake_apply
floating_get_by_host.return_value = [float1, float2]
fixed_get_by_id.side_effect = get_by_id
self.network.init_host()
self.assertEqual(1, fake_apply.count)
class VlanNetworkTestCase(test.TestCase):
def setUp(self):
super(VlanNetworkTestCase, self).setUp()
self.useFixture(test.SampleNetworks())
self.flags(use_local=True, group='conductor')
self.network = network_manager.VlanManager(host=HOST)
self.network.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=False)
self.context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
def test_quota_driver_type(self):
self.assertEqual(objects.QuotasNoOp,
self.network.quotas_cls)
def test_vpn_allocate_fixed_ip(self):
self.mox.StubOutWithMock(db, 'fixed_ip_associate')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
fixed = dict(test_fixed_ip.fake_fixed_ip,
address='192.168.0.1')
db.fixed_ip_associate(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
network_id=mox.IgnoreArg(),
reserved=True).AndReturn(fixed)
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg(), use_slave=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
uuid=FAKEUUID))
self.mox.ReplayAll()
network = objects.Network._from_db_object(
self.context, objects.Network(),
dict(test_network.fake_network, **networks[0]))
network.vpn_private_address = '192.168.0.2'
self.network.allocate_fixed_ip(self.context, FAKEUUID, network,
vpn=True)
def test_vpn_allocate_fixed_ip_no_network_id(self):
network = dict(networks[0])
network['vpn_private_address'] = '192.168.0.2'
network['id'] = None
instance = db.instance_create(self.context, {})
self.assertRaises(exception.FixedIpNotFoundForNetwork,
self.network.allocate_fixed_ip,
self.context_admin,
instance['uuid'],
network,
vpn=True)
def test_allocate_fixed_ip(self):
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
lambda *a, **kw: None)
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
fixed = dict(test_fixed_ip.fake_fixed_ip,
address='192.168.0.1')
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
instance_uuid=mox.IgnoreArg(),
host=None).AndReturn(fixed)
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg(), use_slave=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
uuid=FAKEUUID))
self.mox.ReplayAll()
network = objects.Network._from_db_object(
self.context, objects.Network(),
dict(test_network.fake_network, **networks[0]))
network.vpn_private_address = '192.168.0.2'
self.network.allocate_fixed_ip(self.context, FAKEUUID, network)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
def test_allocate_fixed_ip_passes_string_address(self, mock_associate,
mock_get):
mock_associate.side_effect = test.TestingException
instance = objects.Instance(context=self.context)
instance.create()
mock_get.return_value = instance
self.assertRaises(test.TestingException,
self.network.allocate_fixed_ip,
self.context, instance.uuid,
{'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
address=netaddr.IPAddress('1.2.3.4'))
mock_associate.assert_called_once_with(self.context,
'1.2.3.4',
instance.uuid,
1)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
def test_allocate_fixed_ip_passes_string_address_vpn(self, mock_associate,
mock_get):
mock_associate.side_effect = test.TestingException
instance = objects.Instance(context=self.context)
instance.create()
mock_get.return_value = instance
self.assertRaises(test.TestingException,
self.network.allocate_fixed_ip,
self.context, instance.uuid,
{'cidr': '24', 'id': 1, 'uuid': 'nosuch',
'vpn_private_address': netaddr.IPAddress('1.2.3.4')
}, vpn=1)
mock_associate.assert_called_once_with(self.context,
'1.2.3.4',
instance.uuid,
1, reserved=True)
def test_create_networks_too_big(self):
self.assertRaises(ValueError, self.network.create_networks, None,
num_networks=4094, vlan_start=1)
def test_create_networks_too_many(self):
self.assertRaises(ValueError, self.network.create_networks, None,
num_networks=100, vlan_start=1,
cidr='192.168.0.1/24', network_size=100)
def test_duplicate_vlan_raises(self):
# VLAN 100 is already used and we force the network to be created
# in that vlan (vlan=100).
self.assertRaises(exception.DuplicateVlan,
self.network.create_networks,
self.context_admin, label="fake", num_networks=1,
vlan=100, cidr='192.168.0.1/24', network_size=100)
def test_vlan_start(self):
        # VLAN 100 and 101 are used, so this network should be created in 102
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=1,
vlan_start=100, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual(networks[0]["vlan"], 102)
def test_vlan_start_multiple(self):
        # VLAN 100 and 101 are used, so these networks should be created in 102
# and 103
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=2,
vlan_start=100, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual(networks[0]["vlan"], 102)
self.assertEqual(networks[1]["vlan"], 103)
def test_vlan_start_used(self):
# VLAN 100 and 101 are used, but vlan_start=99.
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=1,
vlan_start=99, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual(networks[0]["vlan"], 102)
def test_vlan_parameter(self):
        # vlan parameter cannot be greater than 4094
exc = self.assertRaises(ValueError,
self.network.create_networks,
self.context_admin, label="fake",
num_networks=1,
vlan=4095, cidr='192.168.0.1/24')
error_msg = 'The vlan number cannot be greater than 4094'
self.assertIn(error_msg, six.text_type(exc))
        # vlan parameter cannot be less than 1
exc = self.assertRaises(ValueError,
self.network.create_networks,
self.context_admin, label="fake",
num_networks=1,
vlan=0, cidr='192.168.0.1/24')
error_msg = 'The vlan number cannot be less than 1'
self.assertIn(error_msg, six.text_type(exc))
def test_vlan_be_integer(self):
# vlan must be an integer
exc = self.assertRaises(ValueError,
self.network.create_networks,
self.context_admin, label="fake",
num_networks=1,
vlan='fake', cidr='192.168.0.1/24')
error_msg = 'vlan must be an integer'
self.assertIn(error_msg, six.text_type(exc))
@mock.patch('nova.db.network_get')
def test_validate_networks(self, net_get):
def network_get(_context, network_id, project_only='allow_none'):
return dict(test_network.fake_network, **networks[network_id])
net_get.side_effect = network_get
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
self.mox.StubOutWithMock(db, "fixed_ip_get_by_address")
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100'),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'192.168.0.100')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
db_fixed1 = dict(test_fixed_ip.fake_fixed_ip,
network_id=networks[1]['id'],
network=dict(test_network.fake_network,
**networks[1]),
instance_uuid=None)
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(db_fixed1)
db_fixed2 = dict(test_fixed_ip.fake_fixed_ip,
network_id=networks[0]['id'],
network=dict(test_network.fake_network,
**networks[0]),
instance_uuid=None)
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(db_fixed2)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_none_requested_networks(self):
self.network.validate_networks(self.context, None)
def test_validate_networks_empty_requested_networks(self):
requested_networks = []
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_invalid_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100.1'),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'192.168.0.100.1')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks, self.context,
requested_networks)
def test_validate_networks_empty_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', ''),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks,
self.context, requested_networks)
def test_validate_networks_none_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', None),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_floating_ip_owned_by_project(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
# raises because floating_ip project_id is None
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=None)
self.assertRaises(exception.Forbidden,
self.network._floating_ip_owned_by_project,
ctxt,
floating_ip)
# raises because floating_ip project_id is not equal to ctxt project_id
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=ctxt.project_id + '1')
self.assertRaises(exception.Forbidden,
self.network._floating_ip_owned_by_project,
ctxt,
floating_ip)
# does not raise (floating ip is owned by ctxt project)
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=ctxt.project_id)
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
ctxt = context.RequestContext(None, None,
is_admin=True)
# does not raise (ctxt is admin)
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=None)
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
# does not raise (ctxt is admin)
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id='testproject')
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
def test_allocate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake_allocate_address(*args, **kwargs):
return {'address': '10.0.0.1', 'project_id': ctxt.project_id}
self.stubs.Set(self.network.db, 'floating_ip_allocate_address',
fake_allocate_address)
self.network.allocate_floating_ip(ctxt, ctxt.project_id)
@mock.patch('nova.quota.QUOTAS.reserve')
@mock.patch('nova.quota.QUOTAS.commit')
def test_deallocate_floating_ip(self, mock_commit, mock_reserve):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip)
def fake2(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1', fixed_ip_id=1)
def fake3(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1', fixed_ip_id=None,
project_id=ctxt.project_id)
self.stubs.Set(self.network.db, 'floating_ip_deallocate', fake1)
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
# this time should raise because floating ip is associated to fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.assertRaises(exception.FloatingIpAssociated,
self.network.deallocate_floating_ip,
ctxt,
mox.IgnoreArg())
mock_reserve.return_value = 'reserve'
# this time should not raise
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
self.network.deallocate_floating_ip(ctxt, ctxt.project_id)
mock_commit.assert_called_once_with(ctxt, 'reserve',
project_id='testproject')
@mock.patch('nova.db.fixed_ip_get')
def test_associate_floating_ip(self, fixed_get):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
address='10.0.0.1',
network=test_network.fake_network)
# floating ip that's already associated
def fake2(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1',
pool='nova',
interface='eth0',
fixed_ip_id=1)
# floating ip that isn't associated
def fake3(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1',
pool='nova',
interface='eth0',
fixed_ip_id=None)
# fixed ip with remote host
def fake4(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
address='10.0.0.1',
pool='nova',
instance_uuid=FAKEUUID,
interface='eth0',
network_id=123)
def fake4_network(*args, **kwargs):
return dict(test_network.fake_network,
multi_host=False, host='jibberjabber')
# fixed ip with local host
def fake5(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
address='10.0.0.1',
pool='nova',
instance_uuid=FAKEUUID,
interface='eth0',
network_id=1234)
def fake5_network(*args, **kwargs):
return dict(test_network.fake_network,
multi_host=False, host='testhost')
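        # fake6 stands in for the RPC client call and records that the
        # work was dispatched remotely; fake7 records local execution.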
def fake6(ctxt, method, **kwargs):
self.local = False
def fake7(*args, **kwargs):
self.local = True
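        # fake8 mimics the L3 driver failing because the interface is
        # missing, which should surface as NoFloatingIpInterface.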
def fake8(*args, **kwargs):
raise processutils.ProcessExecutionError('',
'Cannot find device "em0"\n')
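        # fake9 is a sentinel proving disassociate_floating_ip was called.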
def fake9(*args, **kwargs):
raise test.TestingException()
# raises because interface doesn't exist
self.stubs.Set(self.network.db,
'floating_ip_fixed_ip_associate',
fake1)
self.stubs.Set(self.network.db, 'floating_ip_disassociate', fake1)
self.stubs.Set(self.network.driver, 'ensure_floating_forward', fake8)
self.assertRaises(exception.NoFloatingIpInterface,
self.network._associate_floating_ip,
ctxt,
'1.2.3.4',
'1.2.3.5',
mox.IgnoreArg(),
mox.IgnoreArg())
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
# raises because floating_ip is already associated to a fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.stubs.Set(self.network, 'disassociate_floating_ip', fake9)
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address='1.2.3.4',
instance_uuid='fake_uuid',
network=test_network.fake_network)
# doesn't raise because we exit early if the address is the same
self.network.associate_floating_ip(ctxt, mox.IgnoreArg(), '1.2.3.4')
# raises because we call disassociate which is mocked
self.assertRaises(test.TestingException,
self.network.associate_floating_ip,
ctxt,
mox.IgnoreArg(),
'new')
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
# does not raise and makes call remotely
self.local = True
self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake4)
self.stubs.Set(self.network.db, 'network_get', fake4_network)
self.stubs.Set(self.network.network_rpcapi.client, 'prepare',
lambda **kw: self.network.network_rpcapi.client)
self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6)
self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
mox.IgnoreArg())
self.assertFalse(self.local)
# does not raise and makes call locally
self.local = False
self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake5)
self.stubs.Set(self.network.db, 'network_get', fake5_network)
self.stubs.Set(self.network, '_associate_floating_ip', fake7)
self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
mox.IgnoreArg())
self.assertTrue(self.local)
def test_add_floating_ip_nat_before_bind(self):
# Tried to verify order with documented mox record/verify
# functionality, but it doesn't seem to work since I can't make it
# fail. I'm using stubs and a flag for now, but if this mox feature
# can be made to work, it would be a better way to test this.
#
# self.mox.StubOutWithMock(self.network.driver,
# 'ensure_floating_forward')
# self.mox.StubOutWithMock(self.network.driver, 'bind_floating_ip')
#
# self.network.driver.ensure_floating_forward(mox.IgnoreArg(),
# mox.IgnoreArg(),
# mox.IgnoreArg(),
# mox.IgnoreArg())
# self.network.driver.bind_floating_ip(mox.IgnoreArg(),
# mox.IgnoreArg())
# self.mox.ReplayAll()
nat_called = [False]
def fake_nat(*args, **kwargs):
nat_called[0] = True
def fake_bind(*args, **kwargs):
self.assertTrue(nat_called[0])
self.stubs.Set(self.network.driver,
'ensure_floating_forward',
fake_nat)
self.stubs.Set(self.network.driver, 'bind_floating_ip', fake_bind)
self.network.l3driver.add_floating_ip('fakefloat',
'fakefixed',
'fakeiface',
'fakenet')
@mock.patch('nova.db.floating_ip_get_all_by_host')
@mock.patch('nova.db.fixed_ip_get')
def _test_floating_ip_init_host(self, fixed_get, floating_get,
public_interface, expected_arg):
floating_get.return_value = [
dict(test_floating_ip.fake_floating_ip,
interface='foo',
address='1.2.3.4'),
dict(test_floating_ip.fake_floating_ip,
interface='fakeiface',
address='1.2.3.5',
fixed_ip_id=1),
dict(test_floating_ip.fake_floating_ip,
interface='bar',
address='1.2.3.6',
fixed_ip_id=2),
]
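        # Only the second entry both has a fixed_ip_id and resolves below;
        # the first is unassociated and the third raises FixedIpNotFound,
        # so exactly one add_floating_ip call is expected.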
def fixed_ip_get(_context, fixed_ip_id, get_network):
if fixed_ip_id == 1:
return dict(test_fixed_ip.fake_fixed_ip,
address='1.2.3.4',
network=test_network.fake_network)
raise exception.FixedIpNotFound(id=fixed_ip_id)
fixed_get.side_effect = fixed_ip_get
self.mox.StubOutWithMock(self.network.l3driver, 'add_floating_ip')
self.flags(public_interface=public_interface)
self.network.l3driver.add_floating_ip(netaddr.IPAddress('1.2.3.5'),
netaddr.IPAddress('1.2.3.4'),
expected_arg,
mox.IsA(objects.Network))
self.mox.ReplayAll()
self.network.init_host_floating_ips()
self.mox.UnsetStubs()
self.mox.VerifyAll()
def test_floating_ip_init_host_without_public_interface(self):
self._test_floating_ip_init_host(public_interface=False,
expected_arg='fakeiface')
def test_floating_ip_init_host_with_public_interface(self):
self._test_floating_ip_init_host(public_interface='fooiface',
expected_arg='fooiface')
def test_disassociate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
pass
# floating ip that isn't associated
def fake2(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1',
pool='nova',
interface='eth0',
fixed_ip_id=None)
# floating ip that is associated
def fake3(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1',
pool='nova',
interface='eth0',
fixed_ip_id=1,
project_id=ctxt.project_id)
# fixed ip with remote host
def fake4(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
address='10.0.0.1',
pool='nova',
instance_uuid=FAKEUUID,
interface='eth0',
network_id=123)
def fake4_network(*args, **kwargs):
return dict(test_network.fake_network,
multi_host=False,
host='jibberjabber')
# fixed ip with local host
def fake5(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
address='10.0.0.1',
pool='nova',
instance_uuid=FAKEUUID,
interface='eth0',
network_id=1234)
def fake5_network(*args, **kwargs):
return dict(test_network.fake_network,
multi_host=False, host='testhost')
def fake6(ctxt, method, **kwargs):
self.local = False
def fake7(*args, **kwargs):
self.local = True
def fake8(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1',
pool='nova',
interface='eth0',
fixed_ip_id=1,
auto_assigned=True,
project_id=ctxt.project_id)
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
# raises because floating_ip is not associated to a fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.assertRaises(exception.FloatingIpNotAssociated,
self.network.disassociate_floating_ip,
ctxt,
mox.IgnoreArg())
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
# does not raise and makes call remotely
self.local = True
self.stubs.Set(self.network.db, 'fixed_ip_get', fake4)
self.stubs.Set(self.network.db, 'network_get', fake4_network)
self.stubs.Set(self.network.network_rpcapi.client, 'prepare',
lambda **kw: self.network.network_rpcapi.client)
self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6)
self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
self.assertFalse(self.local)
# does not raise and makes call locally
self.local = False
self.stubs.Set(self.network.db, 'fixed_ip_get', fake5)
self.stubs.Set(self.network.db, 'network_get', fake5_network)
self.stubs.Set(self.network, '_disassociate_floating_ip', fake7)
self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
self.assertTrue(self.local)
# raises because auto_assigned floating IP cannot be disassociated
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake8)
self.assertRaises(exception.CannotDisassociateAutoAssignedFloatingIP,
self.network.disassociate_floating_ip,
ctxt,
mox.IgnoreArg())
def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
lambda *a, **kw: None)
self.mox.StubOutWithMock(db, 'network_get')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
fixed = dict(test_fixed_ip.fake_fixed_ip,
address='192.168.0.101')
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
instance_uuid=mox.IgnoreArg(),
host=None).AndReturn(fixed)
db.network_get(mox.IgnoreArg(),
mox.IgnoreArg(),
project_only=mox.IgnoreArg()
).AndReturn(dict(test_network.fake_network,
**networks[0]))
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg(), use_slave=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
uuid=FAKEUUID))
self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
networks[0]['id'])
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
def test_ip_association_and_allocation_of_other_project(self, net_get,
fixed_get):
"""Makes sure that we cannot deallocaate or disassociate
a public ip of other project.
"""
net_get.return_value = dict(test_network.fake_network,
**networks[1])
context1 = context.RequestContext('user', 'project1')
context2 = context.RequestContext('user', 'project2')
float_ip = db.floating_ip_create(context1.elevated(),
{'address': '1.2.3.4',
'project_id': context1.project_id})
float_addr = float_ip['address']
instance = db.instance_create(context1,
{'project_id': 'project1'})
fix_addr = db.fixed_ip_associate_pool(context1.elevated(),
1, instance['uuid']).address
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address=fix_addr,
instance_uuid=instance.uuid,
network=dict(test_network.fake_network,
**networks[1]))
# Associate the IP with non-admin user context
self.assertRaises(exception.Forbidden,
self.network.associate_floating_ip,
context2,
float_addr,
fix_addr)
# Deallocate address from other project
self.assertRaises(exception.Forbidden,
self.network.deallocate_floating_ip,
context2,
float_addr)
# Now Associates the address to the actual project
self.network.associate_floating_ip(context1, float_addr, fix_addr)
# Now try dis-associating from other project
self.assertRaises(exception.Forbidden,
self.network.disassociate_floating_ip,
context2,
float_addr)
# Clean up the ip addresses
self.network.disassociate_floating_ip(context1, float_addr)
self.network.deallocate_floating_ip(context1, float_addr)
self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
db.floating_ip_destroy(context1.elevated(), float_addr)
db.fixed_ip_disassociate(context1.elevated(), fix_addr)
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ip_update')
def test_deallocate_fixed(self, fixed_update, net_get, fixed_get):
"""Verify that release is called properly.
Ensures https://bugs.launchpad.net/nova/+bug/973442 doesn't return
"""
net_get.return_value = dict(test_network.fake_network,
**networks[1])
def vif_get(_context, _vif_id):
return vifs[0]
self.stubs.Set(db, 'virtual_interface_get', vif_get)
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address=fix_addr.address,
instance_uuid=instance.uuid,
allocated=True,
virtual_interface_id=3,
network=dict(test_network.fake_network,
**networks[1]))
self.flags(force_dhcp_release=True)
self.mox.StubOutWithMock(linux_net, 'release_dhcp')
linux_net.release_dhcp(networks[1]['bridge'], fix_addr.address,
'DE:AD:BE:EF:00:00')
self.mox.ReplayAll()
self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
fixed_update.assert_called_once_with(context1, fix_addr.address,
{'allocated': False})
def test_deallocate_fixed_deleted(self):
# Verify doesn't deallocate deleted fixed_ip from deleted network.
def teardown_network_on_host(_context, network):
if network['id'] == 0:
raise test.TestingException()
self.stubs.Set(self.network, '_teardown_network_on_host',
teardown_network_on_host)
context1 = context.RequestContext('user', 'project1')
elevated = context1.elevated()
instance = db.instance_create(context1,
{'project_id': 'project1'})
network = db.network_create_safe(elevated, networks[0])
_fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
fix_addr = _fix_addr.address
db.fixed_ip_update(elevated, fix_addr, {'deleted': 1})
elevated.read_deleted = 'yes'
delfixed = db.fixed_ip_get_by_address(elevated, fix_addr)
values = {'address': fix_addr,
'network_id': network.id,
'instance_uuid': delfixed['instance_uuid']}
db.fixed_ip_create(elevated, values)
        elevated.read_deleted = 'yes'
deallocate = self.network.deallocate_fixed_ip
self.assertRaises(test.TestingException, deallocate, context1,
fix_addr, 'fake')
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ip_update')
def test_deallocate_fixed_no_vif(self, fixed_update, net_get, fixed_get):
"""Verify that deallocate doesn't raise when no vif is returned.
Ensures https://bugs.launchpad.net/nova/+bug/968457 doesn't return
"""
net_get.return_value = dict(test_network.fake_network,
**networks[1])
def vif_get(_context, _vif_id):
return None
self.stubs.Set(db, 'virtual_interface_get', vif_get)
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address=fix_addr.address,
allocated=True,
virtual_interface_id=3,
instance_uuid=instance.uuid,
network=dict(test_network.fake_network,
**networks[1]))
self.flags(force_dhcp_release=True)
fixed_update.return_value = fixed_get.return_value
self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
fixed_update.assert_called_once_with(context1, fix_addr.address,
{'allocated': False})
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ip_update')
def test_fixed_ip_cleanup_fail(self, fixed_update, net_get, fixed_get):
# Verify IP is not deallocated if the security group refresh fails.
net_get.return_value = dict(test_network.fake_network,
**networks[1])
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = objects.FixedIP.associate_pool(elevated, 1,
instance['uuid'])
def fake_refresh(instance_uuid):
raise test.TestingException()
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
fake_refresh)
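        # The refresh failure should abort deallocation before
        # fixed_ip_update ever runs (checked at the end of the test).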
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address=fix_addr.address,
allocated=True,
virtual_interface_id=3,
instance_uuid=instance.uuid,
network=dict(test_network.fake_network,
**networks[1]))
self.assertRaises(test.TestingException,
self.network.deallocate_fixed_ip,
context1, str(fix_addr.address), 'fake')
self.assertFalse(fixed_update.called)
def test_get_networks_by_uuids_ordering(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
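        # The result must follow the requested order (the 'bbbb' network
        # first), not whatever order the DB returns.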
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
res = self.network._get_networks_by_uuids(self.context,
requested_networks)
self.assertEqual(res[0]['id'], 1)
self.assertEqual(res[1]['id'], 0)
@mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id')
@mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host')
@mock.patch('nova.network.linux_net.iptables_manager._apply')
def test_init_host_iptables_defer_apply(self, iptable_apply,
floating_get_by_host,
fixed_get_by_id):
def get_by_id(context, fixed_ip_id, **kwargs):
net = objects.Network(bridge='testbridge',
cidr='192.168.1.0/24')
if fixed_ip_id == 1:
return objects.FixedIP(address='192.168.1.4',
network=net)
elif fixed_ip_id == 2:
return objects.FixedIP(address='192.168.1.5',
network=net)
def fake_apply():
fake_apply.count += 1
fake_apply.count = 0
ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1)
float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2)
float1._context = ctxt
float2._context = ctxt
iptable_apply.side_effect = fake_apply
floating_get_by_host.return_value = [float1, float2]
fixed_get_by_id.side_effect = get_by_id
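        # Both floating IPs are plumbed inside one deferred-apply context,
        # so iptables _apply should fire exactly once during init_host.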
self.network.init_host()
self.assertEqual(1, fake_apply.count)
class _TestDomainObject(object):
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
self.__setattr__(k, v)
class FakeNetwork(object):
def __init__(self, **kwargs):
self.vlan = None
for k, v in kwargs.iteritems():
self.__setattr__(k, v)
def __getitem__(self, item):
return getattr(self, item)
class CommonNetworkTestCase(test.TestCase):
def setUp(self):
super(CommonNetworkTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.flags(ipv6_backend='rfc2462')
self.flags(use_local=True, group='conductor')
ipv6.reset_backend()
def test_validate_instance_zone_for_dns_domain(self):
domain = 'example.com'
az = 'test_az'
domains = {
domain: _TestDomainObject(
domain=domain,
availability_zone=az)}
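        # The instance below sits in the same availability zone as the
        # domain, so validation should return True.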
def dnsdomain_get(context, instance_domain):
return domains.get(instance_domain)
self.stubs.Set(db, 'dnsdomain_get', dnsdomain_get)
fake_instance = {'uuid': FAKEUUID,
'availability_zone': az}
manager = network_manager.NetworkManager()
res = manager._validate_instance_zone_for_dns_domain(self.context,
fake_instance)
self.assertTrue(res)
def fake_create_fixed_ips(self, context, network_id, fixed_cidr=None,
extra_reserved=None, bottom_reserved=0,
top_reserved=0):
return None
def test_get_instance_nw_info_client_exceptions(self):
manager = network_manager.NetworkManager()
self.mox.StubOutWithMock(manager.db,
'virtual_interface_get_by_instance')
manager.db.virtual_interface_get_by_instance(
self.context, FAKEUUID,
use_slave=False).AndRaise(exception.InstanceNotFound(
instance_id=FAKEUUID))
self.mox.ReplayAll()
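        # InstanceNotFound is a client error, so the manager should wrap
        # it in messaging.ExpectedException instead of letting it
        # propagate as an unexpected RPC failure.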
self.assertRaises(messaging.ExpectedException,
manager.get_instance_nw_info,
self.context, FAKEUUID, 'fake_rxtx_factor', HOST)
@mock.patch('nova.db.instance_get')
@mock.patch('nova.db.fixed_ip_get_by_instance')
def test_deallocate_for_instance_passes_host_info(self, fixed_get,
instance_get):
manager = fake_network.FakeNetworkManager()
db = manager.db
instance_get.return_value = fake_inst(uuid='ignoreduuid')
db.virtual_interface_delete_by_instance = lambda _x, _y: None
        ctx = context.RequestContext('ignore', 'ignore')
fixed_get.return_value = [dict(test_fixed_ip.fake_fixed_ip,
address='1.2.3.4',
network_id=123)]
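        # The host recorded on the instance must be passed through to
        # deallocate_fixed_ip (verified against the recorded calls below).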
manager.deallocate_for_instance(
ctx, instance=objects.Instance._from_db_object(self.context,
objects.Instance(), instance_get.return_value))
self.assertEqual([
(ctx, '1.2.3.4', 'fake-host')
], manager.deallocate_fixed_ip_calls)
@mock.patch('nova.db.fixed_ip_get_by_instance')
@mock.patch('nova.db.fixed_ip_disassociate')
def test_remove_fixed_ip_from_instance(self, disassociate, get):
manager = fake_network.FakeNetworkManager()
get.return_value = [
dict(test_fixed_ip.fake_fixed_ip, **x)
for x in manager.db.fixed_ip_get_by_instance(None,
FAKEUUID)]
manager.remove_fixed_ip_from_instance(self.context, FAKEUUID,
HOST,
'10.0.0.1')
self.assertEqual(manager.deallocate_called, '10.0.0.1')
disassociate.assert_called_once_with(self.context, '10.0.0.1')
@mock.patch('nova.db.fixed_ip_get_by_instance')
def test_remove_fixed_ip_from_instance_bad_input(self, get):
manager = fake_network.FakeNetworkManager()
get.return_value = []
self.assertRaises(exception.FixedIpNotFoundForSpecificInstance,
manager.remove_fixed_ip_from_instance,
self.context, 99, HOST, 'bad input')
def test_validate_cidrs(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/24',
False, 1, 256, None, None, None,
None, None)
self.assertEqual(1, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertIn('192.168.0.0/24', cidrs)
def test_validate_cidrs_split_exact_in_half(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/24',
False, 2, 128, None, None, None,
None, None)
self.assertEqual(2, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertIn('192.168.0.0/25', cidrs)
self.assertIn('192.168.0.128/25', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_cidr_in_use_middle_of_range(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
id=1, cidr='192.168.2.0/24')]
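        # With 192.168.2.0/24 already in use, carving four /24 subnets out
        # of 192.168.0.0/16 should skip it and take the next free one.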
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/16',
False, 4, 256, None, None, None,
None, None)
self.assertEqual(4, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
'192.168.4.0/24']
for exp_cidr in exp_cidrs:
self.assertIn(exp_cidr, cidrs)
self.assertNotIn('192.168.2.0/24', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_smaller_subnet_in_use(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
id=1, cidr='192.168.2.9/25')]
# CidrConflict: requested cidr (192.168.2.0/24) conflicts with
# existing smaller cidr
args = (self.context.elevated(), 'fake', '192.168.2.0/24', False,
1, 256, None, None, None, None, None)
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_smaller_cidr_in_use(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
id=1, cidr='192.168.2.0/25')]
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/16',
False, 4, 256, None, None, None, None,
None)
self.assertEqual(4, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
'192.168.4.0/24']
for exp_cidr in exp_cidrs:
self.assertIn(exp_cidr, cidrs)
self.assertNotIn('192.168.2.0/24', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_smaller_cidr_in_use2(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network, id=1,
cidr='192.168.2.9/29')]
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.2.0/24',
False, 3, 32, None, None, None, None,
None)
self.assertEqual(3, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27']
for exp_cidr in exp_cidrs:
self.assertIn(exp_cidr, cidrs)
self.assertNotIn('192.168.2.0/27', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_all_in_use(self, get_all):
manager = fake_network.FakeNetworkManager()
in_use = [dict(test_network.fake_network, **values) for values in
[{'id': 1, 'cidr': '192.168.2.9/29'},
{'id': 2, 'cidr': '192.168.2.64/26'},
{'id': 3, 'cidr': '192.168.2.128/26'}]]
get_all.return_value = in_use
args = (self.context.elevated(), 'fake', '192.168.2.0/24', False,
3, 64, None, None, None, None, None)
# CidrConflict: Not enough subnets avail to satisfy requested num_
# networks - some subnets in requested range already
# in use
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
def test_validate_cidrs_one_in_use(self):
manager = fake_network.FakeNetworkManager()
args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None,
None, None, None)
# ValueError: network_size * num_networks exceeds cidr size
self.assertRaises(ValueError, manager.create_networks, *args)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_already_used(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
cidr='192.168.0.0/24')]
# CidrConflict: cidr already in use
args = (self.context.elevated(), 'fake', '192.168.0.0/24', False,
1, 256, None, None, None, None, None)
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
def test_validate_cidrs_too_many(self):
manager = fake_network.FakeNetworkManager()
args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None,
None, None, None)
# ValueError: Not enough subnets avail to satisfy requested
# num_networks
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_split_partial(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/16',
False, 2, 256, None, None, None, None,
None)
returned_cidrs = [str(net['cidr']) for net in nets]
self.assertIn('192.168.0.0/24', returned_cidrs)
self.assertIn('192.168.1.0/24', returned_cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_conflict_existing_supernet(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
id=1, cidr='192.168.0.0/8')]
args = (self.context.elevated(), 'fake', '192.168.0.0/24', False,
1, 256, None, None, None, None, None)
# CidrConflict: requested cidr (192.168.0.0/24) conflicts
# with existing supernet
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
def test_create_networks(self):
cidr = '192.168.0.0/24'
manager = fake_network.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [self.context.elevated(), 'foo', cidr, None, 1, 256,
'fd00::/48', None, None, None, None, None]
self.assertTrue(manager.create_networks(*args))
@mock.patch('nova.db.network_get_all')
def test_create_networks_cidr_already_used(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
id=1, cidr='192.168.0.0/24')]
args = [self.context.elevated(), 'foo', '192.168.0.0/24', None, 1, 256,
'fd00::/48', None, None, None, None, None]
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
def test_create_networks_many(self):
cidr = '192.168.0.0/16'
manager = fake_network.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [self.context.elevated(), 'foo', cidr, None, 10, 256,
'fd00::/48', None, None, None, None, None]
self.assertTrue(manager.create_networks(*args))
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ips_by_virtual_interface')
def test_get_instance_uuids_by_ip_regex(self, fixed_get, network_get):
manager = fake_network.FakeNetworkManager(self.stubs)
fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
network_get.return_value = dict(test_network.fake_network,
**manager.db.network_get(None, 1))
        # Greedy get everything
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '.*'})
self.assertEqual(len(res), len(_vifs))
# Doesn't exist
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '10.0.0.1'})
self.assertFalse(res)
# Get instance 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '172.16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 2
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '173.16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
# Get instance 0 and 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '172.16.0.*'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid'])
self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 1 and 2
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '17..16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid'])
@mock.patch('nova.db.network_get')
def test_get_instance_uuids_by_ipv6_regex(self, network_get):
manager = fake_network.FakeNetworkManager(self.stubs)
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
def _network_get(context, network_id, **args):
return dict(test_network.fake_network,
**manager.db.network_get(context, network_id))
network_get.side_effect = _network_get
        # Greedy get everything
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*'})
self.assertEqual(len(res), len(_vifs))
# Doesn't exist
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*1034.*'})
self.assertFalse(res)
# Get instance 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '2001:.*2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 2
ip6 = '2001:db8:69:1f:dead:beff:feff:ef03'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': ip6})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
# Get instance 0 and 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*ef0[1,2]'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid'])
self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 1 and 2
ip6 = '2001:db8:69:1.:dead:beff:feff:ef0.'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': ip6})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid'])
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ips_by_virtual_interface')
def test_get_instance_uuids_by_ip(self, fixed_get, network_get):
manager = fake_network.FakeNetworkManager(self.stubs)
fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
network_get.return_value = dict(test_network.fake_network,
**manager.db.network_get(None, 1))
# No regex for you!
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': '.*'})
self.assertFalse(res)
# Doesn't exist
ip = '10.0.0.1'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertFalse(res)
# Get instance 1
ip = '172.16.0.2'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 2
ip = '173.16.0.2'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
@mock.patch('nova.db.network_get_by_uuid')
def test_get_network(self, get):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get.return_value = dict(test_network.fake_network, **networks[0])
uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
network = manager.get_network(fake_context, uuid)
self.assertEqual(network['uuid'], uuid)
@mock.patch('nova.db.network_get_by_uuid')
def test_get_network_not_found(self, get):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get.side_effect = exception.NetworkNotFoundForUUID(uuid='foo')
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
manager.get_network, fake_context, uuid)
@mock.patch('nova.db.network_get_all')
def test_get_all_networks(self, get_all):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get_all.return_value = [dict(test_network.fake_network, **net)
for net in networks]
output = manager.get_all_networks(fake_context)
self.assertEqual(len(networks), 2)
self.assertEqual(output[0]['uuid'],
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
self.assertEqual(output[1]['uuid'],
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb')
@mock.patch('nova.db.network_get_by_uuid')
@mock.patch('nova.db.network_disassociate')
def test_disassociate_network(self, disassociate, get):
manager = fake_network.FakeNetworkManager()
disassociate.return_value = True
fake_context = context.RequestContext('user', 'project')
get.return_value = dict(test_network.fake_network,
**networks[0])
uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
manager.disassociate_network(fake_context, uuid)
@mock.patch('nova.db.network_get_by_uuid')
def test_disassociate_network_not_found(self, get):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get.side_effect = exception.NetworkNotFoundForUUID(uuid='fake')
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
manager.disassociate_network, fake_context, uuid)
def _test_init_host_dynamic_fixed_range(self, net_manager):
self.flags(fake_network=True,
routing_source_ip='172.16.0.1',
metadata_host='172.16.0.1',
public_interface='eth1',
dmz_cidr=['10.0.3.0/24'])
binary_name = linux_net.get_binary_name()
# Stub out calls we don't want to really run, mock the db
self.stubs.Set(linux_net.iptables_manager, '_apply', lambda: None)
self.stubs.Set(floating_ips.FloatingIP, 'init_host_floating_ips',
lambda *args: None)
self.stubs.Set(net_manager.l3driver, 'initialize_gateway',
lambda *args: None)
self.mox.StubOutWithMock(db, 'network_get_all_by_host')
fake_networks = [dict(test_network.fake_network, **n)
for n in networks]
db.network_get_all_by_host(mox.IgnoreArg(),
mox.IgnoreArg()
).MultipleTimes().AndReturn(fake_networks)
self.mox.ReplayAll()
net_manager.init_host()
# Get the iptables rules that got created
current_lines = []
new_lines = linux_net.iptables_manager._modify_rules(current_lines,
linux_net.iptables_manager.ipv4['nat'],
table_name='nat')
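        # Each network should produce one SNAT rule plus three POSTROUTING
        # ACCEPT rules (metadata host, dmz_cidr, and same-network traffic).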
expected_lines = ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
'-j SNAT --to-source %s -o %s'
% (binary_name, networks[0]['cidr'],
CONF.routing_source_ip,
CONF.public_interface),
'[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
% (binary_name, networks[0]['cidr'],
CONF.metadata_host),
'[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
% (binary_name, networks[0]['cidr'],
CONF.dmz_cidr[0]),
'[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' % (binary_name,
networks[0]['cidr'],
networks[0]['cidr']),
'[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
'-j SNAT --to-source %s -o %s'
% (binary_name, networks[1]['cidr'],
CONF.routing_source_ip,
CONF.public_interface),
'[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
% (binary_name, networks[1]['cidr'],
CONF.metadata_host),
'[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
% (binary_name, networks[1]['cidr'],
CONF.dmz_cidr[0]),
'[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' % (binary_name,
networks[1]['cidr'],
networks[1]['cidr'])]
# Compare the expected rules against the actual ones
for line in expected_lines:
self.assertIn(line, new_lines)
# Add an additional network and ensure the rules get configured
new_network = {'id': 2,
'uuid': 'cccccccc-cccc-cccc-cccc-cccccccc',
'label': 'test2',
'injected': False,
'multi_host': False,
'cidr': '192.168.2.0/24',
'cidr_v6': '2001:dba::/64',
'gateway_v6': '2001:dba::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa1',
'bridge_interface': 'fake_fa1',
'gateway': '192.168.2.1',
'dhcp_server': '192.168.2.1',
'broadcast': '192.168.2.255',
'dns1': '192.168.2.1',
'dns2': '192.168.2.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.2.2',
'vpn_public_port': '22',
'vpn_private_address': '10.0.0.2'}
new_network_obj = objects.Network._from_db_object(
self.context, objects.Network(),
dict(test_network.fake_network, **new_network))
ctxt = context.get_admin_context()
net_manager._setup_network_on_host(ctxt, new_network_obj)
# Get the new iptables rules that got created from adding a new network
current_lines = []
new_lines = linux_net.iptables_manager._modify_rules(current_lines,
linux_net.iptables_manager.ipv4['nat'],
table_name='nat')
# Add the new expected rules to the old ones
expected_lines += ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
'-j SNAT --to-source %s -o %s'
% (binary_name, new_network['cidr'],
CONF.routing_source_ip,
CONF.public_interface),
'[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
% (binary_name, new_network['cidr'],
CONF.metadata_host),
'[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
% (binary_name, new_network['cidr'],
CONF.dmz_cidr[0]),
'[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack '
'! --ctstate DNAT -j ACCEPT' % (binary_name,
new_network['cidr'],
new_network['cidr'])]
# Compare the expected rules (with new network) against the actual ones
for line in expected_lines:
self.assertIn(line, new_lines)
def test_flatdhcpmanager_dynamic_fixed_range(self):
"""Test FlatDHCPManager NAT rules for fixed_range."""
# Set the network manager
self.network = network_manager.FlatDHCPManager(host=HOST)
self.network.db = db
# Test new behavior:
# CONF.fixed_range is not set, defaults to None
# Determine networks to NAT based on lookup
self._test_init_host_dynamic_fixed_range(self.network)
def test_vlanmanager_dynamic_fixed_range(self):
"""Test VlanManager NAT rules for fixed_range."""
# Set the network manager
self.network = network_manager.VlanManager(host=HOST)
self.network.db = db
# Test new behavior:
# CONF.fixed_range is not set, defaults to None
# Determine networks to NAT based on lookup
self._test_init_host_dynamic_fixed_range(self.network)
class TestRPCFixedManager(network_manager.RPCAllocateFixedIP,
network_manager.NetworkManager):
"""Dummy manager that implements RPCAllocateFixedIP."""
class RPCAllocateTestCase(test.TestCase):
"""Tests nova.network.manager.RPCAllocateFixedIP."""
def setUp(self):
super(RPCAllocateTestCase, self).setUp()
self.flags(use_local=True, group='conductor')
self.rpc_fixed = TestRPCFixedManager()
self.context = context.RequestContext('fake', 'fake')
def test_rpc_allocate(self):
"""Test to verify bug 855030 doesn't resurface.
        Makes sure _rpc_allocate_fixed_ip returns a value so the call
returns properly and the greenpool completes.
"""
address = '10.10.10.10'
def fake_allocate(*args, **kwargs):
return address
def fake_network_get(*args, **kwargs):
return test_network.fake_network
self.stubs.Set(self.rpc_fixed, 'allocate_fixed_ip', fake_allocate)
self.stubs.Set(self.rpc_fixed.db, 'network_get', fake_network_get)
rval = self.rpc_fixed._rpc_allocate_fixed_ip(self.context,
'fake_instance',
'fake_network')
self.assertEqual(rval, address)
class TestFloatingIPManager(floating_ips.FloatingIP,
network_manager.NetworkManager):
"""Dummy manager that implements FloatingIP."""
class AllocateTestCase(test.TestCase):
def setUp(self):
super(AllocateTestCase, self).setUp()
dns = 'nova.network.noop_dns_driver.NoopDNSDriver'
self.flags(instance_dns_manager=dns)
self.useFixture(test.SampleNetworks())
self.conductor = self.start_service(
'conductor', manager=CONF.conductor.manager)
self.compute = self.start_service('compute')
self.network = self.start_service('network')
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
self.user_context = context.RequestContext('testuser',
'testproject')
def test_allocate_for_instance(self):
address = "10.10.10.10"
self.flags(auto_assign_floating_ip=True)
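        # With auto_assign_floating_ip set, allocate_for_instance should
        # also pick up the floating address created below.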
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova'})
inst = objects.Instance()
inst.host = self.compute.host
inst.display_name = HOST
inst.instance_type_id = 1
inst.uuid = FAKEUUID
inst.create(self.context)
networks = db.network_get_all(self.context)
for network in networks:
db.network_update(self.context, network['id'],
{'host': self.network.host})
project_id = self.user_context.project_id
nw_info = self.network.allocate_for_instance(self.user_context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=project_id, macs=None)
self.assertEqual(1, len(nw_info))
fixed_ip = nw_info.fixed_ips()[0]['address']
self.assertTrue(utils.is_valid_ipv4(fixed_ip))
self.network.deallocate_for_instance(self.context,
instance=inst)
def test_allocate_for_instance_illegal_network(self):
networks = db.network_get_all(self.context)
requested_networks = []
for network in networks:
# set all networks to other projects
db.network_update(self.context, network['id'],
{'host': self.network.host,
'project_id': 'otherid'})
requested_networks.append((network['uuid'], None))
# set the first network to our project
db.network_update(self.context, networks[0]['id'],
{'project_id': self.user_context.project_id})
inst = objects.Instance()
inst.host = self.compute.host
inst.display_name = HOST
inst.instance_type_id = 1
inst.uuid = FAKEUUID
inst.create(self.context)
self.assertRaises(exception.NetworkNotFoundForProject,
self.network.allocate_for_instance, self.user_context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=self.context.project_id, macs=None,
requested_networks=requested_networks)
def test_allocate_for_instance_with_mac(self):
available_macs = set(['ca:fe:de:ad:be:ef'])
inst = db.instance_create(self.context, {'host': self.compute.host,
'display_name': HOST,
'instance_type_id': 1})
networks = db.network_get_all(self.context)
for network in networks:
db.network_update(self.context, network['id'],
{'host': self.network.host})
project_id = self.context.project_id
nw_info = self.network.allocate_for_instance(self.user_context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=project_id, macs=available_macs)
assigned_macs = [vif['address'] for vif in nw_info]
self.assertEqual(1, len(assigned_macs))
self.assertEqual(available_macs.pop(), assigned_macs[0])
self.network.deallocate_for_instance(self.context,
instance_id=inst['id'],
host=self.network.host,
project_id=project_id)
def test_allocate_for_instance_not_enough_macs(self):
available_macs = set()
inst = db.instance_create(self.context, {'host': self.compute.host,
'display_name': HOST,
'instance_type_id': 1})
networks = db.network_get_all(self.context)
for network in networks:
db.network_update(self.context, network['id'],
{'host': self.network.host})
project_id = self.context.project_id
self.assertRaises(exception.VirtualInterfaceCreateException,
self.network.allocate_for_instance,
self.user_context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=project_id, macs=available_macs)
class FloatingIPTestCase(test.TestCase):
"""Tests nova.network.manager.FloatingIP."""
def setUp(self):
super(FloatingIPTestCase, self).setUp()
self.tempdir = self.useFixture(fixtures.TempDir()).path
self.flags(log_dir=self.tempdir)
self.flags(use_local=True, group='conductor')
self.network = TestFloatingIPManager()
self.network.db = db
self.project_id = 'testproject'
self.context = context.RequestContext('testuser', self.project_id,
is_admin=False)
@mock.patch('nova.db.fixed_ip_get')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.instance_get_by_uuid')
@mock.patch('nova.db.service_get_by_host_and_topic')
@mock.patch('nova.db.floating_ip_get_by_address')
def test_disassociate_floating_ip_multi_host_calls(self, floating_get,
service_get,
inst_get, net_get,
fixed_get):
floating_ip = dict(test_floating_ip.fake_floating_ip,
fixed_ip_id=12)
fixed_ip = dict(test_fixed_ip.fake_fixed_ip,
network_id=None,
instance_uuid='instance-uuid')
network = dict(test_network.fake_network,
multi_host=True)
instance = dict(fake_instance.fake_db_instance(host='some-other-host'))
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
self.stubs.Set(self.network,
'_floating_ip_owned_by_project',
lambda _x, _y: True)
floating_get.return_value = floating_ip
fixed_get.return_value = fixed_ip
net_get.return_value = network
inst_get.return_value = instance
service_get.return_value = test_service.fake_service
self.stubs.Set(self.network.servicegroup_api,
'service_is_up',
lambda _x: True)
self.mox.StubOutWithMock(
self.network.network_rpcapi, '_disassociate_floating_ip')
self.network.network_rpcapi._disassociate_floating_ip(
ctxt, 'fl_ip', mox.IgnoreArg(), 'some-other-host', 'instance-uuid')
self.mox.ReplayAll()
self.network.disassociate_floating_ip(ctxt, 'fl_ip', True)
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.instance_get_by_uuid')
@mock.patch('nova.db.floating_ip_get_by_address')
def test_associate_floating_ip_multi_host_calls(self, floating_get,
inst_get, net_get,
fixed_get):
floating_ip = dict(test_floating_ip.fake_floating_ip,
fixed_ip_id=None)
fixed_ip = dict(test_fixed_ip.fake_fixed_ip,
network_id=None,
instance_uuid='instance-uuid')
network = dict(test_network.fake_network,
multi_host=True)
instance = dict(fake_instance.fake_db_instance(host='some-other-host'))
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
self.stubs.Set(self.network,
'_floating_ip_owned_by_project',
lambda _x, _y: True)
floating_get.return_value = floating_ip
fixed_get.return_value = fixed_ip
net_get.return_value = network
inst_get.return_value = instance
self.mox.StubOutWithMock(
self.network.network_rpcapi, '_associate_floating_ip')
self.network.network_rpcapi._associate_floating_ip(
ctxt, 'fl_ip', 'fix_ip', mox.IgnoreArg(), 'some-other-host',
'instance-uuid')
self.mox.ReplayAll()
self.network.associate_floating_ip(ctxt, 'fl_ip', 'fix_ip', True)
def test_double_deallocation(self):
instance_ref = db.instance_create(self.context,
{"project_id": self.project_id})
        # Run it twice so it faults if it cannot handle instances without
        # fixed networks; if either call fails, deallocation does not cope
        # with an instance that has no addresses.
self.network.deallocate_for_instance(self.context,
instance_id=instance_ref['id'])
self.network.deallocate_for_instance(self.context,
instance_id=instance_ref['id'])
def test_deallocate_floating_ip_quota_rollback(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1', fixed_ip_id=None,
project_id=ctxt.project_id)
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake)
self.mox.StubOutWithMock(db, 'floating_ip_deallocate')
self.mox.StubOutWithMock(self.network,
'_floating_ip_owned_by_project')
self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
quota.QUOTAS.reserve(self.context,
floating_ips=-1,
project_id='testproject').AndReturn('fake-rsv')
self.network._floating_ip_owned_by_project(self.context,
mox.IgnoreArg())
db.floating_ip_deallocate(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(None)
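        # Since deallocate reported no rows updated, the quota reservation
        # should be rolled back rather than committed.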
quota.QUOTAS.rollback(self.context, 'fake-rsv',
project_id='testproject')
self.mox.ReplayAll()
self.network.deallocate_floating_ip(self.context, '10.0.0.1')
def test_deallocation_deleted_instance(self):
self.stubs.Set(self.network, '_teardown_network_on_host',
lambda *args, **kwargs: None)
instance = objects.Instance()
instance.project_id = self.project_id
instance.deleted = True
instance.create(self.context)
network = db.network_create_safe(self.context.elevated(), {
'project_id': self.project_id,
'host': CONF.host,
'label': 'foo'})
fixed = db.fixed_ip_create(self.context, {'allocated': True,
'instance_uuid': instance.uuid, 'address': '10.1.1.1',
'network_id': network['id']})
db.floating_ip_create(self.context, {
'address': '10.10.10.10', 'instance_uuid': instance.uuid,
'fixed_ip_id': fixed['id'],
'project_id': self.project_id})
self.network.deallocate_for_instance(self.context, instance=instance)
def test_deallocation_duplicate_floating_ip(self):
self.stubs.Set(self.network, '_teardown_network_on_host',
lambda *args, **kwargs: None)
instance = objects.Instance()
instance.project_id = self.project_id
instance.create(self.context)
network = db.network_create_safe(self.context.elevated(), {
'project_id': self.project_id,
'host': CONF.host,
'label': 'foo'})
fixed = db.fixed_ip_create(self.context, {'allocated': True,
'instance_uuid': instance.uuid, 'address': '10.1.1.1',
'network_id': network['id']})
db.floating_ip_create(self.context, {
'address': '10.10.10.10',
'deleted': True})
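        # A soft-deleted row with the same address exists alongside the
        # live one; deallocation must still cope with the duplicate.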
db.floating_ip_create(self.context, {
'address': '10.10.10.10', 'instance_uuid': instance.uuid,
'fixed_ip_id': fixed['id'],
'project_id': self.project_id})
self.network.deallocate_for_instance(self.context, instance=instance)
@mock.patch('nova.db.fixed_ip_get')
@mock.patch('nova.db.floating_ip_get_by_address')
@mock.patch('nova.db.floating_ip_update')
def test_migrate_instance_start(self, floating_update, floating_get,
fixed_get):
called = {'count': 0}
def fake_floating_ip_get_by_address(context, address):
return dict(test_floating_ip.fake_floating_ip,
address=address,
fixed_ip_id=0)
def fake_is_stale_floating_ip_address(context, floating_ip):
return str(floating_ip.address) == '172.24.4.23'
floating_get.side_effect = fake_floating_ip_get_by_address
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
instance_uuid='fake_uuid',
address='10.0.0.2',
network=test_network.fake_network)
floating_update.return_value = fake_floating_ip_get_by_address(
None, '1.2.3.4')
def fake_remove_floating_ip(floating_addr, fixed_addr, interface,
network):
called['count'] += 1
def fake_clean_conntrack(fixed_ip):
if not str(fixed_ip) == "10.0.0.2":
raise exception.FixedIpInvalid(address=fixed_ip)
self.stubs.Set(self.network, '_is_stale_floating_ip_address',
fake_is_stale_floating_ip_address)
self.stubs.Set(self.network.l3driver, 'remove_floating_ip',
fake_remove_floating_ip)
self.stubs.Set(self.network.driver, 'clean_conntrack',
fake_clean_conntrack)
self.mox.ReplayAll()
addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
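        # 172.24.4.23 is reported stale and skipped, so the driver should
        # only be asked to remove the other two addresses.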
self.network.migrate_instance_start(self.context,
instance_uuid=FAKEUUID,
floating_addresses=addresses,
rxtx_factor=3,
project_id=self.project_id,
source='fake_source',
dest='fake_dest')
self.assertEqual(called['count'], 2)
@mock.patch('nova.db.fixed_ip_get')
@mock.patch('nova.db.floating_ip_update')
def test_migrate_instance_finish(self, floating_update, fixed_get):
called = {'count': 0}
def fake_floating_ip_get_by_address(context, address):
return dict(test_floating_ip.fake_floating_ip,
address=address,
fixed_ip_id=0)
def fake_is_stale_floating_ip_address(context, floating_ip):
return str(floating_ip.address) == '172.24.4.23'
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
instance_uuid='fake_uuid',
address='10.0.0.2',
network=test_network.fake_network)
floating_update.return_value = fake_floating_ip_get_by_address(
None, '1.2.3.4')
def fake_add_floating_ip(floating_addr, fixed_addr, interface,
network):
called['count'] += 1
self.stubs.Set(self.network.db, 'floating_ip_get_by_address',
fake_floating_ip_get_by_address)
self.stubs.Set(self.network, '_is_stale_floating_ip_address',
fake_is_stale_floating_ip_address)
self.stubs.Set(self.network.l3driver, 'add_floating_ip',
fake_add_floating_ip)
self.mox.ReplayAll()
addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
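        # As above, the stale address is skipped and add_floating_ip runs
        # only for the remaining two addresses.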
self.network.migrate_instance_finish(self.context,
instance_uuid=FAKEUUID,
floating_addresses=addresses,
host='fake_dest',
rxtx_factor=3,
project_id=self.project_id,
source='fake_source')
self.assertEqual(called['count'], 2)
def test_floating_dns_create_conflict(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.assertRaises(exception.FloatingIpDNSExists,
self.network.add_dns_entry, self.context,
address1, name1, "A", zone)
def test_floating_create_and_get(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertFalse(entries)
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.network.add_dns_entry(self.context, address1, name2, "A", zone)
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertEqual(len(entries), 2)
self.assertEqual(entries[0], name1)
self.assertEqual(entries[1], name2)
entries = self.network.get_dns_entries_by_name(self.context,
name1, zone)
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0], address1)
def test_floating_dns_delete(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.network.add_dns_entry(self.context, address1, name2, "A", zone)
self.network.delete_dns_entry(self.context, name1, zone)
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0], name2)
self.assertRaises(exception.NotFound,
self.network.delete_dns_entry, self.context,
name1, zone)
def test_floating_dns_domains_public(self):
zone1 = "testzone"
domain1 = "example.org"
domain2 = "example.com"
address1 = '10.10.10.10'
entryname = 'testentry'
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.AdminRequired,
self.network.create_public_dns_domain, self.context,
domain1, zone1)
self.network.create_public_dns_domain(context_admin, domain1,
'testproject')
self.network.create_public_dns_domain(context_admin, domain2,
'fakeproject')
domains = self.network.get_dns_domains(self.context)
self.assertEqual(len(domains), 2)
self.assertEqual(domains[0]['domain'], domain1)
self.assertEqual(domains[1]['domain'], domain2)
self.assertEqual(domains[0]['project'], 'testproject')
self.assertEqual(domains[1]['project'], 'fakeproject')
self.network.add_dns_entry(self.context, address1, entryname,
'A', domain1)
entries = self.network.get_dns_entries_by_name(self.context,
entryname, domain1)
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0], address1)
self.assertRaises(exception.AdminRequired,
self.network.delete_dns_domain, self.context,
domain1)
self.network.delete_dns_domain(context_admin, domain1)
self.network.delete_dns_domain(context_admin, domain2)
# Verify that deleting the domain deleted the associated entry
entries = self.network.get_dns_entries_by_name(self.context,
entryname, domain1)
self.assertFalse(entries)
def test_delete_all_by_ip(self):
domain1 = "example.org"
domain2 = "example.com"
address = "10.10.10.10"
name1 = "foo"
name2 = "bar"
def fake_domains(context):
return [{'domain': 'example.org', 'scope': 'public'},
{'domain': 'example.com', 'scope': 'public'},
{'domain': 'test.example.org', 'scope': 'public'}]
self.stubs.Set(self.network, 'get_dns_domains', fake_domains)
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.network.create_public_dns_domain(context_admin, domain1,
'testproject')
self.network.create_public_dns_domain(context_admin, domain2,
'fakeproject')
domains = self.network.get_dns_domains(self.context)
for domain in domains:
self.network.add_dns_entry(self.context, address,
name1, "A", domain['domain'])
self.network.add_dns_entry(self.context, address,
name2, "A", domain['domain'])
entries = self.network.get_dns_entries_by_address(self.context,
address,
domain['domain'])
self.assertEqual(len(entries), 2)
self.network._delete_all_entries_for_ip(self.context, address)
for domain in domains:
entries = self.network.get_dns_entries_by_address(self.context,
address,
domain['domain'])
self.assertFalse(entries)
self.network.delete_dns_domain(context_admin, domain1)
self.network.delete_dns_domain(context_admin, domain2)
def test_mac_conflicts(self):
# Make sure MAC collisions are retried.
self.flags(create_unique_mac_address_attempts=3)
ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
macs = ['bb:bb:bb:bb:bb:bb', 'aa:aa:aa:aa:aa:aa']
# Create a VIF with aa:aa:aa:aa:aa:aa
crash_test_dummy_vif = {
'address': macs[1],
'instance_uuid': 'fake_uuid',
'network_id': 123,
'uuid': 'fake_uuid',
}
self.network.db.virtual_interface_create(ctxt, crash_test_dummy_vif)
# Hand out a collision first, then a legit MAC
def fake_gen_mac():
return macs.pop()
self.stubs.Set(utils, 'generate_mac_address', fake_gen_mac)
# SQLite doesn't seem to honor the uniqueness constraint on the
# address column, so fake the collision-avoidance here
def fake_vif_save(vif):
if vif.address == crash_test_dummy_vif['address']:
raise db_exc.DBError("If you're smart, you'll retry!")
# NOTE(russellb) The VirtualInterface object requires an ID to be
# set, and we expect it to get set automatically when we do the
# save.
vif.id = 1
self.stubs.Set(models.VirtualInterface, 'save', fake_vif_save)
# Attempt to add another and make sure that both MACs are consumed
# by the retry loop
self.network._add_virtual_interface(ctxt, 'fake_uuid', 123)
self.assertEqual(macs, [])
def test_deallocate_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
exception.FloatingIpNotFoundForAddress(address='fake'))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
self.network.deallocate_floating_ip,
self.context, '1.2.3.4')
def test_associate_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
exception.FloatingIpNotFoundForAddress(address='fake'))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
self.network.associate_floating_ip,
self.context, '1.2.3.4', '10.0.0.1')
def test_disassociate_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
exception.FloatingIpNotFoundForAddress(address='fake'))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
self.network.disassociate_floating_ip,
self.context, '1.2.3.4')
def test_get_floating_ip_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get')
self.network.db.floating_ip_get(self.context, 'fake-id').AndRaise(
exception.FloatingIpNotFound(id='fake'))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
self.network.get_floating_ip,
self.context, 'fake-id')
def _test_associate_floating_ip_failure(self, stdout, expected_exception):
def _fake_catchall(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
network=test_network.fake_network)
def _fake_add_floating_ip(*args, **kwargs):
raise processutils.ProcessExecutionError(stdout)
self.stubs.Set(self.network.db, 'floating_ip_fixed_ip_associate',
_fake_catchall)
self.stubs.Set(self.network.db, 'floating_ip_disassociate',
_fake_catchall)
self.stubs.Set(self.network.l3driver, 'add_floating_ip',
_fake_add_floating_ip)
self.assertRaises(expected_exception,
self.network._associate_floating_ip, self.context,
'1.2.3.4', '1.2.3.5', '', '')
def test_associate_floating_ip_failure(self):
self._test_associate_floating_ip_failure(None,
processutils.ProcessExecutionError)
def test_associate_floating_ip_failure_interface_not_found(self):
self._test_associate_floating_ip_failure('Cannot find device',
exception.NoFloatingIpInterface)
@mock.patch('nova.objects.FloatingIP.get_by_address')
def test_get_floating_ip_by_address(self, mock_get):
mock_get.return_value = mock.sentinel.floating
self.assertEqual(mock.sentinel.floating,
self.network.get_floating_ip_by_address(
self.context,
mock.sentinel.address))
mock_get.assert_called_once_with(self.context, mock.sentinel.address)
@mock.patch('nova.objects.FloatingIPList.get_by_project')
def test_get_floating_ips_by_project(self, mock_get):
mock_get.return_value = mock.sentinel.floatings
self.assertEqual(mock.sentinel.floatings,
self.network.get_floating_ips_by_project(
self.context))
mock_get.assert_called_once_with(self.context, self.context.project_id)
@mock.patch('nova.objects.FloatingIPList.get_by_fixed_address')
def test_get_floating_ips_by_fixed_address(self, mock_get):
mock_get.return_value = [objects.FloatingIP(address='1.2.3.4'),
objects.FloatingIP(address='5.6.7.8')]
self.assertEqual(['1.2.3.4', '5.6.7.8'],
self.network.get_floating_ips_by_fixed_address(
self.context, mock.sentinel.address))
mock_get.assert_called_once_with(self.context, mock.sentinel.address)
class InstanceDNSTestCase(test.TestCase):
"""Tests nova.network.manager instance DNS."""
def setUp(self):
super(InstanceDNSTestCase, self).setUp()
self.tempdir = self.useFixture(fixtures.TempDir()).path
self.flags(log_dir=self.tempdir)
self.flags(use_local=True, group='conductor')
self.network = TestFloatingIPManager()
self.network.db = db
self.project_id = 'testproject'
self.context = context.RequestContext('testuser', self.project_id,
is_admin=False)
def test_dns_domains_private(self):
zone1 = 'testzone'
domain1 = 'example.org'
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.AdminRequired,
self.network.create_private_dns_domain, self.context,
domain1, zone1)
self.network.create_private_dns_domain(context_admin, domain1, zone1)
domains = self.network.get_dns_domains(self.context)
self.assertEqual(len(domains), 1)
self.assertEqual(domains[0]['domain'], domain1)
self.assertEqual(domains[0]['availability_zone'], zone1)
self.assertRaises(exception.AdminRequired,
self.network.delete_dns_domain, self.context,
domain1)
self.network.delete_dns_domain(context_admin, domain1)
domain1 = "example.org"
domain2 = "example.com"
class LdapDNSTestCase(test.TestCase):
"""Tests nova.network.ldapdns.LdapDNS."""
def setUp(self):
super(LdapDNSTestCase, self).setUp()
self.useFixture(test.ReplaceModule('ldap', fake_ldap))
dns_class = 'nova.network.ldapdns.LdapDNS'
self.driver = importutils.import_object(dns_class)
attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
'domain', 'dcobject', 'top'],
'associateddomain': ['root'],
'dc': ['root']}
self.driver.lobj.add_s("ou=hosts,dc=example,dc=org", attrs.items())
self.driver.create_domain(domain1)
self.driver.create_domain(domain2)
def tearDown(self):
self.driver.delete_domain(domain1)
self.driver.delete_domain(domain2)
super(LdapDNSTestCase, self).tearDown()
def test_ldap_dns_domains(self):
domains = self.driver.get_domains()
self.assertEqual(len(domains), 2)
self.assertIn(domain1, domains)
self.assertIn(domain2, domains)
def test_ldap_dns_create_conflict(self):
address1 = "10.10.10.11"
name1 = "foo"
self.driver.create_entry(name1, address1, "A", domain1)
self.assertRaises(exception.FloatingIpDNSExists,
self.driver.create_entry,
name1, address1, "A", domain1)
def test_ldap_dns_create_and_get(self):
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertFalse(entries)
self.driver.create_entry(name1, address1, "A", domain1)
self.driver.create_entry(name2, address1, "A", domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertEqual(len(entries), 2)
self.assertEqual(entries[0], name1)
self.assertEqual(entries[1], name2)
entries = self.driver.get_entries_by_name(name1, domain1)
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0], address1)
def test_ldap_dns_delete(self):
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.driver.create_entry(name1, address1, "A", domain1)
self.driver.create_entry(name2, address1, "A", domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertEqual(len(entries), 2)
self.driver.delete_entry(name1, domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
LOG.debug("entries: %s" % entries)
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0], name2)
self.assertRaises(exception.NotFound,
self.driver.delete_entry,
name1, domain1)
|
saleemjaveds/https-github.com-openstack-nova
|
nova/tests/network/test_manager.py
|
Python
|
apache-2.0
| 149,206
|
[
"FEFF"
] |
dbf5b9ed708ded3d6b32f39b2a75402b3aae399ff2b169659942fb41b4e8e1fc
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from numpy.testing import (
dec,
assert_,
assert_equal,
)
from unittest import skip
import MDAnalysis as mda
from MDAnalysisTests.datafiles import PSF, DCD
from MDAnalysisTests import parser_not_found
class TestSegmentGroup(object):
# Legacy tests from before 363
@dec.skipif(parser_not_found('DCD'),
'DCD parser not available. Are you using python 3?')
def setUp(self):
"""Set up the standard AdK system in implicit solvent."""
self.universe = mda.Universe(PSF, DCD)
self.g = self.universe.atoms.segments
def test_newSegmentGroup(self):
"""test that slicing a SegmentGroup returns a new SegmentGroup (Issue 135)"""
g = self.universe.atoms.segments
newg = g[:]
assert_(isinstance(newg, mda.core.groups.SegmentGroup))
assert_equal(len(newg), len(g))
def test_n_atoms(self):
assert_equal(self.g.n_atoms, 3341)
def test_n_residues(self):
assert_equal(self.g.n_residues, 214)
def test_resids_dim(self):
assert_equal(len(self.g.resids), len(self.g))
for seg, resids in zip(self.g, self.g.resids):
assert_(len(resids) == len(seg.residues))
assert_equal(seg.residues.resids, resids)
def test_resnums_dim(self):
assert_equal(len(self.g.resnums), len(self.g))
for seg, resnums in zip(self.g, self.g.resnums):
assert_(len(resnums) == len(seg.residues))
assert_equal(seg.residues.resnums, resnums)
def test_segids_dim(self):
assert_equal(len(self.g.segids), len(self.g))
def test_set_segids(self):
s = self.universe.select_atoms('all').segments
s.segids = 'ADK'
assert_equal(self.universe.segments.segids, ['ADK'],
err_msg="failed to set_segid on segments")
def test_set_segid_updates_self(self):
g = self.universe.select_atoms("resid 10:18").segments
g.segids = 'ADK'
assert_equal(g.segids, ['ADK'],
err_msg="old selection was not changed in place after set_segid")
def test_atom_order(self):
assert_equal(self.universe.segments.atoms.indices,
sorted(self.universe.segments.atoms.indices))
|
alejob/mdanalysis
|
testsuite/MDAnalysisTests/core/test_segmentgroup.py
|
Python
|
gpl-2.0
| 3,296
|
[
"MDAnalysis"
] |
754cae3b8f6d0dcba88dbdd9e49f1abc8231fa610fdcaa18dbc01f197ce5341f
|
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def AddSoft(system, comX, comY, comZ, k1, k2):
# currently only works for ONE SINGLE soft object
# open file and add nodes
with open("tables/softPositions", "r") as fp:
numPoints = int(fp.readline())
print "Found " + str(numPoints) + " nodes"
# actual add
for i in range(0, numPoints):
line = str.split(fp.readline())
X = float(line[0]) + comX
Y = float(line[1]) + comY
Z = float(line[2]) + comZ
# print X, Y, Z
system.part.add(id=i, pos=[X, Y, Z], virtual=1)
# triangles
from espressomd.interactions import IBM_Triel
with open("tables/softTriangles", "r") as fp:
numTri = int(fp.readline())
print "Found " + str(numTri) + " triangles"
# actual add
for i in range(0, numTri):
line = str.split(fp.readline())
id1 = int(line[0])
id2 = int(line[1])
id3 = int(line[2])
tri = IBM_Triel(ind1=id1, ind2=id2, ind3=id3,
elasticLaw="Skalak", k1=k1, k2=k2, maxDist=5)
system.bonded_inter.add(tri)
system.part[id1].add_bond((tri, id2, id3))
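# A minimal usage sketch (illustrative, not part of the original sample): the
# box size and the presence of the "tables/softPositions" and
# "tables/softTriangles" input files are assumptions. Wrapped in a function so
# nothing runs on import.
def _example_add_soft():
    import espressomd  # requires an ESPResSo build with virtual sites enabled
    system = espressomd.System(box_l=[20.0, 20.0, 20.0])
    # place the soft object's centre of mass in the middle of the box
    AddSoft(system, 10.0, 10.0, 10.0, k1=0.1, k2=0.05)
    return system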
|
hmenke/espresso
|
samples/immersed_boundary/addSoft.py
|
Python
|
gpl-3.0
| 1,916
|
[
"ESPResSo"
] |
8848f361973a5e19a9dd793e57518b99f475a3fa02635c8b2fc828d74d59e530
|
import calendar
import json
import re
import uuid
import mock
from django.core.cache import cache
from django.test import TestCase
from django.contrib.auth.models import User
from django.core import mail
from funfactory.urlresolvers import reverse
from nose.tools import eq_, ok_
from airmozilla.main.models import Event
from airmozilla.comments.views import (
can_manage_comments,
get_latest_comment
)
from airmozilla.comments.models import (
Discussion,
Comment,
Unsubscription
)
MOZILLIAN_USER = """
{
"meta": {
"previous": null,
"total_count": 1,
"offset": 0,
"limit": 20,
"next": null
},
"objects": [
{
"website": "",
"bio": "",
"resource_uri": "/api/v1/users/2429/",
"last_updated": "2012-11-06T14:41:47",
"groups": [
"ugly tuna"
],
"city": "Casino",
"skills": [],
"country": "Albania",
"region": "Bush",
"id": "2429",
"languages": [],
"allows_mozilla_sites": true,
"photo": "http://www.gravatar.com/avatar/0409b497734934400822bb33...",
"is_vouched": true,
"email": "peterbe@mozilla.com",
"ircname": "",
"allows_community_sites": true,
"full_name": "Peter Bengtsson"
}
]
}
"""
class Response(object):
def __init__(self, content=None, status_code=200):
self.content = content
self.status_code = status_code
class TestComments(TestCase):
fixtures = ['airmozilla/manage/tests/main_testdata.json']
def _create_discussion(self, event, enabled=True, moderate_all=True,
notify_all=True):
return Discussion.objects.create(
event=event,
enabled=enabled,
moderate_all=moderate_all,
notify_all=notify_all
)
def test_can_manage_comments(self):
event = Event.objects.get(title='Test event')
jay = User.objects.create(username='jay', email='jay@mozilla.com')
bob = User.objects.create(username='bob', email='bob@mozilla.com')
richard = User.objects.create(username='richard',
email='richard@mozilla.com',
is_superuser=True)
discussion = self._create_discussion(event)
discussion.moderators.add(jay)
ok_(not can_manage_comments(bob, discussion))
ok_(can_manage_comments(jay, discussion))
ok_(can_manage_comments(richard, discussion))
def test_get_latest_comment(self):
event = Event.objects.get(title='Test event')
eq_(get_latest_comment(event), None)
# or by ID
eq_(get_latest_comment(event.pk), None)
bob = User.objects.create(username='bob', email='bob@mozilla.com')
comment = Comment.objects.create(
event=event,
user=bob,
comment="Hi, it's Bob",
status=Comment.STATUS_POSTED
)
latest = get_latest_comment(event)
eq_(latest, None)
latest = get_latest_comment(event, include_posted=True)
modified = calendar.timegm(comment.modified.utctimetuple())
eq_(latest, modified)
# again, or by event ID
latest_second_time = get_latest_comment(event.pk, include_posted=True)
eq_(latest, latest_second_time)
def test_basic_event_data(self):
event = Event.objects.get(title='Test event')
# render the event and there should be no comments
url = reverse('main:event', args=(event.slug,))
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Comments' not in response.content)
# if not enabled you get that back in JSON
comments_url = reverse('comments:event_data', args=(event.pk,))
response = self.client.get(comments_url)
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure['discussion']['enabled'], False)
        # also, trying to post a comment when it's not enabled
# should cause an error
response = self.client.post(comments_url, {
'name': 'Peter',
'comment': 'Bla bla'
})
eq_(response.status_code, 400)
# enable discussion
discussion = self._create_discussion(event)
jay = User.objects.create(username='jay', email='jay@mozilla.com')
discussion.moderators.add(jay)
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Comments' in response.content)
comments_url = reverse('comments:event_data', args=(event.pk,))
response = self.client.get(comments_url)
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure['discussion']['enabled'], True)
eq_(structure['discussion']['closed'], False)
ok_('No comments posted' in structure['html'])
# even though it's enabled, it should reject postings
# because we're not signed in
response = self.client.post(comments_url, {
'name': 'Peter',
'comment': 'Bla bla'
})
eq_(response.status_code, 403)
# so, let's sign in and try again
User.objects.create_user('richard', password='secret')
        # once signed in, posting a comment should be accepted
assert self.client.login(username='richard', password='secret')
response = self.client.post(comments_url, {
'name': 'Richard',
'comment': 'Bla bla'
})
eq_(response.status_code, 200)
structure = json.loads(response.content)
ok_('No comments posted' not in structure['html'])
ok_('Bla bla' in structure['html'])
comment = Comment.objects.get(comment='Bla bla')
ok_(comment)
eq_(comment.status, Comment.STATUS_POSTED)
# the moderator should now have received an email
email_sent = mail.outbox[-1]
ok_(event.title in email_sent.subject)
ok_('requires moderation' in email_sent.subject)
ok_(url in email_sent.body)
ok_(url + '#comment-%d' % comment.pk in email_sent.body)
def test_post_comment_no_moderation(self):
event = Event.objects.get(title='Test event')
self._create_discussion(event, moderate_all=False)
User.objects.create_user('richard', password='secret')
assert self.client.login(username='richard', password='secret')
comments_url = reverse('comments:event_data', args=(event.pk,))
response = self.client.post(comments_url, {
'name': 'Richard',
'comment': 'Bla bla'
})
eq_(response.status_code, 200)
# structure = json.loads(response.content)
comment = Comment.objects.get(event=event)
eq_(comment.status, Comment.STATUS_APPROVED)
def test_moderation_immediately(self):
"""when you post a comment that needs moderation, the moderator
can click a link in the email notification that immediately
approves the comment without being signed in"""
event = Event.objects.get(title='Test event')
discussion = self._create_discussion(event)
jay = User.objects.create(username='jay', email='jay@mozilla.com')
bob = User.objects.create(username='bob', email='bob@mozilla.com')
discussion.moderators.add(jay)
comment = Comment.objects.create(
event=event,
user=bob,
comment='Bla bla',
status=Comment.STATUS_POSTED
)
identifier = uuid.uuid4().hex[:10]
cache.set('approve-%s' % identifier, comment.pk, 60)
cache.set('remove-%s' % identifier, comment.pk, 60)
approve_url = reverse(
'comments:approve_immediately',
args=(identifier, comment.pk)
)
remove_url = reverse(
'comments:remove_immediately',
args=(identifier, comment.pk)
)
response = self.client.get(approve_url)
eq_(response.status_code, 200)
ok_('Comment Approved' in response.content)
# reload
comment = Comment.objects.get(pk=comment.pk)
eq_(comment.status, Comment.STATUS_APPROVED)
response = self.client.get(remove_url)
eq_(response.status_code, 200)
ok_('Comment Removed' in response.content)
# reload
comment = Comment.objects.get(pk=comment.pk)
eq_(comment.status, Comment.STATUS_REMOVED)
# try with identifiers that aren't in the cache
bogus_identifier = uuid.uuid4().hex[:10]
bogus_approve_url = reverse(
'comments:approve_immediately',
args=(bogus_identifier, comment.pk)
)
bogus_remove_url = reverse(
'comments:remove_immediately',
args=(bogus_identifier, comment.pk)
)
response = self.client.get(bogus_approve_url)
eq_(response.status_code, 200)
ok_('Comment Approved' not in response.content)
ok_('Unable to Approve Comment' in response.content)
response = self.client.get(bogus_remove_url)
eq_(response.status_code, 200)
ok_('Comment Removed' not in response.content)
ok_('Unable to Remove Comment' in response.content)
def test_unsubscribe_on_reply_notifications(self):
event = Event.objects.get(title='Test event')
discussion = self._create_discussion(event)
jay = User.objects.create(username='jay', email='jay@mozilla.com')
bob = User.objects.create(username='bob', email='bob@mozilla.com')
discussion.moderators.add(jay)
comment = Comment.objects.create(
event=event,
user=bob,
comment='Bla bla',
status=Comment.STATUS_APPROVED
)
jay.set_password('secret')
jay.save()
assert self.client.login(username='jay', password='secret')
# post a reply
url = reverse('comments:event_data', args=(event.pk,))
response = self.client.post(url, {
'comment': 'I think this',
'name': 'Jay',
'reply_to': comment.pk,
})
eq_(response.status_code, 200)
structure = json.loads(response.content)
ok_('Bla bla' in structure['html'])
ok_('I think this' in structure['html'])
# now, we must approve this comment
new_comment = Comment.objects.get(
comment='I think this',
user=jay
)
response = self.client.post(url, {
'approve': new_comment.pk,
})
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure, {'ok': True})
email_sent = mail.outbox[-1]
ok_('Reply' in email_sent.subject)
ok_(event.title in email_sent.subject)
eq_(email_sent.to, ['bob@mozilla.com'])
# expect there to be two unsubscribe links in there
url_unsubscribe = re.findall(
'/comments/unsubscribe/\w{10}/\d+/',
email_sent.body
)[0]
urls_unsubscribe_all = re.findall(
'/comments/unsubscribe/\w{10}/',
email_sent.body
)
for url in urls_unsubscribe_all:
if not url_unsubscribe.startswith(url):
url_unsubscribe_all = url
self.client.logout()
# now let's visit these
response = self.client.get(url_unsubscribe)
eq_(response.status_code, 200)
ok_('Are you sure' in response.content)
response = self.client.post(url_unsubscribe, {})
eq_(response.status_code, 302)
Unsubscription.objects.get(
user=bob,
discussion=discussion
)
unsubscribed_url = reverse(
'comments:unsubscribed',
args=(discussion.pk,)
)
ok_(unsubscribed_url in response['location'])
response = self.client.get(unsubscribed_url)
eq_(response.status_code, 200)
ok_('Unsubscribed' in response.content)
ok_(event.title in response.content)
response = self.client.post(url_unsubscribe_all, {})
eq_(response.status_code, 302)
Unsubscription.objects.get(
user=bob,
discussion__isnull=True
)
unsubscribed_url = reverse('comments:unsubscribed_all')
ok_(unsubscribed_url in response['location'])
def test_unsubscribed_reply_notifications_discussion(self):
event = Event.objects.get(title='Test event')
discussion = self._create_discussion(event)
jay = User.objects.create(username='jay', email='jay@mozilla.com')
bob = User.objects.create(username='bob', email='bob@mozilla.com')
discussion.moderators.add(jay)
comment = Comment.objects.create(
event=event,
user=bob,
comment='Bla bla',
status=Comment.STATUS_APPROVED
)
Unsubscription.objects.create(
user=bob,
discussion=discussion
)
jay.set_password('secret')
jay.save()
assert self.client.login(username='jay', password='secret')
# post a reply
url = reverse('comments:event_data', args=(event.pk,))
response = self.client.post(url, {
'comment': 'I think this',
'reply_to': comment.pk,
})
eq_(response.status_code, 200)
# But it needs to be approved for reply notifications to
# even be attempted.
new_comment = Comment.objects.get(comment='I think this')
eq_(new_comment.reply_to.user, bob)
response = self.client.post(url, {
'approve': new_comment.pk,
})
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure, {'ok': True})
ok_(not mail.outbox)
def test_unsubscribed_reply_notifications_all(self):
event = Event.objects.get(title='Test event')
discussion = self._create_discussion(event)
jay = User.objects.create(username='jay', email='jay@mozilla.com')
bob = User.objects.create(username='bob', email='bob@mozilla.com')
discussion.moderators.add(jay)
comment = Comment.objects.create(
event=event,
user=bob,
comment='Bla bla',
status=Comment.STATUS_APPROVED
)
Unsubscription.objects.create(
user=bob,
)
jay.set_password('secret')
jay.save()
assert self.client.login(username='jay', password='secret')
# post a reply
url = reverse('comments:event_data', args=(event.pk,))
response = self.client.post(url, {
'comment': 'I think this',
'reply_to': comment.pk,
})
eq_(response.status_code, 200)
# But it needs to be approved for reply notifications to
# even be attempted.
new_comment = Comment.objects.get(comment='I think this')
eq_(new_comment.reply_to.user, bob)
response = self.client.post(url, {
'approve': new_comment.pk,
})
ok_(not mail.outbox)
def test_invalid_reply_to(self):
event = Event.objects.get(title='Test event')
discussion = self._create_discussion(event)
jay = User.objects.create(username='jay', email='jay@mozilla.com')
bob = User.objects.create(username='bob', email='bob@mozilla.com')
discussion.moderators.add(jay)
Comment.objects.create(
event=event,
user=bob,
comment='Bla bla',
status=Comment.STATUS_APPROVED
)
jay.set_password('secret')
jay.save()
assert self.client.login(username='jay', password='secret')
# post a reply
url = reverse('comments:event_data', args=(event.pk,))
response = self.client.post(url, {
'comment': 'I think this',
'reply_to': '999999999',
})
eq_(response.status_code, 400)
@mock.patch('logging.error')
@mock.patch('requests.get')
def test_fetch_user_name(self, rget, rlogging):
cache.clear()
def mocked_get(url, **options):
if 'peterbe' in url:
return Response(MOZILLIAN_USER)
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('comments:user_name')
response = self.client.get(url)
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure['name'], '')
peterbe = User.objects.create_user(
username='peterbe', password='secret'
)
assert self.client.login(username='peterbe', password='secret')
response = self.client.get(url)
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure['name'], '')
peterbe.email = 'peterbe@mozilla.com'
peterbe.save()
response = self.client.get(url)
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure['name'], 'Peter Bengtsson')
def test_modify_comment_without_permission(self):
event = Event.objects.get(title='Test event')
self._create_discussion(event)
bob = User.objects.create(username='bob', email='bob@mozilla.com')
comment = Comment.objects.create(
event=event,
user=bob,
comment='Bla bla',
status=Comment.STATUS_POSTED
)
url = reverse('comments:event_data', args=(event.pk,))
response = self.client.post(url, {
'approve': comment.pk,
})
eq_(response.status_code, 403)
# and not being logged in you definitely can't post comments
response = self.client.post(url, {
'comment': "My opinion",
})
eq_(response.status_code, 403)
User.objects.create_user(username='jay', password='secret')
assert self.client.login(username='jay', password='secret')
response = self.client.post(url, {
'approve': comment.pk,
})
eq_(response.status_code, 403)
response = self.client.post(url, {
'unapprove': comment.pk,
})
eq_(response.status_code, 403)
response = self.client.post(url, {
'remove': comment.pk,
})
eq_(response.status_code, 403)
# but you can flag
response = self.client.post(url, {
'flag': comment.pk,
})
eq_(response.status_code, 200)
# but not unflag
response = self.client.post(url, {
'unflag': comment.pk,
})
eq_(response.status_code, 403)
def test_modify_comment_with_permission(self):
event = Event.objects.get(title='Test event')
discussion = self._create_discussion(event)
bob = User.objects.create(username='bob', email='bob@mozilla.com')
jay = User.objects.create_user(username='jay', password='secret')
discussion.moderators.add(jay)
comment = Comment.objects.create(
event=event,
user=bob,
comment='Bla bla',
status=Comment.STATUS_POSTED,
flagged=1
)
url = reverse('comments:event_data', args=(event.pk,))
assert self.client.login(username='jay', password='secret')
response = self.client.post(url, {
'approve': comment.pk,
})
eq_(response.status_code, 200)
ok_(Comment.objects.get(status=Comment.STATUS_APPROVED))
response = self.client.post(url, {
'unapprove': comment.pk,
})
eq_(response.status_code, 200)
ok_(Comment.objects.get(status=Comment.STATUS_POSTED))
response = self.client.post(url, {
'remove': comment.pk,
})
eq_(response.status_code, 200)
ok_(Comment.objects.get(status=Comment.STATUS_REMOVED))
response = self.client.post(url, {
'unflag': comment.pk,
})
eq_(response.status_code, 200)
ok_(Comment.objects.get(flagged=0))
def test_event_data_latest_400(self):
cache.clear()
event = Event.objects.get(title='Test event')
url = reverse('comments:event_data_latest', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 400)
discussion = self._create_discussion(event)
discussion.enabled = False
discussion.save()
response = self.client.get(url)
eq_(response.status_code, 400)
def test_event_data_latest(self):
event = Event.objects.get(title='Test event')
self._create_discussion(event)
url = reverse('comments:event_data_latest', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure['latest_comment'], None)
bob = User.objects.create(username='bob', email='bob@mozilla.com')
comment = Comment.objects.create(
user=bob,
event=event,
comment="Hi, it's Bob",
status=Comment.STATUS_POSTED
)
response = self.client.get(url)
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure['latest_comment'], None)
response = self.client.get(url, {'include_posted': True})
eq_(response.status_code, 200)
structure = json.loads(response.content)
modified = calendar.timegm(comment.modified.utctimetuple())
eq_(structure['latest_comment'], modified)
# ask it again and it should be the same
response_second = self.client.get(url, {'include_posted': True})
eq_(response_second.status_code, 200)
eq_(response.content, response_second.content)
|
bugzPDX/airmozilla
|
airmozilla/comments/tests/test_views.py
|
Python
|
bsd-3-clause
| 22,064
|
[
"CASINO",
"VisIt"
] |
84a71e048f17b478950180ee90f1fe43e70baa5acc6a30dbf8e6a83206e96d09
|
from __future__ import division, print_function, absolute_import
from scipy.lib.six import xrange
import scipy.special
from numpy import (logical_and, asarray, pi, zeros_like,
piecewise, array, arctan2, tan, zeros, arange, floor)
from numpy.core.umath import (sqrt, exp, greater, less, cos, add, sin,
less_equal, greater_equal)
# From splinemodule.c
from .spline import cspline2d, sepfir2d
from scipy.misc import comb
__all__ = ['spline_filter', 'bspline', 'gauss_spline', 'cubic', 'quadratic',
'cspline1d', 'qspline1d', 'cspline1d_eval', 'qspline1d_eval']
gamma = scipy.special.gamma
def factorial(n):
return gamma(n + 1)
def spline_filter(Iin, lmbda=5.0):
"""Smoothing spline (cubic) filtering of a rank-2 array.
Filter an input data set, `Iin`, using a (cubic) smoothing spline of
fall-off `lmbda`.
"""
intype = Iin.dtype.char
hcol = array([1.0, 4.0, 1.0], 'f') / 6.0
if intype in ['F', 'D']:
Iin = Iin.astype('F')
ckr = cspline2d(Iin.real, lmbda)
cki = cspline2d(Iin.imag, lmbda)
outr = sepfir2d(ckr, hcol, hcol)
outi = sepfir2d(cki, hcol, hcol)
out = (outr + 1j * outi).astype(intype)
elif intype in ['f', 'd']:
ckr = cspline2d(Iin, lmbda)
out = sepfir2d(ckr, hcol, hcol)
out = out.astype(intype)
else:
raise TypeError("Invalid data type for Iin")
return out
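# A minimal usage sketch (illustrative, not from the original module): feeds a
# random rank-2 float64 array through `spline_filter`; wrapped in a function so
# importing the module stays side-effect free.
def _example_spline_filter():
    import numpy as np
    noisy = np.random.rand(32, 32)               # rank-2, dtype 'd'
    smoothed = spline_filter(noisy, lmbda=5.0)   # same shape and dtype back
    assert smoothed.shape == noisy.shape
    return smoothed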
_splinefunc_cache = {}
def _bspline_piecefunctions(order):
"""Returns the function defined over the left-side pieces for a bspline of
a given order.
The 0th piece is the first one less than 0. The last piece is a function
identical to 0 (returned as the constant 0). (There are order//2 + 2 total
pieces).
Also returns the condition functions that when evaluated return boolean
arrays for use with `numpy.piecewise`.
"""
try:
return _splinefunc_cache[order]
except KeyError:
pass
def condfuncgen(num, val1, val2):
if num == 0:
return lambda x: logical_and(less_equal(x, val1),
greater_equal(x, val2))
elif num == 2:
return lambda x: less_equal(x, val2)
else:
return lambda x: logical_and(less(x, val1),
greater_equal(x, val2))
last = order // 2 + 2
if order % 2:
startbound = -1.0
else:
startbound = -0.5
condfuncs = [condfuncgen(0, 0, startbound)]
bound = startbound
for num in xrange(1, last - 1):
condfuncs.append(condfuncgen(1, bound, bound - 1))
bound = bound - 1
condfuncs.append(condfuncgen(2, 0, -(order + 1) / 2.0))
# final value of bound is used in piecefuncgen below
# the functions to evaluate are taken from the left-hand-side
# in the general expression derived from the central difference
# operator (because they involve fewer terms).
fval = factorial(order)
def piecefuncgen(num):
Mk = order // 2 - num
if (Mk < 0):
return 0 # final function is 0
coeffs = [(1 - 2 * (k % 2)) * float(comb(order + 1, k, exact=1)) / fval
for k in xrange(Mk + 1)]
shifts = [-bound - k for k in xrange(Mk + 1)]
def thefunc(x):
res = 0.0
for k in range(Mk + 1):
res += coeffs[k] * (x + shifts[k]) ** order
return res
return thefunc
funclist = [piecefuncgen(k) for k in xrange(last)]
_splinefunc_cache[order] = (funclist, condfuncs)
return funclist, condfuncs
def bspline(x, n):
"""B-spline basis function of order n.
Notes
-----
Uses numpy.piecewise and automatic function-generator.
"""
ax = -abs(asarray(x))
# number of pieces on the left-side is (n+1)/2
funclist, condfuncs = _bspline_piecefunctions(n)
condlist = [func(ax) for func in condfuncs]
return piecewise(ax, condlist, funclist)
def gauss_spline(x, n):
"""Gaussian approximation to B-spline basis function of order n.
"""
signsq = (n + 1) / 12.0
return 1 / sqrt(2 * pi * signsq) * exp(-x ** 2 / 2 / signsq)
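# A short comparison sketch (illustrative, not from the original module): for
# moderate n the Gaussian approximation tracks the exact B-spline closely but
# not exactly, so only rough agreement is asserted here.
def _example_bspline_vs_gauss():
    import numpy as np
    x = np.linspace(-2.0, 2.0, 9)
    exact = bspline(x, 3)        # exact cubic B-spline values
    approx = gauss_spline(x, 3)  # Gaussian approximation to the same basis
    assert np.allclose(exact, approx, atol=0.05)
    return exact, approx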
def cubic(x):
"""A cubic B-spline.
This is a special case of `bspline`, and equivalent to ``bspline(x, 3)``.
"""
ax = abs(asarray(x))
res = zeros_like(ax)
cond1 = less(ax, 1)
if cond1.any():
ax1 = ax[cond1]
res[cond1] = 2.0 / 3 - 1.0 / 2 * ax1 ** 2 * (2 - ax1)
cond2 = ~cond1 & less(ax, 2)
if cond2.any():
ax2 = ax[cond2]
res[cond2] = 1.0 / 6 * (2 - ax2) ** 3
return res
def quadratic(x):
"""A quadratic B-spline.
This is a special case of `bspline`, and equivalent to ``bspline(x, 2)``.
"""
ax = abs(asarray(x))
res = zeros_like(ax)
cond1 = less(ax, 0.5)
if cond1.any():
ax1 = ax[cond1]
res[cond1] = 0.75 - ax1 ** 2
cond2 = ~cond1 & less(ax, 1.5)
if cond2.any():
ax2 = ax[cond2]
res[cond2] = (ax2 - 1.5) ** 2 / 2.0
return res
def _coeff_smooth(lam):
xi = 1 - 96 * lam + 24 * lam * sqrt(3 + 144 * lam)
omeg = arctan2(sqrt(144 * lam - 1), sqrt(xi))
rho = (24 * lam - 1 - sqrt(xi)) / (24 * lam)
rho = rho * sqrt((48 * lam + 24 * lam * sqrt(3 + 144 * lam)) / xi)
return rho, omeg
def _hc(k, cs, rho, omega):
return (cs / sin(omega) * (rho ** k) * sin(omega * (k + 1)) *
greater(k, -1))
def _hs(k, cs, rho, omega):
c0 = (cs * cs * (1 + rho * rho) / (1 - rho * rho) /
(1 - 2 * rho * rho * cos(2 * omega) + rho ** 4))
gamma = (1 - rho * rho) / (1 + rho * rho) / tan(omega)
ak = abs(k)
return c0 * rho ** ak * (cos(omega * ak) + gamma * sin(omega * ak))
def _cubic_smooth_coeff(signal, lamb):
rho, omega = _coeff_smooth(lamb)
cs = 1 - 2 * rho * cos(omega) + rho * rho
K = len(signal)
yp = zeros((K,), signal.dtype.char)
k = arange(K)
yp[0] = (_hc(0, cs, rho, omega) * signal[0] +
add.reduce(_hc(k + 1, cs, rho, omega) * signal))
yp[1] = (_hc(0, cs, rho, omega) * signal[0] +
_hc(1, cs, rho, omega) * signal[1] +
add.reduce(_hc(k + 2, cs, rho, omega) * signal))
for n in range(2, K):
yp[n] = (cs * signal[n] + 2 * rho * cos(omega) * yp[n - 1] -
rho * rho * yp[n - 2])
y = zeros((K,), signal.dtype.char)
y[K - 1] = add.reduce((_hs(k, cs, rho, omega) +
_hs(k + 1, cs, rho, omega)) * signal[::-1])
y[K - 2] = add.reduce((_hs(k - 1, cs, rho, omega) +
_hs(k + 2, cs, rho, omega)) * signal[::-1])
for n in range(K - 3, -1, -1):
y[n] = (cs * yp[n] + 2 * rho * cos(omega) * y[n + 1] -
rho * rho * y[n + 2])
return y
def _cubic_coeff(signal):
zi = -2 + sqrt(3)
K = len(signal)
yplus = zeros((K,), signal.dtype.char)
powers = zi ** arange(K)
yplus[0] = signal[0] + zi * add.reduce(powers * signal)
for k in range(1, K):
yplus[k] = signal[k] + zi * yplus[k - 1]
output = zeros((K,), signal.dtype)
output[K - 1] = zi / (zi - 1) * yplus[K - 1]
for k in range(K - 2, -1, -1):
output[k] = zi * (output[k + 1] - yplus[k])
return output * 6.0
def _quadratic_coeff(signal):
zi = -3 + 2 * sqrt(2.0)
K = len(signal)
yplus = zeros((K,), signal.dtype.char)
powers = zi ** arange(K)
yplus[0] = signal[0] + zi * add.reduce(powers * signal)
for k in range(1, K):
yplus[k] = signal[k] + zi * yplus[k - 1]
output = zeros((K,), signal.dtype.char)
output[K - 1] = zi / (zi - 1) * yplus[K - 1]
for k in range(K - 2, -1, -1):
output[k] = zi * (output[k + 1] - yplus[k])
return output * 8.0
def cspline1d(signal, lamb=0.0):
"""
Compute cubic spline coefficients for rank-1 array.
Find the cubic spline coefficients for a 1-D signal assuming
mirror-symmetric boundary conditions. To obtain the signal back from the
spline representation mirror-symmetric-convolve these coefficients with a
length 3 FIR window [1.0, 4.0, 1.0]/ 6.0 .
Parameters
----------
signal : ndarray
A rank-1 array representing samples of a signal.
lamb : float, optional
Smoothing coefficient, default is 0.0.
Returns
-------
c : ndarray
Cubic spline coefficients.
"""
if lamb != 0.0:
return _cubic_smooth_coeff(signal, lamb)
else:
return _cubic_coeff(signal)
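# A round-trip sketch (illustrative, not from the original module): evaluating
# the cubic spline built by `cspline1d` back at the knot positions reproduces
# the input signal, which is the property the docstring above describes.
# `cspline1d_eval` is defined further down in this module.
def _example_cspline1d_roundtrip():
    import numpy as np
    x = np.arange(10, dtype=float)
    signal = np.sin(x / 3.0)
    cj = cspline1d(signal)          # coefficients at the knots
    recon = cspline1d_eval(cj, x)   # evaluate at the original knots
    assert np.allclose(recon, signal)
    return recon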
def qspline1d(signal, lamb=0.0):
"""Compute quadratic spline coefficients for rank-1 array.
Find the quadratic spline coefficients for a 1-D signal assuming
mirror-symmetric boundary conditions. To obtain the signal back from the
spline representation mirror-symmetric-convolve these coefficients with a
length 3 FIR window [1.0, 6.0, 1.0]/ 8.0 .
Parameters
----------
signal : ndarray
A rank-1 array representing samples of a signal.
lamb : float, optional
Smoothing coefficient (must be zero for now).
Returns
-------
c : ndarray
        Quadratic spline coefficients.
"""
if lamb != 0.0:
raise ValueError("Smoothing quadratic splines not supported yet.")
else:
return _quadratic_coeff(signal)
def cspline1d_eval(cj, newx, dx=1.0, x0=0):
"""Evaluate a spline at the new set of points.
`dx` is the old sample-spacing while `x0` was the old origin. In
    other words the old sample points (knot points) for which the `cj`
represent spline coefficients were at equally-spaced points of:
oldx = x0 + j*dx j=0...N-1, with N=len(cj)
Edges are handled using mirror-symmetric boundary conditions.
"""
newx = (asarray(newx) - x0) / float(dx)
res = zeros_like(newx)
if res.size == 0:
return res
N = len(cj)
cond1 = newx < 0
cond2 = newx > (N - 1)
cond3 = ~(cond1 | cond2)
# handle general mirror-symmetry
res[cond1] = cspline1d_eval(cj, -newx[cond1])
res[cond2] = cspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
newx = newx[cond3]
if newx.size == 0:
return res
result = zeros_like(newx)
jlower = floor(newx - 2).astype(int) + 1
for i in range(4):
thisj = jlower + i
indj = thisj.clip(0, N - 1) # handle edge cases
result += cj[indj] * cubic(newx - thisj)
res[cond3] = result
return res
def qspline1d_eval(cj, newx, dx=1.0, x0=0):
"""Evaluate a quadratic spline at the new set of points.
`dx` is the old sample-spacing while `x0` was the old origin. In
    other words the old sample points (knot points) for which the `cj`
represent spline coefficients were at equally-spaced points of::
oldx = x0 + j*dx j=0...N-1, with N=len(cj)
Edges are handled using mirror-symmetric boundary conditions.
"""
newx = (asarray(newx) - x0) / dx
res = zeros_like(newx)
if res.size == 0:
return res
N = len(cj)
cond1 = newx < 0
cond2 = newx > (N - 1)
cond3 = ~(cond1 | cond2)
# handle general mirror-symmetry
res[cond1] = qspline1d_eval(cj, -newx[cond1])
res[cond2] = qspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
newx = newx[cond3]
if newx.size == 0:
return res
result = zeros_like(newx)
jlower = floor(newx - 1.5).astype(int) + 1
for i in range(3):
thisj = jlower + i
indj = thisj.clip(0, N - 1) # handle edge cases
result += cj[indj] * quadratic(newx - thisj)
res[cond3] = result
return res
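# A round-trip sketch with shifted knots (illustrative, not from the original
# module): exercises the `dx`/`x0` mapping documented above by sampling on a
# grid that does not start at zero.
def _example_qspline1d_shifted_knots():
    import numpy as np
    x0, dx = 2.0, 0.5
    oldx = x0 + dx * np.arange(20)   # knot positions used to build the spline
    signal = np.cos(oldx)
    cj = qspline1d(signal)
    recon = qspline1d_eval(cj, oldx, dx=dx, x0=x0)
    assert np.allclose(recon, signal)
    return recon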
|
jsilter/scipy
|
scipy/signal/bsplines.py
|
Python
|
bsd-3-clause
| 11,622
|
[
"Gaussian"
] |
324a8d32248958d69ca5266631d7d3974dce2c5a01b2fc1042eeb1405d3c3837
|
import json
import re
from string import upper
from itertools import izip
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.db import connection
from catmaid.models import UserRole, Project, Class, ClassInstance, \
ClassInstanceClassInstance, Relation
from catmaid.control.authentication import requires_user_role, can_edit_or_fail
from catmaid.control.common import defaultdict
def create_basic_annotated_entity_query(project, params, relations, classes,
allowed_classes=['neuron', 'annotation']):
# Get IDs of constraining classes.
allowed_class_ids = [classes[c] for c in allowed_classes]
annotated_with = relations['annotated_with']
# One set for requested annotations and one for those of which
# subannotations should be included
annotations = set()
annotations_to_expand = set()
# Get name, annotator and time constraints, if available
name = params.get('neuron_query_by_name', "").strip()
annotator_id = params.get('neuron_query_by_annotator', None)
start_date = params.get('neuron_query_by_start_date', "").strip()
end_date = params.get('neuron_query_by_end_date', "").strip()
# Collect annotations and sub-annotation information
for key in params:
if key.startswith('neuron_query_by_annotation'):
annotations.add(int(params[key]))
elif key.startswith('neuron_query_include_subannotation'):
annotations_to_expand.add(int(params[key]))
# Construct a dictionary that contains all the filters needed for the
# current query.
filters = {
'project': project,
'class_column_id__in': allowed_class_ids,
}
# If a name is given, add this to the query
if name:
filters['name__iregex'] = name
# Add annotator and time constraints, if available
if annotator_id:
filters['cici_via_a__user'] = annotator_id
filters['cici_via_a__relation_id'] = annotated_with
if start_date:
filters['cici_via_a__creation_time__gte'] = start_date
if end_date:
filters['cici_via_a__creation_time__lte'] = end_date
# Get map of annotations to expand and their sub-annotations
sub_annotation_ids = get_sub_annotation_ids(project, annotations_to_expand,
relations, classes)
    # Collect each annotation and its sub-annotation IDs (if requested) in a
    # separate set. In the final query the sets are combined with AND, while
    # the IDs within a single set are combined with OR.
annotation_id_sets = []
for a in annotations:
current_annotation_ids = set([a])
# Add sub annotations, if requested
sa_ids = sub_annotation_ids.get(a)
if sa_ids and len(sa_ids):
current_annotation_ids.update(sa_ids)
annotation_id_sets.append(current_annotation_ids)
# Due to Django's QuerySet syntax, we have to add the first
# annotation ID set constraint to the first filter we add.
if annotation_id_sets:
first_id_set = annotation_id_sets.pop()
filters['cici_via_a__relation_id'] = annotated_with
# Use IN (OR) for a single annotation and its sub-annotations
filters['cici_via_a__class_instance_b_id__in'] = first_id_set
# Create basic filter, possibly containing *one* annotation ID set
entities = ClassInstance.objects.filter(**filters)
# Add remaining filters for annotation constraints, if any
for annotation_id_set in annotation_id_sets:
entities = entities.filter(
cici_via_a__relation_id=annotated_with,
cici_via_a__class_instance_b_id__in=annotation_id_set)
# Create final query. Without any restriction, the result set will contain
# all instances of the given set of allowed classes.
return entities
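# A hedged illustration of the expected `params` layout (hypothetical ids; a
# live CATMAID database is required to actually run the query). The keys
# mirror the prefixes parsed above:
#
#   params = {
#       'neuron_query_by_name': 'ORN',
#       'neuron_query_by_annotation[0]': '14',
#       'neuron_query_include_subannotation[0]': '14',
#       'neuron_query_by_annotation[1]': '23',
#   }
#   qs = create_basic_annotated_entity_query(project, params, relations,
#                                            classes)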
def get_sub_annotation_ids(project_id, annotation_set, relations, classes):
""" Sub-annotations are annotations that are annotated with an annotation
from the annotation_set passed. Additionally, transivitely annotated
annotations are returned as well.
"""
if not annotation_set:
return {}
aaa_tuples = ClassInstanceClassInstance.objects.filter(
project_id=project_id,
class_instance_a__class_column=classes['annotation'],
class_instance_b__class_column=classes['annotation'],
relation_id = relations['annotated_with']).values_list(
'class_instance_b', 'class_instance_a')
# A set wrapper to keep a set in a dictionary
class set_wrapper:
def __init__(self):
self.data = set()
# Create a dictionary of all annotations annotating a set of annotations
aaa = {}
for aa in aaa_tuples:
sa_set = aaa.get(aa[0])
if sa_set is None:
sa_set = set_wrapper()
aaa[aa[0]] = sa_set
sa_set.data.add(aa[1])
# Collect all sub-annotations by following the annotation hierarchy for
# every annotation in the annotation set passed.
sa_ids = {}
for a in annotation_set:
# Start with an empty result set for each requested annotation
ls = set()
working_set = set([a])
while working_set:
parent_id = working_set.pop()
# Try to get the sub-annotations for this parent
child_ids = aaa.get(parent_id) or set_wrapper()
for child_id in child_ids.data:
if child_id not in sa_ids:
# Add all children as sub annotations
ls.add(child_id)
working_set.add(child_id)
# Store the result list for this ID
sa_ids[a] = list(ls)
return sa_ids
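# A hedged illustration (hypothetical ids): if annotation 5 is annotated with
# annotation 1, and annotation 9 is annotated with annotation 5, expanding the
# set {1} walks the hierarchy transitively and yields {1: [5, 9]} (the list
# order is unspecified because it is built from a set).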
def create_annotated_entity_list(project, entities_qs, relations, annotations=True):
""" Executes the expected class instance queryset in <entities> and expands
    it to acquire more information.
"""
# Cache class name
entities = entities_qs.select_related('class_column')
entity_ids = [e.id for e in entities]
    # Make a second query to retrieve the annotations
annotations = ClassInstanceClassInstance.objects.filter(
relation_id = relations['annotated_with'],
class_instance_a__id__in = entity_ids).order_by('id').values_list(
'class_instance_a', 'class_instance_b',
'class_instance_b__name', 'user__id')
annotation_dict = {}
for a in annotations:
if a[0] not in annotation_dict:
annotation_dict[a[0]] = []
annotation_dict[a[0]].append(
{'id': a[1], 'name': a[2], 'uid': a[3]})
# Make third query to retrieve all skeletons and root nodes for entities (if
# they have such).
skeletons = ClassInstanceClassInstance.objects.filter(
relation_id = relations['model_of'],
class_instance_b__in = entity_ids).order_by('id').values_list(
'class_instance_a', 'class_instance_b')
skeleton_dict = {}
for s in skeletons:
if s[1] not in skeleton_dict:
skeleton_dict[s[1]] = []
skeleton_dict[s[1]].append(s[0])
    annotated_entities = []
for e in entities:
class_name = e.class_column.class_name
annotations = annotation_dict[e.id] if e.id in annotation_dict else []
entity_info = {
'id': e.id,
'name': e.name,
'annotations': annotations,
'type': class_name,
}
# Depending on the type of entity, some extra information is added.
if class_name == 'neuron':
entity_info['skeleton_ids'] = skeleton_dict[e.id] \
if e.id in skeleton_dict else []
annotated_entities.append(entity_info)
return annotated_entities
@requires_user_role([UserRole.Browse])
def query_neurons_by_annotations(request, project_id = None):
p = get_object_or_404(Project, pk = project_id)
classes = dict(Class.objects.filter(project_id=project_id).values_list('class_name', 'id'))
relations = dict(Relation.objects.filter(project_id=project_id).values_list('relation_name', 'id'))
display_start = int(request.POST.get('display_start', 0))
display_length = int(request.POST.get('display_length', -1))
if display_length < 0:
display_length = 2000 # Default number of result rows
query = create_basic_annotated_entity_query(p, request.POST, relations,
classes)
query = query.order_by('id').distinct()
# Get total number of results
num_records = query.count()
# Limit and offset result to display range
query = query[display_start:display_start + display_length]
dump = create_annotated_entity_list(p, query, relations)
return HttpResponse(json.dumps({
'entities': dump,
'total_n_records': num_records,
}))
@requires_user_role([UserRole.Browse])
def query_neurons_by_annotations_datatable(request, project_id=None):
p = get_object_or_404(Project, pk = project_id)
classes = dict(Class.objects.filter(project_id=project_id).values_list('class_name', 'id'))
relations = dict(Relation.objects.filter(project_id=project_id).values_list('relation_name', 'id'))
display_start = int(request.POST.get('iDisplayStart', 0))
display_length = int(request.POST.get('iDisplayLength', -1))
if display_length < 0:
display_length = 2000 # Default number of result rows
neuron_query = create_basic_annotated_entity_query(p, request.POST,
relations, classes, allowed_classes=['neuron'])
search_term = request.POST.get('sSearch', '')
if len(search_term) > 0:
neuron_query = neuron_query.filter(name__regex=search_term)
should_sort = request.POST.get('iSortCol_0', False)
if should_sort:
column_count = int(request.POST.get('iSortingCols', 0))
sorting_directions = [request.POST.get('sSortDir_%d' % d, 'DESC')
for d in range(column_count)]
sorting_directions = map(lambda d: '-' if upper(d) == 'DESC' else '',
sorting_directions)
fields = ['name', 'first_name', 'last_name']
sorting_index = [int(request.POST.get('iSortCol_%d' % d))
for d in range(column_count)]
sorting_cols = map(lambda i: fields[i], sorting_index)
neuron_query = neuron_query.extra(order_by=[di + col for (di, col) in zip(
sorting_directions, sorting_cols)])
# Make sure we get a distinct result (which otherwise might not be the case
# due to the JOINS that are made).
neuron_query = neuron_query.distinct()
# Since it is very likely that there are many neurons, it is more efficient
# to do two queries: 1. Get total number of neurons 2. Get limited set. The
# alternative would be to get all neurons for counting and limiting on the
# Python side. This, however, is too expensive when there are many neurons.
num_records = neuron_query.count()
response = {
'iTotalRecords': num_records,
'iTotalDisplayRecords': num_records,
'aaData': []
}
entities = create_annotated_entity_list(p,
neuron_query[display_start:display_start + display_length], relations)
for entity in entities:
if entity['type'] == 'neuron':
response['aaData'] += [[
entity['name'],
entity['annotations'],
entity['skeleton_ids'],
entity['id'],
]]
return HttpResponse(json.dumps(response), content_type='text/json')
def _update_neuron_annotations(project_id, user, neuron_id, annotation_map):
""" Ensure that the neuron is annotated_with only the annotations given.
    These annotations are expected to come as a dictionary of annotation name
versus annotator ID.
"""
qs = ClassInstanceClassInstance.objects.filter(
class_instance_a__id=neuron_id,
relation__relation_name='annotated_with')
qs = qs.select_related('class_instance_b').values_list(
'class_instance_b__name', 'class_instance_b__id')
existing_annotations = dict(qs)
update = set(annotation_map.iterkeys())
existing = set(existing_annotations.iterkeys())
missing = {k:v for k,v in annotation_map.items() if k in update - existing}
_annotate_entities(project_id, [neuron_id], missing)
to_delete = existing - update
to_delete_ids = tuple(aid for name, aid in existing_annotations.iteritems() \
if name in to_delete)
ClassInstanceClassInstance.objects.filter(
class_instance_a_id=neuron_id,
relation__relation_name='annotated_with',
class_instance_b_id__in=to_delete_ids).delete()
def _annotate_entities(project_id, entity_ids, annotation_map):
""" Annotate the entities with the given <entity_ids> with the given
annotations. These annotations are expected to come as dictornary of
annotation name versus annotator ID. A listof all annotation class
instances that have been used is returned. Annotation names can contain the
counting pattern {nX} with X being a number. This will add an incrementing
number starting from X for each entity.
"""
r = Relation.objects.get(project_id = project_id,
relation_name = 'annotated_with')
annotation_class = Class.objects.get(project_id = project_id,
class_name = 'annotation')
annotation_objects = {}
# Create a regular expression to find allowed patterns. The first group is
# the whole {nX} part, while the second group is X only.
counting_pattern = re.compile(r"(\{n(\d+)\})")
for annotation, annotator_id in annotation_map.items():
# Look for patterns, replace all {n} with {n1} to normalize
annotation = annotation.replace("{n}", "{n1}")
# Find all {nX} in the annotation name
expanded_annotations = {}
if counting_pattern.search(annotation):
# Create annotation names based on the counting patterns found, for
            # each entity.
for i, eid in enumerate(entity_ids):
a = annotation
while True:
# Find next match and cancel if there isn't any
m = counting_pattern.search(a)
if not m:
break
# Replace match
count = int(m.groups()[1]) + i
a = m.string[:m.start()] + str(count) + m.string[m.end():]
# Remember this annotation for the current entity
expanded_annotations[a] = [eid]
else:
# No matches, so use same annotation for all entities
expanded_annotations = {annotation: entity_ids}
# Make sure the annotation's class instance exists.
for a, a_entity_ids in expanded_annotations.iteritems():
ci, created = ClassInstance.objects.get_or_create(
project_id=project_id, name=a,
class_column=annotation_class,
defaults={'user_id': annotator_id})
newly_annotated = set()
# Annotate each of the entities. Don't allow duplicates.
for entity_id in a_entity_ids:
cici, created = ClassInstanceClassInstance.objects.get_or_create(
project_id=project_id, relation=r,
class_instance_a__id=entity_id, class_instance_b=ci,
defaults={'class_instance_a_id': entity_id,
'user_id': annotator_id})
if created:
newly_annotated.add(entity_id)
# Remember which entities got newly annotated
annotation_objects[ci] = newly_annotated
return annotation_objects
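# A hedged illustration of the counting pattern (hypothetical ids): annotating
# entity_ids [12, 13, 14] with the map {'neuron {n5}': user_id} creates the
# annotations 'neuron 5', 'neuron 6' and 'neuron 7', one per entity, because
# each '{nX}' match is replaced by X plus the entity's position in the list.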
@requires_user_role(UserRole.Annotate)
def annotate_entities(request, project_id = None):
p = get_object_or_404(Project, pk = project_id)
# Read keys in a sorted manner
sorted_keys = sorted(request.POST.keys())
annotations = [request.POST[k] for k in sorted_keys
if k.startswith('annotations[')]
meta_annotations = [request.POST[k] for k in sorted_keys
if k.startswith('meta_annotations[')]
entity_ids = [int(request.POST[k]) for k in sorted_keys
if k.startswith('entity_ids[')]
skeleton_ids = [int(request.POST[k]) for k in sorted_keys
if k.startswith('skeleton_ids[')]
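# The POST payload is expected in indexed form, e.g. (hypothetical values)
# annotations[0]=pruned, meta_annotations[0]=quality, entity_ids[0]=42 and
# skeleton_ids[0]=77; skeleton IDs are mapped to their neuron entities below.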
if any(skeleton_ids):
skid_to_eid = dict(ClassInstance.objects.filter(project = p,
class_column__class_name = 'neuron',
cici_via_b__relation__relation_name = 'model_of',
cici_via_b__class_instance_a__in = skeleton_ids).values_list(
'cici_via_b__class_instance_a', 'id'))
entity_ids += [skid_to_eid[skid] for skid in skeleton_ids]
# Annotate entities
annotation_map = {a: request.user.id for a in annotations}
annotation_objs = _annotate_entities(project_id, entity_ids, annotation_map)
# Annotate annotations
if meta_annotations:
annotation_ids = [a.id for a in annotation_objs.keys()]
meta_annotation_map = {ma: request.user.id for ma in meta_annotations}
meta_annotation_objs = _annotate_entities(project_id, annotation_ids,
meta_annotation_map)
# Update used annotation objects set
for ma, me in meta_annotation_objs.items():
entities = annotation_objs.get(ma)
if entities:
entities.update(me)
else:
annotation_objs[ma] = me
result = {
'message': 'success',
'annotations': [{'name': a.name, 'id': a.id, 'entities': list(e)} \
for a,e in annotation_objs.items()],
}
return HttpResponse(json.dumps(result), content_type='text/json')
@requires_user_role(UserRole.Annotate)
def remove_annotation(request, project_id=None, annotation_id=None):
""" Removes an annotation from one or more entities.
"""
p = get_object_or_404(Project, pk=project_id)
entity_ids = [int(v) for k,v in request.POST.iteritems()
if k.startswith('entity_ids[')]
# Get CICI instance representing the link
cici_n_a = ClassInstanceClassInstance.objects.filter(project=p,
class_instance_a__id__in=entity_ids,
class_instance_b__id=annotation_id)
# Make sure the current user has permissions to remove the annotation.
missed_cicis = []
cicis_to_delete = []
for cici in cici_n_a:
try:
can_edit_or_fail(request.user, cici.id,
'class_instance_class_instance')
cicis_to_delete.append(cici)
except Exception:
# Remember links for which permissions are missing
missed_cicis.append(cici)
# Remove the link between entity and annotation for all links for which
# the user has the necessary permissions.
if cicis_to_delete:
ClassInstanceClassInstance.objects \
.filter(id__in=[cici.id for cici in cicis_to_delete]) \
.delete()
if len(cicis_to_delete) > 1:
message = "Removed annotation from %s entities." % len(cicis_to_delete)
elif len(cicis_to_delete) == 1:
message = "Removed annotation from one entity."
else:
message = "No annotation removed."
if missed_cicis:
message += " Couldn't de-annotate %s entities, due to the lack of " \
"permissions." % len(missed_cicis)
# Remove the annotation class instance, regardless of the owner, if there
# are no more links to it
annotation_links = ClassInstanceClassInstance.objects.filter(project=p,
class_instance_b__id=annotation_id)
num_annotation_links = annotation_links.count()
if num_annotation_links == 0:
ClassInstance.objects.get(pk=annotation_id).delete()
message += " Also removed annotation instance, because it isn't used " \
"anywhere else."
else:
message += " There are %s links left to this annotation." \
% num_annotation_links
return HttpResponse(json.dumps({'message': message}), content_type='text/json')
def create_annotation_query(project_id, param_dict):
classes = dict(Class.objects.filter(project_id=project_id).values_list('class_name', 'id'))
relations = dict(Relation.objects.filter(project_id=project_id).values_list('relation_name', 'id'))
annotation_query = ClassInstance.objects.filter(project_id=project_id,
class_column__id=classes['annotation'])
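# param_dict mirrors request.POST; e.g. (hypothetical values) the keys
# 'annotations[0]': '12' and 'user_id': '3' would restrict the result to
# annotations that are meta-annotated by class instance 12 and used by user 3.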
# Meta annotations are annotations that are used to annotate other
# annotations.
meta_annotations = [v for k,v in param_dict.iteritems()
if k.startswith('annotations[')]
for meta_annotation in meta_annotations:
annotation_query = annotation_query.filter(
cici_via_b__relation_id = relations['annotated_with'],
cici_via_b__class_instance_a = meta_annotation)
# If information about annotated annotations is found, the current query
# will include only annotations that are meta annotations for them.
annotated_annotations = [v for k,v in param_dict.iteritems()
if k.startswith('annotates[')]
for sub_annotation in annotated_annotations:
annotation_query = annotation_query.filter(
cici_via_a__relation_id = relations['annotated_with'],
cici_via_a__class_instance_b = sub_annotation)
# If parallel_annotations is given, only annotations that are used
# alongside these are returned.
parallel_annotations = [v for k,v in param_dict.iteritems()
if k.startswith('parallel_annotations[')]
for p_annotation in parallel_annotations:
annotation_query = annotation_query.filter(
cici_via_b__class_instance_a__cici_via_a__relation_id = relations['annotated_with'],
cici_via_b__class_instance_a__cici_via_a__class_instance_b = p_annotation)
# Passing in a user ID causes the result set to only contain annotations
# that are used by the respective user. The query filter could lead to
# duplicate entries, therefore distinct() is added here.
user_id = param_dict.get('user_id', None)
if user_id:
user_id = int(user_id)
annotation_query = annotation_query.filter(
cici_via_b__user__id=user_id).distinct()
# With the help of the neuron_id field, it is possible to restrict the
# result set to only show annotations that are used for a particular neuron.
neuron_id = param_dict.get('neuron_id', None)
if neuron_id:
annotation_query = annotation_query.filter(
cici_via_b__relation_id = relations['annotated_with'],
cici_via_b__class_instance_a__id=neuron_id)
# Instead of a neuron ID, a user can also use a skeleton ID to constrain
# the annotation set returned. This is implicitly a neuron ID restriction.
skeleton_id = param_dict.get('skeleton_id', None)
if skeleton_id:
annotation_query = annotation_query.filter(
cici_via_b__relation_id = relations['annotated_with'],
cici_via_b__class_instance_a__cici_via_b__relation_id = relations['model_of'],
cici_via_b__class_instance_a__cici_via_b__class_instance_a__id = skeleton_id)
# If annotations to ignore are passed in, they won't appear in the
# result set.
ignored_annotations = [v for k,v in param_dict.iteritems()
if k.startswith('ignored_annotations[')]
if ignored_annotations:
annotation_query = annotation_query.exclude(
name__in=ignored_annotations)
return annotation_query
def generate_annotation_intersection_query(project_id, annotations):
if not annotations:
return
tables = []
where = []
for i, annotation in enumerate(annotations):
tables.append("""
class_instance c%s,
class_instance_class_instance cc%s""" % (i, i))
where.append("""
AND c%s.name = '%s'
AND c%s.id = cc%s.class_instance_b
AND cc%s.relation_id = r.id""" % (i, annotation, i, i, i))
q = """
SELECT c.id,
c.name
FROM class_instance c,
relation r,
%s
WHERE r.relation_name = 'annotated_with'
AND c.project_id = %s
%s
%s
""" % (',\n '.join(tables),
project_id,
'\n'.join(where),
'\n '.join('AND cc%s.class_instance_a = c.id' % i for i in xrange(len(annotations))))
return q
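# Sketch of the SQL this produces for two annotations 'A' and 'B' (abridged):
# SELECT c.id, c.name
# FROM class_instance c, relation r,
# class_instance c0, class_instance_class_instance cc0,
# class_instance c1, class_instance_class_instance cc1
# WHERE r.relation_name = 'annotated_with' AND c.project_id = <project>
# AND c0.name = 'A' AND c0.id = cc0.class_instance_b AND cc0.relation_id = r.id
# AND c1.name = 'B' AND c1.id = cc1.class_instance_b AND cc1.relation_id = r.id
# AND cc0.class_instance_a = c.id AND cc1.class_instance_a = c.id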
def generate_co_annotation_query(project_id, co_annotation_ids, classIDs, relationIDs):
if not co_annotation_ids:
return
tables = []
where = []
annotation_class = classIDs['annotation']
annotated_with = relationIDs['annotated_with']
for i, annotation_id in enumerate(co_annotation_ids):
tables.append("""
class_instance a%s,
class_instance_class_instance cc%s""" % (i, i))
where.append("""
AND a%s.project_id = %s
AND a%s.class_id = %s
AND cc%s.class_instance_a = neuron.id
AND cc%s.relation_id = %s
AND cc%s.class_instance_b = a%s.id
AND a%s.id = '%s'
""" % (i, project_id,
i, annotation_class,
i,
i, annotated_with,
i, i,
i, annotation_id))
select = """
SELECT DISTINCT
a.id,
a.name,
(SELECT username FROM auth_user, class_instance_class_instance cici
WHERE cici.class_instance_b = cc.id
AND cici.user_id = auth_user.id
ORDER BY cici.edition_time DESC LIMIT 1) AS "last_user",
(SELECT MAX(edition_time) FROM class_instance_class_instance cici WHERE cici.class_instance_b = a.id) AS "last_used",
(SELECT count(*) FROM class_instance_class_instance cici WHERE cici.class_instance_b = a.id) AS "num_usage"
"""
rest = """
FROM
class_instance a,
class_instance_class_instance cc,
class_instance neuron,
%s
WHERE
neuron.class_id = %s
AND a.class_id = %s
AND a.project_id = %s
AND cc.class_instance_a = neuron.id
AND cc.relation_id = %s
AND cc.class_instance_b = a.id
%s
""" % (',\n'.join(tables),
classIDs['neuron'],
annotation_class,
project_id,
annotated_with,
''.join(where))
return select, rest
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def list_annotations(request, project_id=None):
""" Creates a list of objects containing an annotation name and the user
name and ID of the users having linked that particular annotation.
"""
annotation_query = create_annotation_query(project_id, request.POST)
annotation_tuples = annotation_query.distinct().values_list('name', 'id',
'cici_via_b__user__id', 'cici_via_b__user__username')
# Create a set mapping annotation names to its users
ids = {}
annotation_dict = {}
for annotation, aid, uid, username in annotation_tuples:
ids[aid] = annotation
ls = annotation_dict.get(aid)
if ls is None:
ls = []
annotation_dict[aid] = ls
ls.append({'id': uid, 'name': username})
# Flatten dictionary to list
annotations = tuple({'name': ids[aid], 'id': aid, 'users': users} for aid, users in annotation_dict.iteritems())
return HttpResponse(json.dumps({'annotations': annotations}), content_type="text/json")
def _fast_co_annotations(request, project_id, display_start, display_length):
classIDs = dict(Class.objects.filter(project_id=project_id).values_list('class_name', 'id'))
relationIDs = dict(Relation.objects.filter(project_id=project_id).values_list('relation_name', 'id'))
co_annotation_ids = set(int(v) for k, v in request.POST.iteritems() if k.startswith('parallel_annotations'))
select, rest = generate_co_annotation_query(int(project_id), co_annotation_ids, classIDs, relationIDs)
entries = []
search_term = request.POST.get('sSearch', '').strip()
if search_term:
rest += "\nAND a.name ~ %s" # django will escape and quote the string
entries.append(search_term)
# Sorting?
sorting = request.POST.get('iSortCol_0', False)
sorter = ''
if sorting:
column_count = int(request.POST.get('iSortingCols', 0))
sorting_directions = (request.POST.get('sSortDir_%d' % d, 'DESC') for d in xrange(column_count))
fields = ('a.name', 'last_used', 'num_usage', 'last_user')
sorting_index = (int(request.POST.get('iSortCol_%d' % d)) for d in xrange(column_count))
sorting_cols = (fields[i] for i in sorting_index)
sorter = '\nORDER BY ' + ','.join('%s %s' % u for u in izip(sorting_cols, sorting_directions))
cursor = connection.cursor()
cursor.execute("SELECT count(DISTINCT a.id) " + rest, entries)
num_records = cursor.fetchone()[0]
response = {
'iTotalRecords': num_records,
'iTotalDisplayRecords': num_records,
}
rest += sorter
rest += '\nLIMIT %s OFFSET %s'
entries.append(display_length) # total to return
entries.append(display_start) # offset
cursor.execute(select + rest, entries)
# 0: a.id
# 1: a.name
# 2: last_user
# 3: last_used
# 4: num_usage
aaData = []
for row in cursor.fetchall():
last_used = row[3]
if last_used:
last_used = last_used.strftime("%Y-%m-%d %H:%M:%S")
else:
last_used = 'never'
aaData.append([row[1], # Annotation name
last_used, # Last used
row[4], # Usage count
row[2], # Last annotator
row[0]]) # Annotation ID
response['aaData'] = aaData
return HttpResponse(json.dumps(response), content_type='text/json')
@requires_user_role([UserRole.Browse])
def list_annotations_datatable(request, project_id=None):
display_start = int(request.POST.get('iDisplayStart', 0))
display_length = int(request.POST.get('iDisplayLength', -1))
if display_length < 0:
display_length = 2000 # Default number of result rows
# Speed hack
if 'parallel_annotations[0]' in request.POST:
return _fast_co_annotations(request, project_id, display_start, display_length)
annotation_query = create_annotation_query(project_id, request.POST)
should_sort = request.POST.get('iSortCol_0', False)
search_term = request.POST.get('sSearch', '')
# Additional information should also be constrained by neurons and user
# names. E.g., when viewing the annotation list for a user, the usage count
# should only display the number of times the user has used an annotation.
conditions = ""
if request.POST.get('neuron_id'):
conditions += "AND cici.class_instance_a = %s " % \
request.POST.get('neuron_id')
if request.POST.get('user_id'):
conditions += "AND cici.user_id = %s " % \
request.POST.get('user_id')
# Add last used time
annotation_query = annotation_query.extra(
select={'last_used': 'SELECT MAX(edition_time) FROM ' \
'class_instance_class_instance cici WHERE ' \
'cici.class_instance_b = class_instance.id %s' % conditions})
# Add user ID of last user
annotation_query = annotation_query.extra(
select={'last_user': 'SELECT auth_user.id FROM auth_user, ' \
'class_instance_class_instance cici ' \
'WHERE cici.class_instance_b = class_instance.id ' \
'AND cici.user_id = auth_user.id %s' \
'ORDER BY cici.edition_time DESC LIMIT 1' % conditions})
# Add usage count
annotation_query = annotation_query.extra(
select={'num_usage': 'SELECT COUNT(*) FROM ' \
'class_instance_class_instance cici WHERE ' \
'cici.class_instance_b = class_instance.id %s' % conditions})
if len(search_term) > 0:
annotation_query = annotation_query.filter(name__regex=search_term)
if should_sort:
column_count = int(request.POST.get('iSortingCols', 0))
sorting_directions = [request.POST.get('sSortDir_%d' % d, 'DESC')
for d in xrange(column_count)]
sorting_directions = map(lambda d: '-' if upper(d) == 'DESC' else '',
sorting_directions)
fields = ['name', 'last_used', 'num_usage', 'last_user']
sorting_index = [int(request.POST.get('iSortCol_%d' % d))
for d in xrange(column_count)]
sorting_cols = map(lambda i: fields[i], sorting_index)
annotation_query = annotation_query.extra(order_by=[di + col for (di, col) in zip(
sorting_directions, sorting_cols)])
# We only require ID, name, last used and usage number
annotation_query = annotation_query.values_list(
'id', 'name', 'last_used', 'num_usage', 'last_user')
# Make sure we get a distinct result (which otherwise might not be the case
# due to the JOINS that are made).
annotation_query = annotation_query.distinct()
#num_records = annotation_query.count() # len(annotation_query)
num_records = len(annotation_query)
response = {
'iTotalRecords': num_records,
'iTotalDisplayRecords': num_records,
'aaData': []
}
for annotation in annotation_query[display_start:display_start + display_length]:
# Format last used time
if annotation[2]:
last_used = annotation[2].strftime("%Y-%m-%d %H:%M:%S")
else:
last_used = 'never'
# Build datatable data structure
response['aaData'].append([
annotation[1], # Name
last_used, # Last used
annotation[3], # Usage
annotation[4], # Annotator ID
annotation[0]]) # ID
return HttpResponse(json.dumps(response), content_type='text/json')
@requires_user_role([UserRole.Browse])
def annotations_for_skeletons(request, project_id=None):
skids = tuple(int(skid) for key, skid in request.POST.iteritems() if key.startswith('skids['))
cursor = connection.cursor()
cursor.execute("SELECT id FROM relation WHERE project_id=%s AND relation_name='annotated_with'" % int(project_id))
annotated_with_id = cursor.fetchone()[0]
# Select pairs of skeleton_id vs annotation name
cursor.execute('''
SELECT skeleton_neuron.class_instance_a,
annotation.name
FROM class_instance_class_instance skeleton_neuron,
class_instance_class_instance neuron_annotation,
class_instance annotation
WHERE skeleton_neuron.class_instance_a IN (%s)
AND skeleton_neuron.class_instance_b = neuron_annotation.class_instance_a
AND neuron_annotation.relation_id = %s
AND neuron_annotation.class_instance_b = annotation.id
''' % (",".join(map(str, skids)), annotated_with_id))
# Group by skeleton ID
m = defaultdict(list)
for skid, name in cursor.fetchall():
m[skid].append(name)
return HttpResponse(json.dumps(m, separators=(',', ':')))
|
fzadow/CATMAID
|
django/applications/catmaid/control/neuron_annotations.py
|
Python
|
agpl-3.0
| 35,408
|
[
"NEURON"
] |
e28c14c9aca9f854ddb5d6919830ed177a91229411cf7da6d0719929fed42aae
|
# Begin: Python 2/3 compatibility header small
# Get Python 3 functionality:
from __future__ import\
absolute_import, print_function, division, unicode_literals
from future.utils import raise_with_traceback, raise_from
# catch exception with: except Exception as e
from builtins import range, map, zip, filter
from io import open
import six
# End: Python 2/3 compatibility header small
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import nn_patterns
import nn_patterns.utils.fileio
import nn_patterns.utils.tests.networks.imagenet
import lasagne
import theano
import imp
base_dir = os.path.dirname(__file__)
eutils = imp.load_source("utils", os.path.join(base_dir, "utils.py"))
param_file = "./imagenet_224_vgg_16.npz"
# Note those weights are CC 4.0:
# See http://www.robots.ox.ac.uk/~vgg/research/very_deep/
param_url = "https://www.dropbox.com/s/cvjj8x19hzya9oe/imagenet_224_vgg_16.npz?dl=1"
pattern_file = "./imagenet_224_vgg_16.pattern_file.A_only.npz"
pattern_url = "https://www.dropbox.com/s/v7e0px44jqwef5k/imagenet_224_vgg_16.patterns.A_only.npz?dl=1"
if __name__ == "__main__":
# Download the necessary parameters for VGG16 and the according patterns.
eutils.download(param_url, param_file)
eutils.download(pattern_url, pattern_file)
# Get some example test set images.
images, label_to_class_name = eutils.get_imagenet_data()
# We want to explain the output neuron with the maximal activation.
target = "max_output"
# Methods we use and some properties.
methods = {
# NAME POSTPROCESSING TITLE (GROUP)INDEX
# Show input.
"input": (eutils.original_image, ("", "Input"), (0, 0) ),
# Function
"gradient": (eutils.back_projection, ("", "Gradient"), (1, 1) ),
# Signal
"deconvnet": (eutils.back_projection, ("", "DeConvNet"), (2, 2) ),
"guided": (eutils.back_projection, ("Guided",
"Backprop"), (2, 3) ),
"patternnet": (eutils.back_projection, ("PatterNet",
"($S_{a+-}$)"), (2, 4) ),
# Interaction
"patternlrp": (eutils.heatmap, ("PatternLRP",
"($S_{a+-}$)"), (3, 5) ),
"lrp.z": (eutils.heatmap, ("", "LRP"), (3, 6) ),
}
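# The (group, index) tuple fixes each method's column in the final figure:
# the group index inserts one image-width of blank space between the input,
# function, signal and interaction blocks when the big image is assembled.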
###########################################################################
# Build model.
###########################################################################
parameters = nn_patterns.utils.fileio.load_parameters(param_file)
vgg16 = nn_patterns.utils.tests.networks.imagenet.vgg16()
lasagne.layers.set_all_param_values(vgg16["out"], parameters)
# Create prediction model.
predict_f = theano.function([vgg16["input_var"]],
lasagne.layers.get_output(vgg16["out"],
deterministic=True))
###########################################################################
# Explanations.
###########################################################################
# Create explainers.
patterns = nn_patterns.utils.fileio.load_patterns(pattern_file)
explainers = {}
for method in methods:
explainers[method] = nn_patterns.create_explainer(method,
vgg16["out"],
patterns=[patterns])
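# Each explainer is built on the same VGG16 output node; only the
# attribution rule (gradient, deconvnet, guided backprop, pattern-based
# signal/attribution, LRP) differs between them.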
# Create explanations.
explanations = np.zeros([len(images), len(explainers), 3, 224, 224])
text = []
for i, (image, y) in enumerate(images):
# Predict label.
x = eutils.preprocess(image)[None, :, :, :]
prob = predict_f(x)[0]
y_hat = prob.argmax()
text.append((r"\textbf{%s}" % label_to_class_name[y],
r"\textit{(%.2f)}" % prob.max(),
r"\textit{%s}" % label_to_class_name[y_hat]))
for eid in explainers:
# Explain.
e = explainers[eid].explain(x, target=target)[0]
# Postprocess.
e = methods[eid][0](e)
explanations[i, methods[eid][-1][1]] = e
###########################################################################
# Plot the explanations.
###########################################################################
n_samples = len(images)
n_padding = n_samples-1
per_image = 3.2
shape_per_image = [s + n_padding for s in (224, 224)]
big_image = np.ones((3,
n_padding + n_samples * shape_per_image[1],
n_padding + (3+len(methods)) * shape_per_image[0]),
dtype=np.float32)
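# Canvas sketch: one row per sample and one column per method, plus the three
# blank spacer columns created by the group offsets above; every cell is
# 224x224 plus n_padding pixels of white separation.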
for i, _ in enumerate(images):
for eid in explainers:
egr_idx, e_idx = methods[eid][-1]
big_image = eutils.put_into_big_image(explanations[i, e_idx],
big_image, i,
e_idx + egr_idx,
n_padding)
group_fontsize = 20
fontsize = 15
plt.figure(figsize=(n_samples * per_image,
(3 + len(methods)) * per_image),
dpi=224)
plt.clf()
plt.imshow(big_image.transpose(1, 2, 0), interpolation="nearest")
plt.tick_params(axis="x", which="both",
bottom="off", top="off", labelbottom="off")
plt.tick_params(axis="y", which="both",
bottom="off", top="off", labelbottom="off")
plt.axis("off")
plt.rc("text", usetex=True)
plt.rc("font", family="sans-serif")
# Plot the labels and probability.
for i, s_list in enumerate(text):
for s, offset in zip(s_list, [-50, 0, 50]):
plt.text(-120,
(offset + n_padding + shape_per_image[0]
// 2 + shape_per_image[0] * i),
s, fontsize=fontsize, ha="center")
# Plot the methods names.
for eid in methods:
egr_idx, e_idx = methods[eid][-1]
s1, s2 = methods[eid][1]
plt.text((n_padding + shape_per_image[0] // 2
+ shape_per_image[0] * (e_idx+egr_idx)),
-70, s1, fontsize=fontsize, ha="center")
plt.text((n_padding + shape_per_image[0] // 2
+ shape_per_image[0] * (e_idx+egr_idx)),
-20, s2, fontsize=fontsize, ha="center")
# Plot group titles.
for txt, loc in [("function", 5), ("signal", 11), ("interaction", 18)]:
plt.text(loc * shape_per_image[0] // 2, -160,
r"\textbf{%s}" % txt, fontsize=group_fontsize, ha="center",
va="center", color="gray")
plt.savefig("all_methods.pdf")
|
pikinder/nn-patterns
|
examples/all_methods.py
|
Python
|
mit
| 6,964
|
[
"NEURON"
] |
cc8249e99d5fb04c1a637ed239f1ed8eddc9b691deb1322522ae0d0db4ec320d
|
# -*- coding: utf-8 -*-
"""
@author: Fabio Erculiani <lxnay@sabayon.org>
@contact: lxnay@sabayon.org
@copyright: Fabio Erculiani
@license: GPL-2
B{Entropy Package Manager Client Core Interface}.
"""
import os
import shutil
import threading
from entropy.core import Singleton
from entropy.locks import EntropyResourcesLock
from entropy.fetchers import UrlFetcher, MultipleUrlFetcher
from entropy.output import TextInterface, bold, red, darkred, blue
from entropy.qa import QAInterface
from entropy.security import System, Repository as RepositorySecurity
from entropy.spm.plugins.factory import get_default_instance as get_spm, \
get_default_class as get_spm_default_class
from entropy.client.interfaces.db import InstalledPackagesRepository
from entropy.client.interfaces.dep import CalculatorsMixin
from entropy.client.interfaces.methods import RepositoryMixin, MiscMixin, \
MatchMixin
from entropy.client.interfaces.package import PackageActionFactory
from entropy.client.interfaces.repository import Repository
from entropy.client.interfaces.settings import ClientSystemSettingsPlugin
from entropy.client.interfaces.sets import Sets
from entropy.client.misc import sharedinstlock, ConfigurationUpdates
from entropy.client.services.interfaces import \
ClientWebServiceFactory, RepositoryWebServiceFactory
from entropy.const import etpConst, const_debug_write, \
const_convert_to_unicode, const_setup_perms
from entropy.core.settings.base import SystemSettings
from entropy.misc import LogFile
from entropy.cache import EntropyCacher
from entropy.i18n import _
import entropy.dump
import entropy.dep
import entropy.tools
class Client(Singleton, TextInterface, CalculatorsMixin,
RepositoryMixin, MiscMixin, MatchMixin):
def init_singleton(self, indexing = True, installed_repo = None,
xcache = True, user_xcache = False, repo_validation = True,
url_fetcher = None, multiple_url_fetcher = None, **kwargs):
"""
Entropy Client Singleton interface. Your hitchhiker's guide to the
Galaxy.
@keyword indexing: enable metadata indexing (default is True)
@type indexing: bool
@keyword installed_repo: open installed packages repository? (default
is True). Accepted values: True = open, False = open but consider
it not available, -1 = do not even try to open
@type installed_repo: bool or int
@keyword xcache: enable on-disk cache (default is True)
@type xcache: bool
@keyword user_xcache: enable on-disk cache even for users not in the
entropy group (default is False). Dangerous, could lead to cache
inconsistencies.
@type user_xcache: bool
@keyword repo_validation: validate all the available repositories
and automatically exclude the faulty ones
@type repo_validation: bool
@keyword url_fetcher: override default entropy.fetchers.UrlFetcher
class usage. Provide your own implementation of UrlFetcher using
this argument.
@type url_fetcher: class or None
@keyword multiple_url_fetcher: override default
entropy.fetchers.MultipleUrlFetcher class usage. Provide your own
implementation of MultipleUrlFetcher using this argument.
@type multiple_url_fetcher: class or None
"""
self.__post_acquire_hook_idx = None
self.__instance_destroyed = False
self._repo_error_messages_cache = set()
self._repodb_cache = {}
self._repodb_cache_mutex = threading.RLock()
self._memory_db_instances = {}
self._real_installed_repository = None
self._real_installed_repository_lock = threading.RLock()
self._treeupdates_repos = set()
self._can_run_sys_set_hooks = False
const_debug_write(__name__, "debug enabled")
self.safe_mode = 0
self._indexing = indexing
self._repo_validation = repo_validation
self._real_cacher = None
self._real_cacher_lock = threading.RLock()
# setup package settings (masking and other stuff)
self._real_settings = None
self._real_settings_lock = threading.RLock()
self._real_settings_client_plg = None
self._real_settings_client_plg_lock = threading.RLock()
self._real_logger = None
self._real_logger_lock = threading.RLock()
self._real_enabled_repos = None
self._real_enabled_repos_lock = threading.RLock()
self._multiple_url_fetcher = multiple_url_fetcher
self._url_fetcher = url_fetcher
if url_fetcher is None:
self._url_fetcher = UrlFetcher
if multiple_url_fetcher is None:
self._multiple_url_fetcher = MultipleUrlFetcher
self._do_open_installed_repo = True
self._installed_repo_enable = True
if installed_repo in (True, None, 1):
self._installed_repo_enable = True
elif installed_repo in (False, 0):
self._installed_repo_enable = False
elif installed_repo == -1:
self._installed_repo_enable = False
self._do_open_installed_repo = False
self.xcache = xcache
shell_xcache = os.getenv("ETP_NOCACHE")
if shell_xcache:
self.xcache = False
# now if we are on live, we should disable it
# are we running on a livecd? (/proc/cmdline has "cdroot")
if entropy.tools.islive():
self.xcache = False
elif (not entropy.tools.is_user_in_entropy_group()) and not user_xcache:
self.xcache = False
# Add Entropy Resources Lock post-acquire hook that cleans
# repository caches.
hook_ref = EntropyResourcesLock.add_post_acquire_hook(
self._resources_post_hook)
self.__post_acquire_hook_idx = hook_ref
# enable System Settings hooks
self._can_run_sys_set_hooks = True
const_debug_write(__name__, "singleton loaded")
@property
def _settings(self):
"""
Return a SystemSettings object instance.
"""
with self._real_settings_lock:
if self._real_settings is None:
self._real_settings = SystemSettings()
const_debug_write(__name__, "SystemSettings loaded")
# add our SystemSettings plugin
# Make sure we connect Entropy Client plugin
# AFTER client db init
self._real_settings.add_plugin(
self._settings_client_plugin)
return self._real_settings
@property
def _settings_client_plugin(self):
"""
Return the SystemSettings Entropy Client plugin.
"""
with self._real_settings_client_plg_lock:
if self._real_settings_client_plg is None:
plugin = ClientSystemSettingsPlugin(self)
self._real_settings_client_plg = plugin
return self._real_settings_client_plg
@property
def _cacher(self):
"""
Return an EntropyCacher object instance.
"""
with self._real_cacher_lock:
if self._real_cacher is None:
real_cacher = EntropyCacher()
const_debug_write(__name__, "EntropyCacher loaded")
# needs to be started here otherwise repository
# cache will be always dropped
if self.xcache:
real_cacher.start()
else:
# disable STASHING_CACHE or we leak
EntropyCacher.STASHING_CACHE = False
self._real_cacher = real_cacher
return self._real_cacher
@property
def logger(self):
"""
Return the Entropy Client Logger instance.
"""
with self._real_logger_lock:
if self._real_logger is None:
real_logger = LogFile(
level = self._settings['system']['log_level'],
filename = etpConst['entropylogfile'],
header = "[client]")
const_debug_write(__name__, "Logger loaded")
self._real_logger = real_logger
return self._real_logger
@property
def _enabled_repos(self):
with self._real_enabled_repos_lock:
if self._real_enabled_repos is None:
real_enabled_repos = []
if self._repo_validation:
self._validate_repositories(
enabled_repos = real_enabled_repos)
else:
real_enabled_repos.extend(
self._settings['repositories']['order'])
self._real_enabled_repos = real_enabled_repos
return self._real_enabled_repos
def _resources_post_hook(self):
"""
Hook running after Entropy Resources Lock acquisition.
This method takes care of the repository memory caches by
invalidating them.
"""
with self._real_installed_repository_lock:
if self._real_installed_repository is not None:
self._real_installed_repository.clearCache()
with self._repodb_cache_mutex:
for repo in self._repodb_cache.values():
repo.clearCache()
def destroy(self, _from_shutdown = False):
"""
Destroy this Singleton instance, closing repositories, removing
SystemSettings plugins added during instance initialization.
This method should be always called when instance is not used anymore.
"""
self.__instance_destroyed = True
if self.__post_acquire_hook_idx is not None:
EntropyResourcesLock.remove_post_acquire_hook(
self.__post_acquire_hook_idx)
self.__post_acquire_hook_idx = None
if hasattr(self, '_installed_repository'):
inst_repo = self.installed_repository()
if inst_repo is not None:
inst_repo.close(_token = InstalledPackagesRepository.NAME)
if hasattr(self, '_real_logger_lock'):
with self._real_logger_lock:
if self._real_logger is not None:
self._real_logger.close()
if not _from_shutdown:
if hasattr(self, '_real_settings') and \
hasattr(self._real_settings, 'remove_plugin'):
# shutdown() will terminate the whole process
# so there is no need to remove plugins from
# SystemSettings, it wouldn't make any diff.
if self._real_settings is not None:
try:
self._real_settings.remove_plugin(
ClientSystemSettingsPlugin.ID)
except KeyError:
pass
self.close_repositories(mask_clear = False)
def shutdown(self):
"""
This method should be called when the whole process is going to be
killed. It calls destroy() and stops any running threads.
"""
self._cacher.sync() # enforce, destroy() may kill the current content
self.destroy(_from_shutdown = True)
self._cacher.stop()
entropy.tools.kill_threads()
@sharedinstlock
def repository_packages_spm_sync(self, repository_identifier, repo_db,
force = False):
"""
Service method used to sync package names with Source Package Manager
via metadata stored in Repository dbs collected at server-time.
Source Package Manager can change package names, categories or slot
and Entropy repositories must be kept in sync.
In other words, it checks for /usr/portage/profiles/updates changes,
of course indirectly, since there is no way entropy.client can directly
depend on Portage.
@param repository_identifier: repository identifier which repo_db
parameter is bound
@type repository_identifier: string
@param repo_db: repository database instance
@type repo_db: entropy.db.EntropyRepository
@return: bool stating if changes have been made
@rtype: bool
"""
inst_repo = self.installed_repository()
if not inst_repo:
# nothing to do if the client db is not available
return False
self._treeupdates_repos.add(repository_identifier)
do_rescan = False
shell_rescan = os.getenv("ETP_TREEUPDATES_RESCAN")
if shell_rescan:
do_rescan = True
# check database digest
stored_digest = repo_db.retrieveRepositoryUpdatesDigest(
repository_identifier)
if stored_digest == -1:
do_rescan = True
# check stored value in client database
client_digest = "0"
if not do_rescan:
client_digest = \
inst_repo.retrieveRepositoryUpdatesDigest(
repository_identifier)
if do_rescan or (str(stored_digest) != str(client_digest)) or force:
# reset database tables
inst_repo.clearTreeupdatesEntries(
repository_identifier)
# load updates
update_actions = repo_db.retrieveTreeUpdatesActions(
repository_identifier)
# now filter the required actions
update_actions = inst_repo.filterTreeUpdatesActions(
update_actions)
if update_actions:
mytxt = "%s: %s." % (
bold(_("ATTENTION")),
red(_("forcing packages metadata update")),
)
self.output(
mytxt,
importance = 1,
level = "info",
header = darkred(" * ")
)
mytxt = "%s %s." % (
red(_("Updating system database using repository")),
blue(repository_identifier),
)
self.output(
mytxt,
importance = 1,
level = "info",
header = darkred(" * ")
)
# run stuff
inst_repo.runTreeUpdatesActions(
update_actions)
# store new digest into database
inst_repo.setRepositoryUpdatesDigest(
repository_identifier, stored_digest)
# store new actions
inst_repo.addRepositoryUpdatesActions(
InstalledPackagesRepository.NAME, update_actions,
self._settings['repositories']['branch'])
inst_repo.commit()
# clear client cache
inst_repo.clearCache()
return True
def is_destroyed(self):
return self.__instance_destroyed
def clear_cache(self):
"""
Clear all the Entropy default cache directory. This function is
fault tolerant and will never return any exception.
"""
with self._cacher:
# no data is written while holding self._cacher by the balls
# drop all the buffers then remove on-disk data
self._cacher.discard()
# clear repositories live cache
inst_repo = self.installed_repository()
if inst_repo is not None:
inst_repo.clearCache()
with self._repodb_cache_mutex:
for repo in self._repodb_cache.values():
repo.clearCache()
cache_dir = self._cacher.current_directory()
try:
shutil.rmtree(cache_dir, True)
except (shutil.Error, IOError, OSError):
return
try:
os.makedirs(cache_dir, 0o775)
except (IOError, OSError):
return
try:
const_setup_perms(cache_dir, etpConst['entropygid'])
except (IOError, OSError):
return
def QA(self):
"""
Load Entropy QA interface object
@rtype: entropy.qa.QAInterface
"""
qa_intf = QAInterface()
qa_intf.output = self.output
qa_intf.ask_question = self.ask_question
qa_intf.input_box = self.input_box
qa_intf.set_title = self.set_title
return qa_intf
def Settings(self):
"""
Return SystemSettings instance object
"""
return self._settings
def ClientSettings(self):
"""
Return SystemSettings Entropy Client plugin metadata dictionary
"""
p_id = ClientSystemSettingsPlugin.ID
return self._settings[p_id]
def Cacher(self):
"""
Return EntropyCacher instance object
@return: EntropyCacher instance object
@rtype: entropy.cache.EntropyCacher
"""
return self._cacher
def PackageActionFactory(self):
"""
Load Entropy PackageActionFactory instance object
"""
return PackageActionFactory(self)
def ConfigurationUpdates(self):
"""
Return Entropy Configuration File Updates management object.
"""
return ConfigurationUpdates(self)
def Spm(self):
"""
Load Source Package Manager instance object
"""
return get_spm(self)
def Spm_class(self):
"""
Load Source Package Manager default plugin class
"""
return get_spm_default_class()
def Repositories(self, *args, **kwargs):
"""
Load Entropy Repositories manager instance object
@return: Repository instance object
@rtype: entropy.client.interfaces.repository.Repository
"""
client_data = self.ClientSettings()['misc']
kwargs['gpg'] = client_data['gpg']
return Repository(self, *args, **kwargs)
def Security(self, *args, **kwargs):
"""
Load Entropy Security Advisories interface object
@return: Repository Security instance object
@rtype: entropy.security.System
"""
return System(self, *args, **kwargs)
def RepositorySecurity(self, keystore_dir = None):
"""
Load Entropy Repository Security interface object
@return: Repository Repository Security instance object
@rtype: entropy.security.Repository
@raise RepositorySecurity.GPGError: GPGError based instances in case
of problems.
"""
if keystore_dir is None:
keystore_dir = etpConst['etpclientgpgdir']
return RepositorySecurity(keystore_dir = keystore_dir)
def Sets(self):
"""
Load Package Sets interface object
@return: Sets instance object
@rtype: entropy.client.interfaces.sets.Sets
"""
return Sets(self)
def WebServices(self):
"""
Load the Entropy Web Services Factory interface, that can be used
to obtain a WebService object that is able to communicate with
repository remote services, if available.
@return: WebServicesFactory instance object
@rtype: entropy.client.services.interfaces.WebServicesFactory
"""
return ClientWebServiceFactory(self)
def RepositoryWebServices(self):
"""
Load the Repository Entropy Web Services Factory interface, that can
be used to obtain a RepositoryWebService object that is able to
communicate with repository remote services, querying for package
metadata and general repository status.
@return: RepositoryWebServiceFactory instance object
@rtype: entropy.client.services.interfaces.RepositoryWebServiceFactory
"""
return RepositoryWebServiceFactory(self)
|
Sabayon/entropy
|
lib/entropy/client/interfaces/client.py
|
Python
|
gpl-2.0
| 19,884
|
[
"Galaxy"
] |
85c67e23cd603b2cdda5af84eb39c868c6d46fef0355ebda9730ff820ba13971
|
""" The Job Scheduling Executor takes the information gained from all previous
optimizers and makes a scheduling decision for the jobs.
Subsequent to this jobs are added into a Task Queue and pilot agents can be submitted.
All issues preventing the successful resolution of a site candidate are discovered
here where all information is available.
This Executor will fail affected jobs meaningfully.
"""
__RCSID__ = "$Id: $"
import random
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.Core.Utilities.SiteSEMapping import getSEsForSite
from DIRAC.Core.Utilities.Time import fromString, toEpoch
from DIRAC.Core.Security import Properties
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.ResourceStatusSystem.Client.SiteStatus import SiteStatus
from DIRAC.StorageManagementSystem.Client.StorageManagerClient import StorageManagerClient, getFilesToStage
from DIRAC.WorkloadManagementSystem.Executor.Base.OptimizerExecutor import OptimizerExecutor
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
class JobScheduling(OptimizerExecutor):
"""
The specific Optimizer must provide the following methods:
- optimizeJob() - the main method called for each job
and it can provide:
- initializeOptimizer() before each execution cycle
"""
@classmethod
def initializeOptimizer(cls):
""" Initialization of the optimizer.
"""
cls.siteClient = SiteStatus()
cls.__jobDB = JobDB()
return S_OK()
def optimizeJob(self, jid, jobState):
""" 1. Banned sites are removed from the destination list.
2. Get input files
3. Production jobs are sent directly to TQ
4. Check if staging is necessary
"""
# Reschedule delay
result = jobState.getAttributes(['RescheduleCounter', 'RescheduleTime', 'ApplicationStatus'])
if not result['OK']:
return result
attDict = result['Value']
try:
reschedules = int(attDict['RescheduleCounter'])
except (ValueError, KeyError):
return S_ERROR("RescheduleCounter has to be an integer")
if reschedules != 0:
delays = self.ex_getOption('RescheduleDelays', [60, 180, 300, 600])
delay = delays[min(reschedules, len(delays) - 1)]
waited = toEpoch() - toEpoch(fromString(attDict['RescheduleTime']))
if waited < delay:
return self.__holdJob(jobState, 'On Hold: after rescheduling %s' % reschedules, delay)
# Get the job manifest for the later checks
result = jobState.getManifest()
if not result['OK']:
return S_ERROR("Could not retrieve job manifest: %s" % result['Message'])
jobManifest = result['Value']
# Get site requirements
result = self.__getSitesRequired(jobManifest)
if not result['OK']:
return result
userSites, userBannedSites = result['Value']
# Get job type
result = jobState.getAttribute("JobType")
if not result['OK']:
return S_ERROR("Could not retrieve job type")
jobType = result['Value']
# Get banned sites from DIRAC
result = self.siteClient.getSites('Banned')
if not result['OK']:
return S_ERROR("Cannot retrieve banned sites from JobDB")
wmsBannedSites = result['Value']
# If the user has selected any site, filter them and hold the job if not able to run
if userSites:
if jobType not in self.ex_getOption('ExcludedOnHoldJobTypes', []):
result = self.siteClient.getUsableSites(userSites)
if not result['OK']:
return S_ERROR("Problem checking userSites for tuple of active/banned/invalid sites")
usableSites = set(result['Value'])
bannedSites = []
invalidSites = []
for site in userSites:
if site in wmsBannedSites:
bannedSites.append(site)
elif site not in usableSites:
invalidSites.append(site)
if invalidSites:
self.jobLog.debug("Invalid site(s) requested: %s" % ','.join(invalidSites))
if not self.ex_getOption('AllowInvalidSites', True):
return self.__holdJob(jobState, "Requested site(s) %s are invalid" % ",".join(invalidSites))
if bannedSites:
self.jobLog.debug("Banned site(s) %s ignored" % ",".join(bannedSites))
if not usableSites:
return self.__holdJob(jobState, "Requested site(s) %s are inactive" % ",".join(bannedSites))
if not usableSites:
return self.__holdJob(jobState, "No requested site(s) are active/valid")
userSites = list(usableSites)
checkPlatform = self.ex_getOption('CheckPlatform', False)
jobPlatform = jobManifest.getOption("Platform", None)
# First check that the platform is valid (in OSCompatibility list)
if checkPlatform and jobPlatform:
result = gConfig.getOptionsDict('/Resources/Computing/OSCompatibility')
if not result['OK']:
return S_ERROR("Unable to get OSCompatibility list")
allPlatforms = result['Value']
if jobPlatform not in allPlatforms:
self.jobLog.error("Platform not supported", jobPlatform)
return S_ERROR("Platform %s is not supported" % jobPlatform)
# Filter the userSites by the platform selection (if there is one)
if checkPlatform and userSites:
if jobPlatform:
result = self.__filterByPlatform(jobPlatform, userSites)
if not result['OK']:
self.jobLog.error("Failed to filter job sites by platform", result['Message'])
return S_ERROR("Failed to filter job sites by platform")
userSites = result['Value']
if not userSites:
# No sites left after filtering -> Invalid platform/sites combination
self.jobLog.error("No selected sites match platform", jobPlatform)
return S_ERROR("No selected sites match platform '%s'" % jobPlatform)
# Check if there is input data
result = jobState.getInputData()
if not result['OK']:
self.jobLog.error("Cannot get input data", result['Message'])
return S_ERROR("Failed to get input data from JobDB")
if not result['Value']:
# No input data? Just send to TQ
return self.__sendToTQ(jobState, jobManifest, userSites, userBannedSites)
self.jobLog.verbose("Has an input data requirement")
inputData = result['Value']
# ===================================================================================
# Production jobs are sent to TQ, but first we have to verify if staging is necessary
# ===================================================================================
if jobType in Operations().getValue('Transformations/DataProcessing', []):
self.jobLog.info("Production job: sending to TQ, but first checking if staging is requested")
res = getFilesToStage(inputData,
jobState=jobState,
checkOnlyTapeSEs=self.ex_getOption('CheckOnlyTapeSEs', True),
jobLog=self.jobLog)
if not res['OK']:
return self.__holdJob(jobState, res['Message'])
if res['Value']['absentLFNs']:
# Some files do not exist at all... set the job Failed
# Invert the mapping: group the missing LFNs by failure reason
reasons = {}
for lfn, reason in res['Value']['absentLFNs'].iteritems():
reasons.setdefault(reason, []).append(lfn)
for reason, lfns in reasons.iteritems():
# Some files are missing in the FC or in SEs, fail the job
self.jobLog.error(reason, ','.join(lfns))
error = ','.join(reasons)
return S_ERROR(error)
if res['Value']['failedLFNs']:
return self.__holdJob(jobState, "Couldn't get storage metadata of some files")
stageLFNs = res['Value']['offlineLFNs']
if stageLFNs:
res = self.__checkStageAllowed(jobState)
if not res['OK']:
return res
if not res['Value']:
return S_ERROR("Stage not allowed")
self.__requestStaging(jobState, stageLFNs)
return S_OK()
else:
# No staging required
onlineSites = res['Value']['onlineSites']
if onlineSites:
# Set the online site(s) first
userSites = set(userSites)
onlineSites &= userSites
userSites = list(onlineSites) + list(userSites - onlineSites)
return self.__sendToTQ(jobState, jobManifest, userSites, userBannedSites, onlineSites=onlineSites)
# ===================================================
# From now on we know it's a user job with input data
# ===================================================
idAgent = self.ex_getOption('InputDataAgent', 'InputData')
result = self.retrieveOptimizerParam(idAgent)
if not result['OK']:
self.jobLog.error("Could not retrieve input data info", result['Message'])
return S_ERROR("Could not retrieve input data info")
opData = result['Value']
if 'SiteCandidates' not in opData:
return S_ERROR("No possible site candidates")
# Filter input data sites with user requirement
siteCandidates = list(opData['SiteCandidates'])
self.jobLog.info("Site candidates are %s" % siteCandidates)
if userSites:
siteCandidates = list(set(siteCandidates) & set(userSites))
siteCandidates = self._applySiteFilter(siteCandidates, banned=userBannedSites)
if not siteCandidates:
return S_ERROR("Impossible InputData * Site requirements")
idSites = {}
for site in siteCandidates:
idSites[site] = opData['SiteCandidates'][site]
# Check if sites have correct count of disk+tape replicas
numData = len(inputData)
errorSites = set()
for site in idSites:
if numData != idSites[site]['disk'] + idSites[site]['tape']:
self.jobLog.error("Site candidate %s does not have all the input data" % site)
errorSites.add(site)
for site in errorSites:
idSites.pop(site)
if not idSites:
return S_ERROR("Site candidates do not have all the input data")
# Check if staging is required
stageRequired, siteCandidates = self.__resolveStaging(inputData, idSites)
if not siteCandidates:
return S_ERROR("No destination sites available")
# Is any site active?
stageSites = self._applySiteFilter(siteCandidates, banned=wmsBannedSites)
if not stageSites:
return self.__holdJob(jobState, "Sites %s are inactive or banned" % ", ".join(siteCandidates))
# If no staging is required send to TQ
if not stageRequired:
# Use siteCandidates and not stageSites because active and banned sites
# will be taken into account at matching time
return self.__sendToTQ(jobState, jobManifest, siteCandidates, userBannedSites)
# Check if the user is allowed to stage
if self.ex_getOption("RestrictDataStage", False):
res = self.__checkStageAllowed(jobState)
if not res['OK']:
return res
if not res['Value']:
return S_ERROR("Stage not allowed")
# Get stageSites[0] because it has already been randomized and it's as good as any in stageSites
stageSite = stageSites[0]
self.jobLog.verbose(" Staging site will be %s" % (stageSite))
stageData = idSites[stageSite]
# Set as if everything has already been staged
stageData['disk'] += stageData['tape']
stageData['tape'] = 0
# Set the site info back to the original dict to save afterwards
opData['SiteCandidates'][stageSite] = stageData
stageRequest = self.__preRequestStaging(jobManifest, stageSite, opData)
if not stageRequest['OK']:
return stageRequest
stageLFNs = stageRequest['Value']
result = self.__requestStaging(jobState, stageLFNs)
if not result['OK']:
return result
stageLFNs = result['Value']
self.__updateSharedSESites(jobManifest, stageSite, stageLFNs, opData)
# Save the optimizer data again
self.jobLog.verbose('Updating %s Optimizer Info:' % (idAgent), opData)
result = self.storeOptimizerParam(idAgent, opData)
if not result['OK']:
return result
return self.__setJobSite(jobState, stageSites)
def _applySiteFilter(self, sites, banned=False):
""" Filters out banned sites
"""
if not sites:
return sites
filtered = set(sites)
if banned and isinstance(banned, (list, set, dict)):
filtered -= set(banned)
return list(filtered)
def __holdJob(self, jobState, holdMsg, delay=0):
if delay:
self.freezeTask(delay)
else:
self.freezeTask(self.ex_getOption("HoldTime", 300))
self.jobLog.info("On hold -> %s" % holdMsg)
return jobState.setAppStatus(holdMsg, source=self.ex_optimizerName())
def __getSitesRequired(self, jobManifest):
"""Returns any candidate sites specified by the job or sites that have been
banned and could affect the scheduling decision.
"""
bannedSites = jobManifest.getOption("BannedSites", [])
if not bannedSites:
bannedSites = jobManifest.getOption("BannedSite", [])
if bannedSites:
self.jobLog.info("Banned %s sites" % ", ".join(bannedSites))
sites = jobManifest.getOption("Site", [])
# TODO: Only accept known sites after removing crap like ANY set in the original manifest
sites = [site for site in sites if site.strip().lower() not in ("any", "")]
if sites:
if len(sites) == 1:
self.jobLog.info("Single chosen site %s specified" % (sites[0]))
else:
self.jobLog.info("Multiple sites requested: %s" % ','.join(sites))
sites = self._applySiteFilter(sites, banned=bannedSites)
if not sites:
return S_ERROR("Impossible site requirement")
return S_OK((sites, bannedSites))
def __filterByPlatform(self, jobPlatform, userSites):
""" Filters out sites that have no CE with a matching platform.
"""
basePath = "/Resources/Sites"
filteredSites = set()
for site in userSites:
if "." not in site:
# Invalid site name: Doesn't contain a dot!
self.jobLog.info("Skipped invalid site name: %s" % site)
continue
grid = site.split('.')[0]
sitePath = cfgPath(basePath, grid, site, "CEs")
result = gConfig.getSections(sitePath)
if not result['OK']:
self.jobLog.info("Failed to get CEs at site %s." % site)
continue
siteCEs = result['Value']
for CEName in siteCEs:
CEPlatform = gConfig.getValue(cfgPath(sitePath, CEName, "OS"))
if jobPlatform == CEPlatform:
# Site has a CE with a matching platform
filteredSites.add(site)
return S_OK(list(filteredSites))
def _getTagsFromManifest(self, jobManifest):
""" helper method to add a list of tags to the TQ from the job manifest content
"""
# Generate Tags from specific requirements
tagList = []
# sorting out the number of processors
nProcessors = 1
maxProcessors = 1
if "NumberOfProcessors" in jobManifest: # this should be the exact number
nProcessors = jobManifest.getOption("NumberOfProcessors", 0)
else: # is there a min? and in that case, is there a max?
if "MinNumberOfProcessors" in jobManifest:
nProcessors = jobManifest.getOption("MinNumberOfProcessors", 0)
if "MaxNumberOfProcessors" in jobManifest:
maxProcessors = jobManifest.getOption("MaxNumberOfProcessors", 0)
else:
maxProcessors = -1
if nProcessors > 1:
tagList.append("%dProcessors" % nProcessors)
tagList.append("MultiProcessor")
if maxProcessors == -1 or maxProcessors > 1:
tagList.append("MultiProcessor")
if "WholeNode" in jobManifest:
if jobManifest.getOption("WholeNode", "").lower() in ["1", "yes", "true", "y"]:
tagList.append("WholeNode")
tagList.append("MultiProcessor")
# sorting out the RAM (this should probably be coded the same way as the number of processors)
if "MaxRAM" in jobManifest:
maxRAM = jobManifest.getOption("MaxRAM", 0)
if maxRAM:
tagList.append("%dGB" % maxRAM)
# other tags? Just add them
if "Tags" in jobManifest:
tagList.extend(jobManifest.getOption("Tags", []))
if "Tag" in jobManifest:
tagList.extend(jobManifest.getOption("Tag", []))
return tagList
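# Example (hypothetical manifest): NumberOfProcessors = 4 together with
# MaxRAM = 8 yields the tags ['4Processors', 'MultiProcessor', '8GB'].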
def __sendToTQ(self, jobState, jobManifest, sites, bannedSites, onlineSites=None):
"""This method sends jobs to the task queue agent and if candidate sites
are defined, updates job JDL accordingly.
"""
tagList = self._getTagsFromManifest(jobManifest)
if tagList:
jobManifest.setOption("Tags", ", ".join(tagList))
reqSection = "JobRequirements"
if reqSection in jobManifest:
result = jobManifest.getSection(reqSection)
else:
result = jobManifest.createSection(reqSection)
if not result['OK']:
self.jobLog.error("Cannot create %s: %s" % reqSection, result['Value'])
return S_ERROR("Cannot create %s in the manifest" % reqSection)
reqCfg = result['Value']
if sites:
reqCfg.setOption("Sites", ", ".join(sites))
if bannedSites:
reqCfg.setOption("BannedSites", ", ".join(bannedSites))
# Job multivalue requirement keys are specified as singles in the job descriptions
# but for backward compatibility they can also be plurals
for key in ('SubmitPools', "SubmitPool", "GridMiddleware", "PilotTypes", "PilotType",
"JobType", "GridRequiredCEs", "GridCE", "Tags"):
reqKey = key
if key == "JobType":
reqKey = "JobTypes"
elif key == "GridRequiredCEs" or key == "GridCE":
reqKey = "GridCEs"
elif key == "SubmitPools" or key == "SubmitPool":
reqKey = "SubmitPools"
elif key == "PilotTypes" or key == "PilotType":
reqKey = "PilotTypes"
if key in jobManifest:
reqCfg.setOption(reqKey, ", ".join(jobManifest.getOption(key, [])))
result = self.__setJobSite(jobState, sites, onlineSites=onlineSites)
if not result['OK']:
return result
self.jobLog.info("Done")
return self.setNextOptimizer(jobState)
def __resolveStaging(self, inputData, idSites):
diskSites = []
maxOnDisk = 0
bestSites = []
for site in idSites:
nTape = idSites[site]['tape']
nDisk = idSites[site]['disk']
if nTape > 0:
self.jobLog.verbose("%s tape replicas on site %s" % (nTape, site))
if nDisk > 0:
self.jobLog.verbose("%s disk replicas on site %s" % (nDisk, site))
if nDisk == len(inputData):
diskSites.append(site)
if nDisk > maxOnDisk:
maxOnDisk = nDisk
bestSites = [site]
elif nDisk == maxOnDisk:
bestSites.append(site)
# If there are selected sites, those are disk only sites
if diskSites:
self.jobLog.info("No staging required")
return (False, diskSites)
self.jobLog.info("Staging required")
if len(bestSites) > 1:
random.shuffle(bestSites)
return (True, bestSites)
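# Example (hypothetical): with three input files, any site holding all three
# on disk short-circuits staging; otherwise the sites with the most disk
# replicas, say two of three, are shuffled and returned as staging candidates.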
def __preRequestStaging(self, jobManifest, stageSite, opData):
tapeSEs = []
diskSEs = []
vo = jobManifest.getOption('VirtualOrganization')
inputDataPolicy = jobManifest.getOption('InputDataPolicy', 'Protocol')
connectionLevel = 'DOWNLOAD' if 'download' in inputDataPolicy.lower() else 'PROTOCOL'
# Allow staging from SEs accessible by protocol
result = DMSHelpers(vo=vo).getSEsForSite(stageSite, connectionLevel=connectionLevel)
if not result['OK']:
return S_ERROR('Could not determine SEs for site %s' % stageSite)
siteSEs = result['Value']
for seName in siteSEs:
se = StorageElement(seName, vo=vo)
seStatus = se.getStatus()
if not seStatus['OK']:
return seStatus
seStatus = seStatus['Value']
if seStatus['Read'] and seStatus['TapeSE']:
tapeSEs.append(seName)
if seStatus['Read'] and seStatus['DiskSE']:
diskSEs.append(seName)
if not tapeSEs:
return S_ERROR("No Local SEs for site %s" % stageSite)
self.jobLog.verbose("Tape SEs are %s" % (", ".join(tapeSEs)))
# I swear this is horrible DM code it's not mine.
# Eternity of hell to the inventor of the Value of Value of Success of...
inputData = opData['Value']['Value']['Successful']
stageLFNs = {}
lfnToStage = []
for lfn in inputData:
replicas = inputData[lfn]
# Check SEs
seStage = []
for seName in replicas:
if seName in diskSEs:
# This lfn is in disk. Skip it
seStage = []
break
if seName not in tapeSEs:
# This lfn is not in this tape SE. Check next SE
continue
seStage.append(seName)
for seName in seStage:
if seName not in stageLFNs:
stageLFNs[seName] = []
stageLFNs[seName].append(lfn)
if lfn not in lfnToStage:
lfnToStage.append(lfn)
if not stageLFNs:
return S_ERROR("Cannot find tape replicas")
# Check if any LFN is in more than one SE
# If that's the case, try to stage from the SE that has more LFNs to stage to group the request
# 1.- Get the SEs ordered by descending number of LFNs to stage (build a
# list so it can be traversed once per LFN)
sortedSEs = sorted([(len(stageLFNs[seName]), seName) for seName in stageLFNs], reverse=True)
for lfn in lfnToStage:
found = False
# 2.- Traverse the SEs
for _stageCount, seName in sortedSEs:
if seName in stageLFNs and lfn in stageLFNs[seName]:
# 3.- If first time found, just mark as found. Next time delete the replica from the request
if found:
stageLFNs[seName].remove(lfn)
else:
found = True
# 4.- If the SE is left empty, remove it
if not stageLFNs[seName]:
stageLFNs.pop(seName)
return S_OK(stageLFNs)
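# Example of the grouping above (hypothetical): if lfn1 has tape replicas at
# SE_A and SE_B and SE_A already has lfn2 queued, lfn1 stays with SE_A (the
# SE with more requests) and its SE_B copy is dropped from the request.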
def __requestStaging(self, jobState, stageLFNs):
""" Actual request for staging LFNs through the StorageManagerClient
"""
self.jobLog.verbose("Stage request will be \n\t%s" % "\n\t".join(
["%s:%s" % (lfn, stageLFNs[lfn]) for lfn in stageLFNs]))
stagerClient = StorageManagerClient()
result = jobState.setStatus(self.ex_getOption('StagingStatus', 'Staging'),
self.ex_getOption('StagingMinorStatus', 'Request To Be Sent'),
appStatus="",
source=self.ex_optimizerName())
if not result['OK']:
return result
result = stagerClient.setRequest(stageLFNs, 'WorkloadManagement',
'updateJobFromStager@WorkloadManagement/JobStateUpdate',
int(jobState.jid))
if not result['OK']:
self.jobLog.error("Could not send stage request: %s" % result['Message'])
return S_ERROR("Problem sending staging request")
rid = str(result['Value'])
self.jobLog.info("Stage request %s sent" % rid)
self.storeOptimizerParam('StageRequest', rid)
result = jobState.setStatus(self.ex_getOption('StagingStatus', 'Staging'),
self.ex_getOption('StagingMinorStatus', 'Request Sent'),
appStatus="",
source=self.ex_optimizerName())
if not result['OK']:
return result
return S_OK(stageLFNs)
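  # Status flow of the request above: the job first enters
  # 'Staging / Request To Be Sent', the bulk request is submitted to the
  # StorageManager keyed by the job id, the returned request id is stored
  # as the 'StageRequest' optimizer parameter, and only then does the
  # minor status advance to 'Request Sent'.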
def __updateSharedSESites(self, jobManifest, stageSite, stagedLFNs, opData):
siteCandidates = opData['SiteCandidates']
seStatus = {}
vo = jobManifest.getOption('VirtualOrganization')
for siteName in siteCandidates:
if siteName == stageSite:
continue
self.jobLog.verbose("Checking %s for shared SEs" % siteName)
siteData = siteCandidates[siteName]
result = getSEsForSite(siteName)
if not result['OK']:
continue
closeSEs = result['Value']
diskSEs = []
for seName in closeSEs:
# If we don't have the SE status get it and store it
if seName not in seStatus:
seStatus[seName] = StorageElement(seName, vo=vo).status()
        # Get the SE status from memory and add it if it's a disk SE
status = seStatus[seName]
if status['Read'] and status['DiskSE']:
diskSEs.append(seName)
self.jobLog.verbose("Disk SEs for %s are %s" % (siteName, ", ".join(diskSEs)))
# Hell again to the dev of this crappy value of value of successful of ...
lfnData = opData['Value']['Value']['Successful']
for seName in stagedLFNs:
# If the SE is not close then skip it
if seName not in closeSEs:
continue
for lfn in stagedLFNs[seName]:
self.jobLog.verbose("Checking %s for %s" % (seName, lfn))
# I'm pretty sure that this cannot happen :P
if lfn not in lfnData:
continue
# Check if it's already on disk at the site
onDisk = False
for siteSE in lfnData[lfn]:
if siteSE in diskSEs:
self.jobLog.verbose("%s on disk for %s" % (lfn, siteSE))
onDisk = True
# If not on disk, then update!
if not onDisk:
self.jobLog.verbose("Setting LFN to disk for %s" % (seName))
siteData['disk'] += 1
siteData['tape'] -= 1
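  # Illustrative sketch (hypothetical values): if 'lfn1' is staged to a tape
  # SE that is also close to site X, and X has no disk replica of 'lfn1'
  # yet, X's counters move from e.g. {'disk': 1, 'tape': 2} to
  # {'disk': 2, 'tape': 1}, so X may later qualify without extra staging.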
def __setJobSite(self, jobState, siteList, onlineSites=None):
""" Set the site attribute
"""
if onlineSites is None:
onlineSites = []
numSites = len(siteList)
if numSites == 0:
self.jobLog.info("Any site is candidate")
return jobState.setAttribute("Site", "ANY")
elif numSites == 1:
self.jobLog.info("Only site %s is candidate" % siteList[0])
return jobState.setAttribute("Site", siteList[0])
# If the job has input data, the online sites are hosting the data
if len(onlineSites) == 1:
siteName = "Group.%s" % ".".join(list(onlineSites)[0].split(".")[1:])
self.jobLog.info("Group %s is candidate" % siteName)
elif onlineSites:
# More than one site with input
siteName = "MultipleInput"
self.jobLog.info("Several input sites are candidate: %s" % ','.join(onlineSites))
else:
# No input site reported (could be a user job)
siteName = "Multiple"
self.jobLog.info("Multiple sites are candidate")
return jobState.setAttribute("Site", siteName)
def __checkStageAllowed(self, jobState):
"""Check if the job credentials allow to stage date """
result = jobState.getAttribute("OwnerGroup")
if not result['OK']:
self.jobLog.error("Cannot retrieve OwnerGroup from DB: %s" % result['Message'])
return S_ERROR("Cannot get OwnerGroup")
group = result['Value']
return S_OK(Properties.STAGE_ALLOWED in Registry.getPropertiesForGroup(group))
|
fstagni/DIRAC
|
WorkloadManagementSystem/Executor/JobScheduling.py
|
Python
|
gpl-3.0
| 26,829
|
[
"DIRAC"
] |
161a1cbdd18f14ed9b8f44d0f56cf37b1870aa81f00019dfea78670f091a6faf
|
# (C) British Crown Copyright 2010 - 2013, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
A collection of routines which create standard Cubes for test purposes.
"""
import os.path
import numpy as np
import numpy.ma as ma
from iris.cube import Cube
import iris.aux_factory
import iris.coords
import iris.coords as icoords
import iris.tests as tests
from iris.coord_systems import GeogCS, RotatedGeogCS
def lat_lon_cube():
"""
Returns a cube with a latitude and longitude suitable for testing
saving to PP/NetCDF etc.
"""
cube = Cube(np.arange(12, dtype=np.int32).reshape((3, 4)))
cs = GeogCS(6371229)
coord = iris.coords.DimCoord(points=np.array([-1, 0, 1], dtype=np.int32),
standard_name='latitude',
units='degrees',
coord_system=cs)
cube.add_dim_coord(coord, 0)
coord = iris.coords.DimCoord(points=np.array([-1, 0, 1, 2], dtype=np.int32),
standard_name='longitude',
units='degrees',
coord_system=cs)
cube.add_dim_coord(coord, 1)
return cube
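# A minimal usage sketch (illustrative only; the filename is hypothetical):
# the cube above round-trips through NetCDF, e.g.
#     cube = lat_lon_cube()
#     iris.save(cube, 'lat_lon.nc')
#     restored = iris.load_cube('lat_lon.nc')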
def global_pp():
"""
Returns a two-dimensional cube derived from PP/aPPglob1/global.pp.
The standard_name and unit attributes are added to compensate for the
broken STASH encoding in that file.
"""
def callback_global_pp(cube, field, filename):
cube.standard_name = 'air_temperature'
cube.units = 'K'
path = tests.get_data_path(('PP', 'aPPglob1', 'global.pp'))
cube = iris.load_cube(path, callback=callback_global_pp)
return cube
def simple_pp():
filename = tests.get_data_path(['PP', 'simple_pp', 'global.pp']) # Differs from global_pp()
cube = iris.load_cube(filename)
return cube
def simple_1d(with_bounds=True):
"""
Returns an abstract, one-dimensional cube.
>>> print simple_1d()
thingness (foo: 11)
Dimension coordinates:
foo x
>>> print `simple_1d().data`
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
"""
cube = Cube(np.arange(11, dtype=np.int32))
cube.long_name = 'thingness'
cube.units = '1'
points = np.arange(11, dtype=np.int32) + 1
bounds = np.column_stack([np.arange(11, dtype=np.int32), np.arange(11, dtype=np.int32) + 1])
    coord = iris.coords.DimCoord(points, long_name='foo', units='1', bounds=bounds if with_bounds else None)
cube.add_dim_coord(coord, 0)
return cube
def simple_2d(with_bounds=True):
"""
Returns an abstract, two-dimensional, optionally bounded, cube.
>>> print simple_2d()
thingness (bar: 3; foo: 4)
Dimension coordinates:
bar x -
foo - x
>>> print `simple_2d().data`
[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
"""
cube = Cube(np.arange(12, dtype=np.int32).reshape((3, 4)))
cube.long_name = 'thingness'
cube.units = '1'
y_points = np.array([ 2.5, 7.5, 12.5])
y_bounds = np.array([[0, 5], [5, 10], [10, 15]], dtype=np.int32)
y_coord = iris.coords.DimCoord(y_points, long_name='bar', units='1', bounds=y_bounds if with_bounds else None)
x_points = np.array([ -7.5, 7.5, 22.5, 37.5])
x_bounds = np.array([[-15, 0], [0, 15], [15, 30], [30, 45]], dtype=np.int32)
x_coord = iris.coords.DimCoord(x_points, long_name='foo', units='1', bounds=x_bounds if with_bounds else None)
cube.add_dim_coord(y_coord, 0)
cube.add_dim_coord(x_coord, 1)
return cube
def simple_2d_w_multidim_coords(with_bounds=True):
"""
Returns an abstract, two-dimensional, optionally bounded, cube.
>>> print simple_2d_w_multidim_coords()
thingness (*ANONYMOUS*: 3; *ANONYMOUS*: 4)
Auxiliary coordinates:
bar x x
foo x x
    >>> print `simple_2d_w_multidim_coords().data`
[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]]
"""
cube = simple_3d_w_multidim_coords(with_bounds)[0, :, :]
cube.remove_coord('wibble')
cube.data = np.arange(12, dtype=np.int32).reshape((3, 4))
return cube
def simple_3d_w_multidim_coords(with_bounds=True):
"""
Returns an abstract, two-dimensional, optionally bounded, cube.
>>> print simple_3d_w_multidim_coords()
thingness (wibble: 2; *ANONYMOUS*: 3; *ANONYMOUS*: 4)
Dimension coordinates:
wibble x - -
Auxiliary coordinates:
bar - x x
foo - x x
>>> print simple_3d_w_multidim_coords().data
[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]
"""
cube = Cube(np.arange(24, dtype=np.int32).reshape((2, 3, 4)))
cube.long_name = 'thingness'
cube.units = '1'
y_points = np.array([[2.5, 7.5, 12.5, 17.5],
[10., 17.5, 27.5, 42.5],
[15., 22.5, 32.5, 50.]])
y_bounds = np.array([[[0, 5], [5, 10], [10, 15], [15, 20]],
[[5, 15], [15, 20], [20, 35], [35, 50]],
[[10, 20], [20, 25], [25, 40], [40, 60]]],
dtype=np.int32)
y_coord = iris.coords.AuxCoord(points=y_points, long_name='bar',
units='1',
bounds=y_bounds if with_bounds else None)
x_points = np.array([[-7.5, 7.5, 22.5, 37.5],
[-12.5, 4., 26.5, 47.5],
[2.5, 14., 36.5, 44.]])
x_bounds = np.array([[[-15, 0], [0, 15], [15, 30], [30, 45]],
[[-25, 0], [0, 8], [8, 45], [45, 50]],
[[-5, 10], [10, 18], [18, 55], [18, 70]]],
dtype=np.int32)
x_coord = iris.coords.AuxCoord(points=x_points, long_name='foo',
units='1',
bounds=x_bounds if with_bounds else None)
wibble_coord = iris.coords.DimCoord(np.array([10., 30.],
dtype=np.float32),
long_name='wibble', units='1')
cube.add_dim_coord(wibble_coord, [0])
cube.add_aux_coord(y_coord, [1, 2])
cube.add_aux_coord(x_coord, [1, 2])
return cube
def simple_3d():
"""
Returns an abstract three dimensional cube.
    >>> print simple_3d()
thingness / (1) (wibble: 2; latitude: 3; longitude: 4)
Dimension coordinates:
wibble x - -
latitude - x -
longitude - - x
>>> print simple_3d().data
[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]
"""
cube = Cube(np.arange(24, dtype=np.int32).reshape((2, 3, 4)))
cube.long_name = 'thingness'
cube.units = '1'
wibble_coord = iris.coords.DimCoord(np.array([10., 30.],
dtype=np.float32),
long_name='wibble', units='1')
lon = iris.coords.DimCoord([-180, -90, 0, 90],
standard_name='longitude',
units='degrees', circular=True)
lat = iris.coords.DimCoord([90, 0, -90],
standard_name='latitude', units='degrees')
cube.add_dim_coord(wibble_coord, [0])
cube.add_dim_coord(lat, [1])
cube.add_dim_coord(lon, [2])
return cube
def simple_3d_mask():
"""
Returns an abstract three dimensional cube that has data masked.
    >>> print simple_3d_mask()
thingness / (1) (wibble: 2; latitude: 3; longitude: 4)
Dimension coordinates:
wibble x - -
latitude - x -
longitude - - x
>>> print simple_3d_mask().data
[[[-- -- -- --]
[-- -- -- --]
[-- 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]
"""
cube = simple_3d()
cube.data = ma.asanyarray(cube.data)
cube.data = ma.masked_less_equal(cube.data, 8.)
return cube
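# The masking pattern above generalises (illustrative): any comparison-based
# mask from numpy.ma can be applied the same way, e.g.
#     cube = simple_3d()
#     cube.data = ma.masked_greater(cube.data, 20)
# which masks the last three values instead of the first nine.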
def track_1d(duplicate_x=False):
"""
Returns a one-dimensional track through two-dimensional space.
>>> print track_1d()
air_temperature (y, x: 11)
Dimensioned coords:
x -> x
y -> y
Single valued coords:
>>> print `track_1d().data`
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
"""
cube = Cube(np.arange(11, dtype=np.int32), standard_name='air_temperature', units='K')
bounds = np.column_stack([np.arange(11, dtype=np.int32), np.arange(11, dtype=np.int32) + 1])
pts = bounds[:, 1]
coord = iris.coords.AuxCoord(pts, 'projection_x_coordinate', units='1', bounds=bounds)
cube.add_aux_coord(coord, [0])
if duplicate_x:
coord = iris.coords.AuxCoord(pts, 'projection_x_coordinate', units='1', bounds=bounds)
cube.add_aux_coord(coord, [0])
coord = iris.coords.AuxCoord(pts * 2, 'projection_y_coordinate', units='1', bounds=bounds * 2)
cube.add_aux_coord(coord, 0)
return cube
def simple_2d_w_multidim_and_scalars():
data = np.arange(50, dtype=np.int32).reshape((5, 10))
cube = iris.cube.Cube(data, long_name='test 2d dimensional cube', units='meters')
# DimCoords
dim1 = iris.coords.DimCoord(np.arange(5, dtype=np.float32) * 5.1 + 3.0, long_name='dim1', units='meters')
dim2 = iris.coords.DimCoord(np.arange(10, dtype=np.int32), long_name='dim2', units='meters',
bounds=np.arange(20, dtype=np.int32).reshape(10, 2))
# Scalars
an_other = iris.coords.AuxCoord(3.0, long_name='an_other', units='meters')
yet_an_other = iris.coords.DimCoord(23.3, standard_name='air_temperature',
long_name='custom long name',
var_name='custom_var_name',
units='K')
# Multidim
my_multi_dim_coord = iris.coords.AuxCoord(np.arange(50, dtype=np.int32).reshape(5, 10),
long_name='my_multi_dim_coord', units='1',
bounds=np.arange(200, dtype=np.int32).reshape(5, 10, 4))
cube.add_dim_coord(dim1, 0)
cube.add_dim_coord(dim2, 1)
cube.add_aux_coord(an_other)
cube.add_aux_coord(yet_an_other)
cube.add_aux_coord(my_multi_dim_coord, [0, 1])
return cube
def hybrid_height():
"""
Returns a two-dimensional (Z, X), hybrid-height cube.
>>> print hybrid_height()
TODO: Update!
air_temperature (level_height: 3; *ANONYMOUS*: 4)
Dimension coordinates:
level_height x -
Auxiliary coordinates:
model_level_number x -
sigma x -
surface_altitude - x
Derived coordinates:
altitude x x
>>> print hybrid_height().data
    [[ 0  1  2  3]
     [ 4  5  6  7]
     [ 8  9 10 11]]
"""
data = np.arange(12, dtype='i8').reshape((3, 4))
orography = icoords.AuxCoord([10, 25, 50, 5], standard_name='surface_altitude', units='m')
model_level = icoords.AuxCoord([2, 1, 0], standard_name='model_level_number')
level_height = icoords.DimCoord([100, 50, 10], long_name='level_height',
units='m', attributes={'positive': 'up'},
bounds=[[150, 75], [75, 20], [20, 0]])
sigma = icoords.AuxCoord([0.8, 0.9, 0.95], long_name='sigma',
bounds=[[0.7, 0.85], [0.85, 0.97], [0.97, 1.0]])
hybrid_height = iris.aux_factory.HybridHeightFactory(level_height, sigma, orography)
cube = iris.cube.Cube(data, standard_name='air_temperature', units='K',
dim_coords_and_dims=[(level_height, 0)],
aux_coords_and_dims=[(orography, 1), (model_level, 0), (sigma, 0)],
aux_factories=[hybrid_height])
return cube
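# Note (illustrative): HybridHeightFactory derives an 'altitude' coordinate
# from the coordinates above as
#     altitude = level_height + sigma * surface_altitude
# broadcast over the (Z, X) grid, so hybrid_height().coord('altitude')
# yields a two-dimensional derived coordinate.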
def simple_4d_with_hybrid_height():
cube = iris.cube.Cube(np.arange(3*4*5*6, dtype='i8').reshape(3,4,5,6),
"air_temperature", units="K")
cube.add_dim_coord(iris.coords.DimCoord(np.arange(3, dtype='i8'), "time",
units="hours since epoch"), 0)
cube.add_dim_coord(iris.coords.DimCoord(np.arange(4, dtype='i8')+10,
"model_level_number", units="1"), 1)
cube.add_dim_coord(iris.coords.DimCoord(np.arange(5, dtype='i8')+20,
"grid_latitude",
units="degrees"), 2)
cube.add_dim_coord(iris.coords.DimCoord(np.arange(6, dtype='i8')+30,
"grid_longitude",
units="degrees"), 3)
cube.add_aux_coord(iris.coords.AuxCoord(np.arange(4, dtype='i8')+40,
long_name="level_height",
units="m"), 1)
cube.add_aux_coord(iris.coords.AuxCoord(np.arange(4, dtype='i8')+50,
long_name="sigma", units="1"), 1)
cube.add_aux_coord(iris.coords.AuxCoord(np.arange(5*6, dtype='i8').reshape(5,6)+100,
long_name="surface_altitude",
units="m"), [2,3])
cube.add_aux_factory(iris.aux_factory.HybridHeightFactory(
delta=cube.coord("level_height"),
sigma=cube.coord("sigma"),
orography=cube.coord("surface_altitude")))
return cube
def realistic_4d():
"""
Returns a realistic 4d cube.
>>> print repr(realistic_4d())
<iris 'Cube' of air_potential_temperature (time: 6; model_level_number: 70; grid_latitude: 100; grid_longitude: 100)>
"""
# the stock arrays were created in Iris 0.8 with:
# >>> fname = iris.sample_data_path('PP', 'COLPEX', 'theta_and_orog_subset.pp')
# >>> theta = iris.load_cube(fname, 'air_potential_temperature')
# >>> for coord in theta.coords():
# ... print coord.name, coord.has_points(), coord.has_bounds(), coord.units
# ...
# grid_latitude True True degrees
# grid_longitude True True degrees
# level_height True True m
# model_level True False 1
# sigma True True 1
# time True False hours since 1970-01-01 00:00:00
# source True False no_unit
# forecast_period True False hours
# >>> arrays = []
# >>> for coord in theta.coords():
# ... if coord.has_points(): arrays.append(coord.points)
# ... if coord.has_bounds(): arrays.append(coord.bounds)
# >>> arrays.append(theta.data)
# >>> arrays.append(theta.coord('sigma').coord_system.orography.data)
# >>> np.savez('stock_arrays.npz', *arrays)
data_path = os.path.join(os.path.dirname(__file__), 'stock_arrays.npz')
r = np.load(data_path)
# sort the arrays based on the order they were originally given. The names given are of the form 'arr_1' or 'arr_10'
_, arrays = zip(*sorted(r.iteritems(), key=lambda item: int(item[0][4:])))
lat_pts, lat_bnds, lon_pts, lon_bnds, level_height_pts, \
level_height_bnds, model_level_pts, sigma_pts, sigma_bnds, time_pts, \
_source_pts, forecast_period_pts, data, orography = arrays
ll_cs = RotatedGeogCS(37.5, 177.5, ellipsoid=GeogCS(6371229.0))
lat = icoords.DimCoord(lat_pts, standard_name='grid_latitude', units='degrees',
bounds=lat_bnds, coord_system=ll_cs)
lon = icoords.DimCoord(lon_pts, standard_name='grid_longitude', units='degrees',
bounds=lon_bnds, coord_system=ll_cs)
level_height = icoords.DimCoord(level_height_pts, long_name='level_height',
units='m', bounds=level_height_bnds,
attributes={'positive': 'up'})
model_level = icoords.DimCoord(model_level_pts, standard_name='model_level_number',
units='1', attributes={'positive': 'up'})
sigma = icoords.AuxCoord(sigma_pts, long_name='sigma', units='1', bounds=sigma_bnds)
orography = icoords.AuxCoord(orography, standard_name='surface_altitude', units='m')
time = icoords.DimCoord(time_pts, standard_name='time', units='hours since 1970-01-01 00:00:00')
forecast_period = icoords.DimCoord(forecast_period_pts, standard_name='forecast_period', units='hours')
hybrid_height = iris.aux_factory.HybridHeightFactory(level_height, sigma, orography)
cube = iris.cube.Cube(data, standard_name='air_potential_temperature', units='K',
dim_coords_and_dims=[(time, 0), (model_level, 1), (lat, 2), (lon, 3)],
aux_coords_and_dims=[(orography, (2, 3)), (level_height, 1), (sigma, 1),
(forecast_period, None)],
attributes={'source': 'Iris test case'},
aux_factories=[hybrid_height])
return cube
def realistic_4d_no_derived():
"""
Returns a realistic 4d cube without hybrid height
>>> print repr(realistic_4d())
<iris 'Cube' of air_potential_temperature (time: 6; model_level_number: 70; grid_latitude: 100; grid_longitude: 100)>
"""
cube = realistic_4d()
# TODO determine appropriate way to remove aux_factory from a cube
cube._aux_factories = []
return cube
def realistic_4d_w_missing_data():
data_path = os.path.join(os.path.dirname(__file__), 'stock_mdi_arrays.npz')
data_archive = np.load(data_path)
data = ma.masked_array(data_archive['arr_0'], mask=data_archive['arr_1'])
ll_cs = GeogCS(6371229)
lat = iris.coords.DimCoord(np.arange(20, dtype=np.float32), standard_name='grid_latitude',
units='degrees', coord_system=ll_cs)
lon = iris.coords.DimCoord(np.arange(20, dtype=np.float32), standard_name='grid_longitude',
units='degrees', coord_system=ll_cs)
time = iris.coords.DimCoord([1000., 1003., 1006.], standard_name='time',
units='hours since 1970-01-01 00:00:00')
forecast_period = iris.coords.DimCoord([0.0, 3.0, 6.0], standard_name='forecast_period', units='hours')
pressure = iris.coords.DimCoord(np.array([ 800., 900., 1000.], dtype=np.float32),
long_name='pressure', units='hPa')
cube = iris.cube.Cube(data, long_name='missing data test data', units='K',
dim_coords_and_dims=[(time, 0), (pressure, 1), (lat, 2), (lon, 3)],
aux_coords_and_dims=[(forecast_period, 0)],
attributes={'source':'Iris test case'})
return cube
def global_grib2():
path = tests.get_data_path(('GRIB', 'global_t', 'global.grib2'))
cube = iris.load_cube(path)
return cube
|
kwilliams-mo/iris
|
lib/iris/tests/stock.py
|
Python
|
gpl-3.0
| 20,802
|
[
"NetCDF"
] |
8dcd049e5168db58334392d49d889d73381bb8337849d47dc0d634aeebafb0aa
|
from __future__ import annotations
import logging
import os
import sys
import time
import traceback
from libtbx import Auto
import xia2.Handlers.Streams
from xia2.Applications.xia2_main import check_environment, write_citations
from xia2.Handlers.Citations import Citations
from xia2.Handlers.Files import cleanup
from xia2.Handlers.Phil import PhilIndex
from xia2.XIA2Version import Version
logger = logging.getLogger("xia2.cli.rescale")
def run():
if os.path.exists("xia2-working.phil"):
sys.argv.append("xia2-working.phil")
try:
check_environment()
except Exception as e:
with open("xia2-error.txt", "w") as fh:
traceback.print_exc(file=fh)
        logger.error('Status: error "%s"', str(e))
        sys.exit(1)
# print the version
logger.info(Version)
Citations.cite("xia2")
start_time = time.time()
assert os.path.exists("xia2.json")
from xia2.Schema.XProject import XProject
xinfo = XProject.from_json(filename="xia2.json")
with cleanup(xinfo.path):
crystals = xinfo.get_crystals()
for crystal_id, crystal in crystals.items():
scale_dir = PhilIndex.params.xia2.settings.scale.directory
if scale_dir is Auto:
scale_dir = "scale"
i = 0
while os.path.exists(os.path.join(crystal.get_name(), scale_dir)):
i += 1
scale_dir = "scale%i" % i
PhilIndex.params.xia2.settings.scale.directory = scale_dir
# reset scaler
crystals[crystal_id]._scaler = None
crystal._get_scaler()
logger.info(xinfo.get_output())
crystal.serialize()
duration = time.time() - start_time
# write out the time taken in a human readable way
logger.info(
"Processing took %s", time.strftime("%Hh %Mm %Ss", time.gmtime(duration))
)
write_citations()
xinfo.as_json(filename="xia2.json")
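# Note on the scale-directory handling above (illustrative): successive
# rescale runs write into fresh directories ('scale', then 'scale1',
# 'scale2', ...) because the while loop bumps the suffix until it finds a
# name that does not yet exist under the crystal directory.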
def run_with_log():
xia2.Handlers.Streams.setup_logging(
logfile="xia2.rescale.txt", debugfile="xia2.rescale-debug.txt"
)
run()
|
xia2/xia2
|
src/xia2/cli/rescale.py
|
Python
|
bsd-3-clause
| 2,145
|
[
"CRYSTAL"
] |
a3a1b0f6c1fdacb2518f986e36ee090de765ad89299362e8e2f87234b230b0ab
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import galaxy.main.mixins
from django.conf import settings
import galaxy.main.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main', '0029_importtask_github_branch'),
]
operations = [
migrations.CreateModel(
name='Stargazer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('description', galaxy.main.fields.TruncatingCharField(default=b'', max_length=255, blank=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('active', models.BooleanField(default=True, db_index=True)),
('github_user', models.CharField(max_length=256, verbose_name=b'Github Username')),
('github_repo', models.CharField(max_length=256, verbose_name=b'Github Repository')),
('owner', models.ForeignKey(related_name='starred', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('owner', 'github_user', 'github_repo'),
},
bases=(models.Model, galaxy.main.mixins.DirtyMixin),
),
migrations.CreateModel(
name='Subscription',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('description', galaxy.main.fields.TruncatingCharField(default=b'', max_length=255, blank=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('active', models.BooleanField(default=True, db_index=True)),
('github_user', models.CharField(max_length=256, verbose_name=b'Github Username')),
('github_repo', models.CharField(max_length=256, verbose_name=b'Github Repository')),
('owner', models.ForeignKey(related_name='subscriptions', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('owner', 'github_user', 'github_repo'),
},
bases=(models.Model, galaxy.main.mixins.DirtyMixin),
),
migrations.AlterIndexTogether(
name='subscription',
index_together=set([('owner', 'github_user', 'github_repo')]),
),
migrations.AlterIndexTogether(
name='stargazer',
index_together=set([('owner', 'github_user', 'github_repo')]),
),
]
|
chouseknecht/galaxy
|
galaxy/main/migrations/0030_auto_20151127_0824.py
|
Python
|
apache-2.0
| 2,776
|
[
"Galaxy"
] |
3335357232eb0066b791f86f5dcf58a51dd65e506292f96a631e4089778f28ae
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Views tests for the OSF."""
from __future__ import absolute_import
import datetime as dt
import httplib as http
import json
import time
import unittest
import urllib
from flask import request
import mock
import pytest
from nose.tools import * # noqa PEP8 asserts
from django.utils import timezone
from django.apps import apps
from django.core.exceptions import ValidationError
from django.db import connection, transaction
from django.test import TransactionTestCase
from django.test.utils import CaptureQueriesContext
from addons.github.tests.factories import GitHubAccountFactory
from addons.wiki.models import WikiPage
from framework.auth import cas, authenticate
from framework.flask import redirect
from framework.auth.core import generate_verification_key
from framework import auth
from framework.auth.campaigns import get_campaigns, is_institution_login, is_native_login, is_proxy_login, campaign_url_for
from framework.auth import Auth
from framework.auth.cas import get_login_url
from framework.auth.exceptions import InvalidTokenError
from framework.auth.utils import impute_names_model, ensure_external_identity_uniqueness
from framework.auth.views import login_and_register_handler
from framework.celery_tasks import handlers
from framework.exceptions import HTTPError, TemplateHTTPError
from framework.transactions.handlers import no_auto_transaction
from website import mailchimp_utils, mails, settings, language
from addons.osfstorage import settings as osfstorage_settings
from osf.models import AbstractNode, NodeLog, QuickFilesNode
from website.profile.utils import add_contributor_json, serialize_unregistered
from website.profile.views import fmt_date_or_none, update_osf_help_mails_subscription
from website.project.decorators import check_can_access
from website.project.model import has_anonymous_link
from website.project.signals import contributor_added
from website.project.views.contributor import (
deserialize_contributors,
notify_added_contributor,
send_claim_email,
send_claim_registered_email,
)
from website.project.views.node import _should_show_wiki_widget, _view_project, abbrev_authors
from website.util import api_url_for, web_url_for
from website.util import rubeus
from osf.utils import permissions
from osf.models import Comment
from osf.models import OSFUser
from osf.models import Email
from tests.base import (
assert_is_redirect,
capture_signals,
fake,
get_default_metaschema,
OsfTestCase,
assert_datetime_equal,
)
from tests.base import test_app as mock_app
from tests.test_cas_authentication import generate_external_user_with_resp, make_external_response
from api_tests.utils import create_test_file
pytestmark = pytest.mark.django_db
from osf.models import NodeRelation, BlacklistedEmailDomain
from osf_tests.factories import (
fake_email,
ApiOAuth2ApplicationFactory,
ApiOAuth2PersonalTokenFactory,
AuthUserFactory,
CollectionFactory,
CommentFactory,
InstitutionFactory,
NodeFactory,
PreprintFactory,
PreprintProviderFactory,
PrivateLinkFactory,
ProjectFactory,
ProjectWithAddonFactory,
RegistrationFactory,
RegistrationProviderFactory,
UserFactory,
UnconfirmedUserFactory,
UnregUserFactory,
RegionFactory
)
@mock_app.route('/errorexc')
def error_exc():
UserFactory()
raise RuntimeError
@mock_app.route('/error500')
def error500():
UserFactory()
return 'error', 500
@mock_app.route('/noautotransact')
@no_auto_transaction
def no_auto_transact():
UserFactory()
return 'error', 500
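# Illustrative note on the three routes above: '/error500' and '/errorexc'
# run inside the automatic per-request transaction, so the UserFactory()
# row is rolled back when the request errors; '/noautotransact' opts out
# via @no_auto_transaction, so its row survives the failed request (this
# is exactly what TestViewsAreAtomic below asserts).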
class TestViewsAreAtomic(OsfTestCase):
def test_error_response_rolls_back_transaction(self):
original_user_count = OSFUser.objects.count()
self.app.get('/error500', expect_errors=True)
assert_equal(OSFUser.objects.count(), original_user_count)
        # Need to set debug = False in order to roll back transactions in transaction_teardown_request
mock_app.debug = False
try:
self.app.get('/errorexc', expect_errors=True)
except RuntimeError:
pass
mock_app.debug = True
self.app.get('/noautotransact', expect_errors=True)
assert_equal(OSFUser.objects.count(), original_user_count + 1)
@pytest.mark.enable_bookmark_creation
class TestViewingProjectWithPrivateLink(OsfTestCase):
def setUp(self):
super(TestViewingProjectWithPrivateLink, self).setUp()
self.user = AuthUserFactory() # Is NOT a contributor
self.project = ProjectFactory(is_public=False)
self.link = PrivateLinkFactory()
self.link.nodes.add(self.project)
self.link.save()
self.project_url = self.project.web_url_for('view_project')
def test_edit_private_link_empty(self):
node = ProjectFactory(creator=self.user)
link = PrivateLinkFactory()
link.nodes.add(node)
link.save()
url = node.api_url_for('project_private_link_edit')
res = self.app.put_json(url, {'pk': link._id, 'value': ''}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_in('Title cannot be blank', res.body)
def test_edit_private_link_invalid(self):
node = ProjectFactory(creator=self.user)
link = PrivateLinkFactory()
link.nodes.add(node)
link.save()
url = node.api_url_for('project_private_link_edit')
res = self.app.put_json(url, {'pk': link._id, 'value': '<a></a>'}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_in('Invalid link name.', res.body)
@mock.patch('framework.auth.core.Auth.private_link')
def test_can_be_anonymous_for_public_project(self, mock_property):
        mock_property.return_value = mock.MagicMock()
mock_property.anonymous = True
anonymous_link = PrivateLinkFactory(anonymous=True)
anonymous_link.nodes.add(self.project)
anonymous_link.save()
self.project.set_privacy('public')
self.project.save()
self.project.reload()
auth = Auth(user=self.user, private_key=anonymous_link.key)
assert_true(has_anonymous_link(self.project, auth))
def test_has_private_link_key(self):
res = self.app.get(self.project_url, {'view_only': self.link.key})
assert_equal(res.status_code, 200)
def test_not_logged_in_no_key(self):
res = self.app.get(self.project_url, {'view_only': None})
assert_is_redirect(res)
res = res.follow(expect_errors=True)
assert_equal(res.status_code, 301)
assert_equal(
res.request.path,
'/login'
)
def test_logged_in_no_private_key(self):
res = self.app.get(self.project_url, {'view_only': None}, auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, http.FORBIDDEN)
def test_logged_in_has_key(self):
res = self.app.get(
self.project_url, {'view_only': self.link.key}, auth=self.user.auth)
assert_equal(res.status_code, 200)
@unittest.skip('Skipping for now until we find a way to mock/set the referrer')
def test_prepare_private_key(self):
res = self.app.get(self.project_url, {'key': self.link.key})
res = res.click('Registrations')
assert_is_redirect(res)
res = res.follow()
assert_equal(res.status_code, 200)
assert_equal(res.request.GET['key'], self.link.key)
def test_cannot_access_registrations_or_forks_with_anon_key(self):
anonymous_link = PrivateLinkFactory(anonymous=True)
anonymous_link.nodes.add(self.project)
anonymous_link.save()
self.project.is_public = False
self.project.save()
url = self.project_url + 'registrations/?view_only={}'.format(anonymous_link.key)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_can_access_registrations_and_forks_with_not_anon_key(self):
link = PrivateLinkFactory(anonymous=False)
link.nodes.add(self.project)
link.save()
self.project.is_public = False
self.project.save()
url = self.project_url + 'registrations/?view_only={}'.format(self.link.key)
res = self.app.get(url)
assert_equal(res.status_code, 302)
assert_in(url.replace('/project/', ''), res.location)
def test_check_can_access_valid(self):
contributor = AuthUserFactory()
self.project.add_contributor(contributor, auth=Auth(self.project.creator))
self.project.save()
assert_true(check_can_access(self.project, contributor))
def test_check_user_access_invalid(self):
noncontrib = AuthUserFactory()
with assert_raises(HTTPError):
check_can_access(self.project, noncontrib)
def test_check_user_access_if_user_is_None(self):
assert_false(check_can_access(self.project, None))
def test_check_can_access_invalid_access_requests_enabled(self):
noncontrib = AuthUserFactory()
assert self.project.access_requests_enabled
with assert_raises(TemplateHTTPError):
check_can_access(self.project, noncontrib)
def test_check_can_access_invalid_access_requests_disabled(self):
noncontrib = AuthUserFactory()
self.project.access_requests_enabled = False
self.project.save()
with assert_raises(HTTPError):
check_can_access(self.project, noncontrib)
@pytest.mark.enable_bookmark_creation
class TestProjectViews(OsfTestCase):
def setUp(self):
super(TestProjectViews, self).setUp()
self.user1 = AuthUserFactory()
self.user1.save()
self.consolidate_auth1 = Auth(user=self.user1)
self.auth = self.user1.auth
self.user2 = AuthUserFactory()
self.auth2 = self.user2.auth
# A project has 2 contributors
self.project = ProjectFactory(
title='Ham',
description='Honey-baked',
creator=self.user1
)
self.project.add_contributor(self.user2, auth=Auth(self.user1))
self.project.save()
self.project2 = ProjectFactory(
title='Tofu',
description='Glazed',
creator=self.user1
)
self.project2.add_contributor(self.user2, auth=Auth(self.user1))
self.project2.save()
@mock.patch('framework.status.push_status_message')
def test_view_project_tos_status_message(self, mock_push_status_message):
self.app.get(
self.project.web_url_for('view_project'),
auth=self.auth
)
assert_true(mock_push_status_message.called)
assert_equal('terms_of_service', mock_push_status_message.mock_calls[0][2]['id'])
@mock.patch('framework.status.push_status_message')
def test_view_project_no_tos_status_message(self, mock_push_status_message):
self.user1.accepted_terms_of_service = timezone.now()
self.user1.save()
self.app.get(
self.project.web_url_for('view_project'),
auth=self.auth
)
assert_false(mock_push_status_message.called)
def test_node_setting_with_multiple_matched_institution_email_domains(self):
# User has alternate emails matching more than one institution's email domains
inst1 = InstitutionFactory(email_domains=['foo.bar'])
inst2 = InstitutionFactory(email_domains=['baz.qux'])
user = AuthUserFactory()
user.emails.create(address='queen@foo.bar')
user.emails.create(address='brian@baz.qux')
user.save()
project = ProjectFactory(creator=user)
# node settings page loads without error
url = project.web_url_for('node_setting')
res = self.app.get(url, auth=user.auth)
assert_equal(res.status_code, 200)
# user is automatically affiliated with institutions
# that matched email domains
user.reload()
assert_in(inst1, user.affiliated_institutions.all())
assert_in(inst2, user.affiliated_institutions.all())
def test_edit_title_empty(self):
node = ProjectFactory(creator=self.user1)
url = node.api_url_for('edit_node')
res = self.app.post_json(url, {'name': 'title', 'value': ''}, auth=self.user1.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_in('Title cannot be blank', res.body)
def test_edit_title_invalid(self):
node = ProjectFactory(creator=self.user1)
url = node.api_url_for('edit_node')
res = self.app.post_json(url, {'name': 'title', 'value': '<a></a>'}, auth=self.user1.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_in('Invalid title.', res.body)
def test_view_project_doesnt_select_for_update(self):
node = ProjectFactory(creator=self.user1)
url = node.api_url_for('view_project')
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
res = self.app.get(url, auth=self.user1.auth)
for_update_sql = connection.ops.for_update_sql()
assert_equal(res.status_code, 200)
assert not any(for_update_sql in query['sql'] for query in ctx.captured_queries)
def test_cannot_remove_only_visible_contributor(self):
user1_contrib = self.project.contributor_set.get(user=self.user1)
user1_contrib.visible = False
user1_contrib.save()
url = self.project.api_url_for('project_remove_contributor')
res = self.app.post_json(
url, {'contributorID': self.user2._id,
'nodeIDs': [self.project._id]}, auth=self.auth, expect_errors=True
)
assert_equal(res.status_code, http.FORBIDDEN)
assert_equal(res.json['message_long'], 'Must have at least one bibliographic contributor')
assert_true(self.project.is_contributor(self.user2))
def test_remove_only_visible_contributor_return_false(self):
user1_contrib = self.project.contributor_set.get(user=self.user1)
user1_contrib.visible = False
user1_contrib.save()
ret = self.project.remove_contributor(contributor=self.user2, auth=self.consolidate_auth1)
assert_false(ret)
self.project.reload()
assert_true(self.project.is_contributor(self.user2))
def test_can_view_nested_project_as_admin(self):
self.parent_project = NodeFactory(
title='parent project',
category='project',
parent=self.project,
is_public=False
)
self.parent_project.save()
self.child_project = NodeFactory(
title='child project',
category='project',
parent=self.parent_project,
is_public=False
)
self.child_project.save()
url = self.child_project.web_url_for('view_project')
res = self.app.get(url, auth=self.auth)
assert_not_in('Private Project', res.body)
assert_in('parent project', res.body)
def test_edit_description(self):
url = '/api/v1/project/{0}/edit/'.format(self.project._id)
self.app.post_json(url,
{'name': 'description', 'value': 'Deep-fried'},
auth=self.auth)
self.project.reload()
assert_equal(self.project.description, 'Deep-fried')
def test_project_api_url(self):
url = self.project.api_url
res = self.app.get(url, auth=self.auth)
data = res.json
assert_equal(data['node']['category'], 'Project')
assert_equal(data['node']['node_type'], 'project')
assert_equal(data['node']['title'], self.project.title)
assert_equal(data['node']['is_public'], self.project.is_public)
assert_equal(data['node']['is_registration'], False)
assert_equal(data['node']['id'], self.project._primary_key)
assert_true(data['user']['is_contributor'])
assert_equal(data['node']['description'], self.project.description)
assert_equal(data['node']['url'], self.project.url)
assert_equal(data['node']['tags'], list(self.project.tags.values_list('name', flat=True)))
assert_in('forked_date', data['node'])
assert_in('registered_from_url', data['node'])
# TODO: Test "parent" and "user" output
def test_add_contributor_post(self):
# Two users are added as a contributor via a POST request
project = ProjectFactory(creator=self.user1, is_public=True)
user2 = UserFactory()
user3 = UserFactory()
url = '/api/v1/project/{0}/contributors/'.format(project._id)
dict2 = add_contributor_json(user2)
dict3 = add_contributor_json(user3)
dict2.update({
'permission': 'admin',
'visible': True,
})
dict3.update({
'permission': 'write',
'visible': False,
})
self.app.post_json(
url,
{
'users': [dict2, dict3],
'node_ids': [project._id],
},
content_type='application/json',
auth=self.auth,
).maybe_follow()
project.reload()
assert_in(user2, project.contributors)
# A log event was added
assert_equal(project.logs.latest().action, 'contributor_added')
assert_equal(len(project.contributors), 3)
assert_equal(project.get_permissions(user2), ['read', 'write', 'admin'])
assert_equal(project.get_permissions(user3), ['read', 'write'])
def test_manage_permissions(self):
url = self.project.api_url + 'contributors/manage/'
self.app.post_json(
url,
{
'contributors': [
{'id': self.project.creator._id, 'permission': 'admin',
'registered': True, 'visible': True},
{'id': self.user1._id, 'permission': 'read',
'registered': True, 'visible': True},
{'id': self.user2._id, 'permission': 'admin',
'registered': True, 'visible': True},
]
},
auth=self.auth,
)
self.project.reload()
assert_equal(self.project.get_permissions(self.user1), ['read'])
assert_equal(self.project.get_permissions(self.user2), ['read', 'write', 'admin'])
def test_manage_permissions_again(self):
url = self.project.api_url + 'contributors/manage/'
self.app.post_json(
url,
{
'contributors': [
{'id': self.user1._id, 'permission': 'admin',
'registered': True, 'visible': True},
{'id': self.user2._id, 'permission': 'admin',
'registered': True, 'visible': True},
]
},
auth=self.auth,
)
self.project.reload()
self.app.post_json(
url,
{
'contributors': [
{'id': self.user1._id, 'permission': 'admin',
'registered': True, 'visible': True},
{'id': self.user2._id, 'permission': 'read',
'registered': True, 'visible': True},
]
},
auth=self.auth,
)
self.project.reload()
assert_equal(self.project.get_permissions(self.user2), ['read'])
assert_equal(self.project.get_permissions(self.user1), ['read', 'write', 'admin'])
def test_contributor_manage_reorder(self):
# Two users are added as a contributor via a POST request
project = ProjectFactory(creator=self.user1, is_public=True)
reg_user1, reg_user2 = UserFactory(), UserFactory()
project.add_contributors(
[
{'user': reg_user1, 'permissions': [
'read', 'write', 'admin'], 'visible': True},
{'user': reg_user2, 'permissions': [
'read', 'write', 'admin'], 'visible': False},
]
)
# Add a non-registered user
unregistered_user = project.add_unregistered_contributor(
fullname=fake.name(), email=fake_email(),
auth=self.consolidate_auth1,
save=True,
)
url = project.api_url + 'contributors/manage/'
self.app.post_json(
url,
{
'contributors': [
{'id': reg_user2._id, 'permission': 'admin',
'registered': True, 'visible': False},
{'id': project.creator._id, 'permission': 'admin',
'registered': True, 'visible': True},
{'id': unregistered_user._id, 'permission': 'admin',
'registered': False, 'visible': True},
{'id': reg_user1._id, 'permission': 'admin',
'registered': True, 'visible': True},
]
},
auth=self.auth,
)
project.reload()
assert_equal(
# Note: Cast ForeignList to list for comparison
list(project.contributors),
[reg_user2, project.creator, unregistered_user, reg_user1]
)
assert_equal(
list(project.visible_contributors),
[project.creator, unregistered_user, reg_user1]
)
def test_project_remove_contributor(self):
url = self.project.api_url_for('project_remove_contributor')
# User 1 removes user2
payload = {'contributorID': self.user2._id,
'nodeIDs': [self.project._id]}
self.app.post(url, json.dumps(payload),
content_type='application/json',
auth=self.auth).maybe_follow()
self.project.reload()
assert_not_in(self.user2._id, self.project.contributors)
# A log event was added
assert_equal(self.project.logs.latest().action, 'contributor_removed')
def test_multiple_project_remove_contributor(self):
url = self.project.api_url_for('project_remove_contributor')
# User 1 removes user2
payload = {'contributorID': self.user2._id,
'nodeIDs': [self.project._id, self.project2._id]}
res = self.app.post(url, json.dumps(payload),
content_type='application/json',
auth=self.auth).maybe_follow()
self.project.reload()
self.project2.reload()
assert_not_in(self.user2._id, self.project.contributors)
assert_not_in('/dashboard/', res.json)
assert_not_in(self.user2._id, self.project2.contributors)
# A log event was added
assert_equal(self.project.logs.latest().action, 'contributor_removed')
def test_private_project_remove_self_not_admin(self):
url = self.project.api_url_for('project_remove_contributor')
# user2 removes self
payload = {'contributorID': self.user2._id,
'nodeIDs': [self.project._id]}
res = self.app.post(url, json.dumps(payload),
content_type='application/json',
auth=self.auth2).maybe_follow()
self.project.reload()
assert_equal(res.status_code, 200)
assert_equal(res.json['redirectUrl'], '/dashboard/')
assert_not_in(self.user2._id, self.project.contributors)
def test_public_project_remove_self_not_admin(self):
url = self.project.api_url_for('project_remove_contributor')
# user2 removes self
self.public_project = ProjectFactory(creator=self.user1, is_public=True)
self.public_project.add_contributor(self.user2, auth=Auth(self.user1))
self.public_project.save()
payload = {'contributorID': self.user2._id,
'nodeIDs': [self.public_project._id]}
res = self.app.post(url, json.dumps(payload),
content_type='application/json',
auth=self.auth2).maybe_follow()
self.public_project.reload()
assert_equal(res.status_code, 200)
assert_equal(res.json['redirectUrl'], '/' + self.public_project._id + '/')
assert_not_in(self.user2._id, self.public_project.contributors)
def test_project_remove_other_not_admin(self):
url = self.project.api_url_for('project_remove_contributor')
# User 1 removes user2
payload = {'contributorID': self.user1._id,
'nodeIDs': [self.project._id]}
res = self.app.post(url, json.dumps(payload),
content_type='application/json',
expect_errors=True,
auth=self.auth2).maybe_follow()
self.project.reload()
assert_equal(res.status_code, 403)
assert_equal(res.json['message_long'],
'You do not have permission to perform this action. '
'If this should not have occurred and the issue persists, '
+ language.SUPPORT_LINK
)
assert_in(self.user1, self.project.contributors)
def test_project_remove_fake_contributor(self):
url = self.project.api_url_for('project_remove_contributor')
# User 1 removes user2
payload = {'contributorID': 'badid',
'nodeIDs': [self.project._id]}
res = self.app.post(url, json.dumps(payload),
content_type='application/json',
expect_errors=True,
auth=self.auth).maybe_follow()
self.project.reload()
# Assert the contributor id was invalid
assert_equal(res.status_code, 400)
assert_equal(res.json['message_long'], 'Contributor not found.')
assert_not_in('badid', self.project.contributors)
def test_project_remove_self_only_admin(self):
url = self.project.api_url_for('project_remove_contributor')
# User 1 removes user2
payload = {'contributorID': self.user1._id,
'nodeIDs': [self.project._id]}
res = self.app.post(url, json.dumps(payload),
content_type='application/json',
expect_errors=True,
auth=self.auth).maybe_follow()
self.project.reload()
assert_equal(res.status_code, 400)
assert_equal(res.json['message_long'], 'Could not remove contributor.')
assert_in(self.user1, self.project.contributors)
def test_get_contributors_abbrev(self):
# create a project with 3 registered contributors
project = ProjectFactory(creator=self.user1, is_public=True)
reg_user1, reg_user2 = UserFactory(), UserFactory()
project.add_contributors(
[
{'user': reg_user1, 'permissions': [
'read', 'write', 'admin'], 'visible': True},
{'user': reg_user2, 'permissions': [
'read', 'write', 'admin'], 'visible': True},
]
)
# add an unregistered contributor
project.add_unregistered_contributor(
fullname=fake.name(), email=fake_email(),
auth=self.consolidate_auth1,
save=True,
)
url = project.api_url_for('get_node_contributors_abbrev')
res = self.app.get(url, auth=self.auth)
assert_equal(len(project.contributors), 4)
assert_equal(len(res.json['contributors']), 3)
assert_equal(len(res.json['others_count']), 1)
assert_equal(res.json['contributors'][0]['separator'], ',')
assert_equal(res.json['contributors'][1]['separator'], ',')
assert_equal(res.json['contributors'][2]['separator'], ' &')
def test_edit_node_title(self):
url = '/api/v1/project/{0}/edit/'.format(self.project._id)
# The title is changed though posting form data
self.app.post_json(url, {'name': 'title', 'value': 'Bacon'},
auth=self.auth).maybe_follow()
self.project.reload()
# The title was changed
assert_equal(self.project.title, 'Bacon')
# A log event was saved
assert_equal(self.project.logs.latest().action, 'edit_title')
def test_add_tag(self):
url = self.project.api_url_for('project_add_tag')
self.app.post_json(url, {'tag': "foo'ta#@%#%^&g?"}, auth=self.auth)
self.project.reload()
assert_in("foo'ta#@%#%^&g?", self.project.tags.values_list('name', flat=True))
assert_equal("foo'ta#@%#%^&g?", self.project.logs.latest().params['tag'])
def test_remove_tag(self):
self.project.add_tag("foo'ta#@%#%^&g?", auth=self.consolidate_auth1, save=True)
assert_in("foo'ta#@%#%^&g?", self.project.tags.values_list('name', flat=True))
url = self.project.api_url_for('project_remove_tag')
self.app.delete_json(url, {'tag': "foo'ta#@%#%^&g?"}, auth=self.auth)
self.project.reload()
assert_not_in("foo'ta#@%#%^&g?", self.project.tags.values_list('name', flat=True))
latest_log = self.project.logs.latest()
assert_equal('tag_removed', latest_log.action)
assert_equal("foo'ta#@%#%^&g?", latest_log.params['tag'])
# Regression test for #OSF-5257
def test_removal_empty_tag_throws_error(self):
url = self.project.api_url_for('project_remove_tag')
res = self.app.delete_json(url, {'tag': ''}, auth=self.auth, expect_errors=True)
assert_equal(res.status_code, http.BAD_REQUEST)
# Regression test for #OSF-5257
def test_removal_unknown_tag_throws_error(self):
self.project.add_tag('narf', auth=self.consolidate_auth1, save=True)
url = self.project.api_url_for('project_remove_tag')
res = self.app.delete_json(url, {'tag': 'troz'}, auth=self.auth, expect_errors=True)
assert_equal(res.status_code, http.CONFLICT)
def test_suspended_project(self):
node = NodeFactory(parent=self.project, creator=self.user1)
node.remove_node(Auth(self.user1))
node.reload()
node.suspended = True
node.save()
url = node.api_url
res = self.app.get(url, auth=Auth(self.user1), expect_errors=True)
assert_equal(res.status_code, 451)
def test_private_link_edit_name(self):
link = PrivateLinkFactory(name='link')
link.nodes.add(self.project)
link.save()
assert_equal(link.name, 'link')
url = self.project.api_url + 'private_link/edit/'
self.app.put_json(
url,
{'pk': link._id, 'value': 'new name'},
auth=self.auth,
).maybe_follow()
self.project.reload()
link.reload()
assert_equal(link.name, 'new name')
def test_remove_private_link(self):
link = PrivateLinkFactory()
link.nodes.add(self.project)
link.save()
url = self.project.api_url_for('remove_private_link')
self.app.delete_json(
url,
{'private_link_id': link._id},
auth=self.auth,
).maybe_follow()
self.project.reload()
link.reload()
assert_true(link.is_deleted)
def test_remove_private_link_log(self):
link = PrivateLinkFactory()
link.nodes.add(self.project)
link.save()
url = self.project.api_url_for('remove_private_link')
self.app.delete_json(
url,
{'private_link_id': link._id},
auth=self.auth,
).maybe_follow()
last_log = self.project.logs.latest()
assert last_log.action == NodeLog.VIEW_ONLY_LINK_REMOVED
assert not last_log.params.get('anonymous_link')
def test_remove_private_link_anonymous_log(self):
link = PrivateLinkFactory(anonymous=True)
link.nodes.add(self.project)
link.save()
url = self.project.api_url_for('remove_private_link')
self.app.delete_json(
url,
{'private_link_id': link._id},
auth=self.auth,
).maybe_follow()
last_log = self.project.logs.latest()
assert last_log.action == NodeLog.VIEW_ONLY_LINK_REMOVED
assert last_log.params.get('anonymous_link')
def test_remove_component(self):
node = NodeFactory(parent=self.project, creator=self.user1)
url = node.api_url
res = self.app.delete_json(url, {}, auth=self.auth).maybe_follow()
node.reload()
assert_equal(node.is_deleted, True)
assert_in('url', res.json)
assert_equal(res.json['url'], self.project.url)
def test_cant_remove_component_if_not_admin(self):
node = NodeFactory(parent=self.project, creator=self.user1)
non_admin = AuthUserFactory()
node.add_contributor(
non_admin,
permissions=['read', 'write'],
save=True,
)
url = node.api_url
res = self.app.delete_json(
url, {}, auth=non_admin.auth,
expect_errors=True,
).maybe_follow()
assert_equal(res.status_code, http.FORBIDDEN)
assert_false(node.is_deleted)
def test_view_project_returns_whether_to_show_wiki_widget(self):
user = AuthUserFactory()
project = ProjectFactory(creator=user, is_public=True)
project.add_contributor(user)
project.save()
url = project.api_url_for('view_project')
res = self.app.get(url, auth=user.auth)
assert_equal(res.status_code, http.OK)
assert_in('show_wiki_widget', res.json['user'])
def test_fork_grandcomponents_has_correct_root(self):
user = AuthUserFactory()
project = ProjectFactory(creator=user)
auth = Auth(project.creator)
child = NodeFactory(parent=project, creator=user)
grand_child = NodeFactory(parent=child, creator=user)
project.save()
fork = project.fork_node(auth)
fork.save()
grand_child_fork = fork.nodes[0].nodes[0]
assert_equal(grand_child_fork.root, fork)
def test_fork_count_does_not_include_deleted_forks(self):
user = AuthUserFactory()
project = ProjectFactory(creator=user)
auth = Auth(project.creator)
fork = project.fork_node(auth)
project.save()
fork.remove_node(auth)
url = project.api_url_for('view_project')
res = self.app.get(url, auth=user.auth)
assert_in('fork_count', res.json['node'])
assert_equal(0, res.json['node']['fork_count'])
def test_fork_count_does_not_include_fork_registrations(self):
user = AuthUserFactory()
project = ProjectFactory(creator=user)
auth = Auth(project.creator)
fork = project.fork_node(auth)
project.save()
registration = RegistrationFactory(project=fork)
url = project.api_url_for('view_project')
res = self.app.get(url, auth=user.auth)
assert_in('fork_count', res.json['node'])
assert_equal(1, res.json['node']['fork_count'])
def test_registration_retraction_redirect(self):
url = self.project.web_url_for('node_registration_retraction_redirect')
res = self.app.get(url, auth=self.auth)
assert_equal(res.status_code, 302)
assert_in(self.project.web_url_for('node_registration_retraction_get', _guid=True), res.location)
def test_update_node(self):
url = self.project.api_url_for('update_node')
res = self.app.put_json(url, {'title': 'newtitle'}, auth=self.auth)
assert_equal(res.status_code, 200)
self.project.reload()
assert_equal(self.project.title, 'newtitle')
# Regression test
def test_update_node_with_tags(self):
self.project.add_tag('cheezebørger', auth=Auth(self.project.creator), save=True)
url = self.project.api_url_for('update_node')
res = self.app.put_json(url, {'title': 'newtitle'}, auth=self.auth)
assert_equal(res.status_code, 200)
self.project.reload()
assert_equal(self.project.title, 'newtitle')
# Regression test
def test_retraction_view(self):
project = ProjectFactory(creator=self.user1, is_public=True)
registration = RegistrationFactory(project=project, is_public=True)
reg_file = create_test_file(registration, user=registration.creator, create_guid=True)
registration.retract_registration(self.user1)
approval_token = registration.retraction.approval_state[self.user1._id]['approval_token']
registration.retraction.approve_retraction(self.user1, approval_token)
registration.save()
url = registration.web_url_for('view_project')
res = self.app.get(url, auth=self.auth)
assert_not_in('Mako Runtime Error', res.body)
assert_in(registration.title, res.body)
assert_equal(res.status_code, 200)
for route in ['files', 'wiki/home', 'contributors', 'settings', 'withdraw', 'register', 'register/fakeid']:
res = self.app.get('{}{}/'.format(url, route), auth=self.auth, allow_redirects=True)
assert_equal(res.status_code, 302, route)
res = res.follow()
assert_equal(res.status_code, 200, route)
assert_in('This project is a withdrawn registration of', res.body, route)
res = self.app.get('/{}/'.format(reg_file.guids.first()._id))
assert_equal(res.status_code, 200)
assert_in('This project is a withdrawn registration of', res.body)
class TestEditableChildrenViews(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user, is_public=False)
self.child = ProjectFactory(parent=self.project, creator=self.user, is_public=True)
self.grandchild = ProjectFactory(parent=self.child, creator=self.user, is_public=False)
self.great_grandchild = ProjectFactory(parent=self.grandchild, creator=self.user, is_public=True)
self.great_great_grandchild = ProjectFactory(parent=self.great_grandchild, creator=self.user, is_public=False)
url = self.project.api_url_for('get_editable_children')
self.project_results = self.app.get(url, auth=self.user.auth).json
def test_get_editable_children(self):
assert_equal(len(self.project_results['children']), 4)
assert_equal(self.project_results['node']['id'], self.project._id)
def test_editable_children_order(self):
assert_equal(self.project_results['children'][0]['id'], self.child._id)
assert_equal(self.project_results['children'][1]['id'], self.grandchild._id)
assert_equal(self.project_results['children'][2]['id'], self.great_grandchild._id)
assert_equal(self.project_results['children'][3]['id'], self.great_great_grandchild._id)
def test_editable_children_indents(self):
assert_equal(self.project_results['children'][0]['indent'], 0)
assert_equal(self.project_results['children'][1]['indent'], 1)
assert_equal(self.project_results['children'][2]['indent'], 2)
assert_equal(self.project_results['children'][3]['indent'], 3)
def test_editable_children_parents(self):
assert_equal(self.project_results['children'][0]['parent_id'], self.project._id)
assert_equal(self.project_results['children'][1]['parent_id'], self.child._id)
assert_equal(self.project_results['children'][2]['parent_id'], self.grandchild._id)
assert_equal(self.project_results['children'][3]['parent_id'], self.great_grandchild._id)
def test_editable_children_privacy(self):
assert_false(self.project_results['node']['is_public'])
assert_true(self.project_results['children'][0]['is_public'])
assert_false(self.project_results['children'][1]['is_public'])
assert_true(self.project_results['children'][2]['is_public'])
assert_false(self.project_results['children'][3]['is_public'])
def test_editable_children_titles(self):
assert_equal(self.project_results['node']['title'], self.project.title)
assert_equal(self.project_results['children'][0]['title'], self.child.title)
assert_equal(self.project_results['children'][1]['title'], self.grandchild.title)
assert_equal(self.project_results['children'][2]['title'], self.great_grandchild.title)
assert_equal(self.project_results['children'][3]['title'], self.great_great_grandchild.title)
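# get_node_tree returns a nested structure, roughly (a sketch inferred from the
# assertions below, not a canonical schema):
#   [{'node': {'id': ...}, 'children': [{'node': {...}, 'children': [...]}, ...]}]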
class TestGetNodeTree(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.user = AuthUserFactory()
self.user2 = AuthUserFactory()
def test_get_single_node(self):
project = ProjectFactory(creator=self.user)
# child = NodeFactory(parent=project, creator=self.user)
url = project.api_url_for('get_node_tree')
res = self.app.get(url, auth=self.user.auth)
node_id = res.json[0]['node']['id']
assert_equal(node_id, project._primary_key)
def test_get_node_with_children(self):
project = ProjectFactory(creator=self.user)
child1 = NodeFactory(parent=project, creator=self.user)
child2 = NodeFactory(parent=project, creator=self.user2)
child3 = NodeFactory(parent=project, creator=self.user)
url = project.api_url_for('get_node_tree')
res = self.app.get(url, auth=self.user.auth)
tree = res.json[0]
parent_node_id = tree['node']['id']
child_ids = [child['node']['id'] for child in tree['children']]
assert_equal(parent_node_id, project._primary_key)
assert_in(child1._primary_key, child_ids)
assert_in(child2._primary_key, child_ids)
assert_in(child3._primary_key, child_ids)
def test_get_node_with_child_linked_to_parent(self):
project = ProjectFactory(creator=self.user)
child1 = NodeFactory(parent=project, creator=self.user)
child1.add_pointer(project, Auth(self.user))
child1.save()
url = project.api_url_for('get_node_tree')
res = self.app.get(url, auth=self.user.auth)
tree = res.json[0]
parent_node_id = tree['node']['id']
child1_id = tree['children'][0]['node']['id']
assert_equal(child1_id, child1._primary_key)
def test_get_node_not_parent_owner(self):
project = ProjectFactory(creator=self.user2)
child = NodeFactory(parent=project, creator=self.user2)
url = project.api_url_for('get_node_tree')
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 200)
assert_equal(res.json, [])
# Parent node should show because user2 has read access, but only child3 (where
# user2 is a contributor) should appear among the children
def test_get_node_parent_not_admin(self):
project = ProjectFactory(creator=self.user)
project.add_contributor(self.user2, auth=Auth(self.user))
project.save()
child1 = NodeFactory(parent=project, creator=self.user)
child2 = NodeFactory(parent=project, creator=self.user)
child3 = NodeFactory(parent=project, creator=self.user)
child3.add_contributor(self.user2, auth=Auth(self.user))
url = project.api_url_for('get_node_tree')
res = self.app.get(url, auth=self.user2.auth)
tree = res.json[0]
parent_node_id = tree['node']['id']
children = tree['children']
assert_equal(parent_node_id, project._primary_key)
assert_equal(len(children), 1)
assert_equal(children[0]['node']['id'], child3._primary_key)
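# Profile endpoints under test below: social links, jobs/schools (de)serialization,
# names, timezone/locale, email management, mailing-list sync, and storage region.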
@pytest.mark.enable_enqueue_task
@pytest.mark.enable_implicit_clean
@pytest.mark.enable_quickfiles_creation
class TestUserProfile(OsfTestCase):
def setUp(self):
super(TestUserProfile, self).setUp()
self.user = AuthUserFactory()
def test_fmt_date_or_none(self):
with assert_raises(HTTPError) as cm:
# enter a date before 1900
fmt_date_or_none(dt.datetime(1890, 10, 31, 18, 23, 29, 227))
# error should be raised because date is before 1900
assert_equal(cm.exception.code, http.BAD_REQUEST)
def test_unserialize_social(self):
url = api_url_for('unserialize_social')
payload = {
'profileWebsites': ['http://frozen.pizza.com/reviews'],
'twitter': 'howtopizza',
'github': 'frozenpizzacode',
}
self.app.put_json(
url,
payload,
auth=self.user.auth,
)
self.user.reload()
for key, value in payload.items():
assert_equal(self.user.social[key], value)
assert_true(self.user.social['researcherId'] is None)
# Regression test for help-desk ticket
def test_making_email_primary_is_not_case_sensitive(self):
user = AuthUserFactory(username='fred@queen.test')
# make confirmed email have different casing
email = user.emails.first()
email.address = email.address.capitalize()
email.save()
url = api_url_for('update_user')
res = self.app.put_json(
url,
{'id': user._id, 'emails': [{'address': 'fred@queen.test', 'primary': True, 'confirmed': True}]},
auth=user.auth
)
assert_equal(res.status_code, 200)
def test_unserialize_social_validation_failure(self):
url = api_url_for('unserialize_social')
# profileWebsites URL is invalid
payload = {
'profileWebsites': ['http://goodurl.com', 'http://invalidurl'],
'twitter': 'howtopizza',
'github': 'frozenpizzacode',
}
res = self.app.put_json(
url,
payload,
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
assert_equal(res.json['message_long'], 'Invalid personal URL.')
def test_serialize_social_editable(self):
self.user.social['twitter'] = 'howtopizza'
self.user.social['profileWebsites'] = ['http://www.cos.io', 'http://www.osf.io', 'http://www.wordup.com']
self.user.save()
url = api_url_for('serialize_social')
res = self.app.get(
url,
auth=self.user.auth,
)
assert_equal(res.json.get('twitter'), 'howtopizza')
assert_equal(res.json.get('profileWebsites'), ['http://www.cos.io', 'http://www.osf.io', 'http://www.wordup.com'])
assert_true(res.json.get('github') is None)
assert_true(res.json['editable'])
def test_serialize_social_not_editable(self):
user2 = AuthUserFactory()
self.user.social['twitter'] = 'howtopizza'
self.user.social['profileWebsites'] = ['http://www.cos.io', 'http://www.osf.io', 'http://www.wordup.com']
self.user.save()
url = api_url_for('serialize_social', uid=self.user._id)
res = self.app.get(
url,
auth=user2.auth,
)
assert_equal(res.json.get('twitter'), 'howtopizza')
assert_equal(res.json.get('profileWebsites'), ['http://www.cos.io', 'http://www.osf.io', 'http://www.wordup.com'])
assert_true(res.json.get('github') is None)
assert_false(res.json['editable'])
def test_serialize_social_addons_editable(self):
self.user.add_addon('github')
github_account = GitHubAccountFactory()
github_account.save()
self.user.external_accounts.add(github_account)
self.user.save()
url = api_url_for('serialize_social')
res = self.app.get(
url,
auth=self.user.auth,
)
assert_equal(
res.json['addons']['github'],
'abc'
)
def test_serialize_social_addons_not_editable(self):
user2 = AuthUserFactory()
self.user.add_addon('github')
github_account = GitHubAccountFactory()
github_account.save()
self.user.external_accounts.add(github_account)
self.user.save()
url = api_url_for('serialize_social', uid=self.user._id)
res = self.app.get(
url,
auth=user2.auth,
)
assert_not_in('addons', res.json)
def test_unserialize_and_serialize_jobs(self):
jobs = [{
'institution': 'an institution',
'department': 'a department',
'title': 'a title',
'startMonth': 'January',
'startYear': '2001',
'endMonth': 'March',
'endYear': '2001',
'ongoing': False,
}, {
'institution': 'another institution',
'department': None,
'title': None,
'startMonth': 'May',
'startYear': '2001',
'endMonth': None,
'endYear': None,
'ongoing': True,
}]
payload = {'contents': jobs}
url = api_url_for('unserialize_jobs')
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(len(self.user.jobs), 2)
url = api_url_for('serialize_jobs')
res = self.app.get(
url,
auth=self.user.auth,
)
for i, job in enumerate(jobs):
assert_equal(job, res.json['contents'][i])
def test_unserialize_and_serialize_schools(self):
schools = [{
'institution': 'an institution',
'department': 'a department',
'degree': 'a degree',
'startMonth': 1,
'startYear': '2001',
'endMonth': 5,
'endYear': '2001',
'ongoing': False,
}, {
'institution': 'another institution',
'department': None,
'degree': None,
'startMonth': 5,
'startYear': '2001',
'endMonth': None,
'endYear': None,
'ongoing': True,
}]
payload = {'contents': schools}
url = api_url_for('unserialize_schools')
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(len(self.user.schools), 2)
url = api_url_for('serialize_schools')
res = self.app.get(
url,
auth=self.user.auth,
)
for i, job in enumerate(schools):
assert_equal(job, res.json['contents'][i])
@mock.patch('osf.models.user.OSFUser.check_spam')
def test_unserialize_jobs(self, mock_check_spam):
jobs = [
{
'institution': fake.company(),
'department': fake.catch_phrase(),
'title': fake.bs(),
'startMonth': 5,
'startYear': '2013',
'endMonth': 3,
'endYear': '2014',
'ongoing': False,
}
]
payload = {'contents': jobs}
url = api_url_for('unserialize_jobs')
res = self.app.put_json(url, payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.user.reload()
# jobs field is updated
assert_equal(self.user.jobs, jobs)
assert mock_check_spam.called
def test_unserialize_names(self):
fake_fullname_w_spaces = ' {} '.format(fake.name())
names = {
'full': fake_fullname_w_spaces,
'given': 'Tea',
'middle': 'Gray',
'family': 'Pot',
'suffix': 'Ms.',
}
url = api_url_for('unserialize_names')
res = self.app.put_json(url, names, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.user.reload()
# user is updated
assert_equal(self.user.fullname, fake_fullname_w_spaces.strip())
assert_equal(self.user.given_name, names['given'])
assert_equal(self.user.middle_names, names['middle'])
assert_equal(self.user.family_name, names['family'])
assert_equal(self.user.suffix, names['suffix'])
@mock.patch('osf.models.user.OSFUser.check_spam')
def test_unserialize_schools(self, mock_check_spam):
schools = [
{
'institution': fake.company(),
'department': fake.catch_phrase(),
'degree': fake.bs(),
'startMonth': 5,
'startYear': '2013',
'endMonth': 3,
'endYear': '2014',
'ongoing': False,
}
]
payload = {'contents': schools}
url = api_url_for('unserialize_schools')
res = self.app.put_json(url, payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.user.reload()
# schools field is updated
assert_equal(self.user.schools, schools)
assert mock_check_spam.called
@mock.patch('osf.models.user.OSFUser.check_spam')
def test_unserialize_jobs_valid(self, mock_check_spam):
jobs = [
{
'institution': fake.company(),
'department': fake.catch_phrase(),
'title': fake.bs(),
'startMonth': 5,
'startYear': '2013',
'endMonth': 3,
'endYear': '2014',
'ongoing': False,
}
]
payload = {'contents': jobs}
url = api_url_for('unserialize_jobs')
res = self.app.put_json(url, payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert mock_check_spam.called
def test_update_user_timezone(self):
assert_equal(self.user.timezone, 'Etc/UTC')
payload = {'timezone': 'America/New_York', 'id': self.user._id}
url = api_url_for('update_user', uid=self.user._id)
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(self.user.timezone, 'America/New_York')
def test_update_user_locale(self):
assert_equal(self.user.locale, 'en_US')
payload = {'locale': 'de_DE', 'id': self.user._id}
url = api_url_for('update_user', uid=self.user._id)
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(self.user.locale, 'de_DE')
def test_update_user_locale_none(self):
assert_equal(self.user.locale, 'en_US')
payload = {'locale': None, 'id': self.user._id}
url = api_url_for('update_user', uid=self.user._id)
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(self.user.locale, 'en_US')
def test_update_user_locale_empty_string(self):
assert_equal(self.user.locale, 'en_US')
payload = {'locale': '', 'id': self.user._id}
url = api_url_for('update_user', uid=self.user._id)
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(self.user.locale, 'en_US')
def test_cannot_update_user_without_user_id(self):
user1 = AuthUserFactory()
url = api_url_for('update_user')
header = {'emails': [{'address': user1.username}]}
res = self.app.put_json(url, header, auth=user1.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['message_long'], '"id" is required')
@mock.patch('framework.auth.views.mails.send_mail')
def test_add_emails_return_emails(self, send_mail):
user1 = AuthUserFactory()
url = api_url_for('update_user')
email = 'test@cos.io'
header = {'id': user1._id,
'emails': [{'address': user1.username, 'primary': True, 'confirmed': True},
{'address': email, 'primary': False, 'confirmed': False}
]}
res = self.app.put_json(url, header, auth=user1.auth)
assert_equal(res.status_code, 200)
assert_in('emails', res.json['profile'])
assert_equal(len(res.json['profile']['emails']), 2)
@mock.patch('framework.auth.views.mails.send_mail')
def test_resend_confirmation_return_emails(self, send_mail):
user1 = AuthUserFactory()
url = api_url_for('resend_confirmation')
email = 'test@cos.io'
header = {'id': user1._id,
'email': {'address': email, 'primary': False, 'confirmed': False}
}
res = self.app.put_json(url, header, auth=user1.auth)
assert_equal(res.status_code, 200)
assert_in('emails', res.json['profile'])
assert_equal(len(res.json['profile']['emails']), 2)
@mock.patch('framework.auth.views.mails.send_mail')
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_update_user_mailing_lists(self, mock_get_mailchimp_api, send_mail):
email = fake_email()
self.user.emails.create(address=email)
list_name = 'foo'
self.user.mailchimp_mailing_lists[list_name] = True
self.user.save()
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]}
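# get_list_id_from_name resolves the name through the mocked client above, so the
# list id used in the assertions below is the fake id 1 configured there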
list_id = mailchimp_utils.get_list_id_from_name(list_name)
url = api_url_for('update_user', uid=self.user._id)
emails = [
{'address': self.user.username, 'primary': False, 'confirmed': True},
{'address': email, 'primary': True, 'confirmed': True}]
payload = {'locale': '', 'id': self.user._id, 'emails': emails}
self.app.put_json(url, payload, auth=self.user.auth)
# the test app doesn't have celery handlers attached, so we need to call this manually.
handlers.celery_teardown_request()
assert mock_client.lists.unsubscribe.called
mock_client.lists.unsubscribe.assert_called_with(
id=list_id,
email={'email': self.user.username},
send_goodbye=True
)
mock_client.lists.subscribe.assert_called_with(
id=list_id,
email={'email': email},
merge_vars={
'fname': self.user.given_name,
'lname': self.user.family_name,
},
double_optin=False,
update_existing=True
)
handlers.celery_teardown_request()
@mock.patch('framework.auth.views.mails.send_mail')
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_unsubscribe_mailchimp_not_called_if_user_not_subscribed(self, mock_get_mailchimp_api, send_mail):
email = fake_email()
self.user.emails.create(address=email)
list_name = 'foo'
self.user.mailchimp_mailing_lists[list_name] = False
self.user.save()
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]}
url = api_url_for('update_user', uid=self.user._id)
emails = [
{'address': self.user.username, 'primary': False, 'confirmed': True},
{'address': email, 'primary': True, 'confirmed': True}]
payload = {'locale': '', 'id': self.user._id, 'emails': emails}
self.app.put_json(url, payload, auth=self.user.auth)
assert_equal(mock_client.lists.unsubscribe.call_count, 0)
assert_equal(mock_client.lists.subscribe.call_count, 0)
handlers.celery_teardown_request()
def test_user_with_quickfiles(self):
quickfiles_node = QuickFilesNode.objects.get_for_user(self.user)
create_test_file(quickfiles_node, self.user, filename='skrr_skrrrrrrr.pdf')
url = web_url_for('profile_view_id', uid=self.user._id)
res = self.app.get(url, auth=self.user.auth)
assert_in('Quick files', res.body)
def test_user_with_no_quickfiles(self):
assert not QuickFilesNode.objects.first().files.filter(type='osf.osfstoragefile').exists()
url = web_url_for('profile_view_id', uid=self.user._primary_key)
res = self.app.get(url, auth=self.user.auth)
assert_not_in('Quick files', res.body)
def test_user_update_region(self):
user_settings = self.user.get_addon('osfstorage')
assert user_settings.default_region_id == 1
url = '/api/v1/profile/region/'
auth = self.user.auth
region = RegionFactory(name='Frankfort', _id='eu-central-1')
payload = {'region_id': 'eu-central-1'}
res = self.app.put_json(url, payload, auth=auth)
user_settings.reload()
assert user_settings.default_region_id == region.id
def test_user_update_region_missing_region_id_key(self):
url = '/api/v1/profile/region/'
auth = self.user.auth
region = RegionFactory(name='Frankfort', _id='eu-central-1')
payload = {'bad_key': 'eu-central-1'}
res = self.app.put_json(url, payload, auth=auth, expect_errors=True)
assert res.status_code == 400
def test_user_update_region_missing_bad_region(self):
url = '/api/v1/profile/region/'
auth = self.user.auth
payload = {'region_id': 'bad-region-1'}
res = self.app.put_json(url, payload, auth=auth, expect_errors=True)
assert res.status_code == 404
class TestUserProfileApplicationsPage(OsfTestCase):
def setUp(self):
super(TestUserProfileApplicationsPage, self).setUp()
self.user = AuthUserFactory()
self.user2 = AuthUserFactory()
self.platform_app = ApiOAuth2ApplicationFactory(owner=self.user)
self.detail_url = web_url_for('oauth_application_detail', client_id=self.platform_app.client_id)
def test_non_owner_cant_access_detail_page(self):
res = self.app.get(self.detail_url, auth=self.user2.auth, expect_errors=True)
assert_equal(res.status_code, http.FORBIDDEN)
def test_owner_cant_access_deleted_application(self):
self.platform_app.is_active = False
self.platform_app.save()
res = self.app.get(self.detail_url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, http.GONE)
def test_owner_cant_access_nonexistent_application(self):
url = web_url_for('oauth_application_detail', client_id='nonexistent')
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, http.NOT_FOUND)
def test_url_has_not_broken(self):
assert_equal(self.platform_app.url, self.detail_url)
class TestUserProfileTokensPage(OsfTestCase):
def setUp(self):
super(TestUserProfileTokensPage, self).setUp()
self.user = AuthUserFactory()
self.token = ApiOAuth2PersonalTokenFactory()
self.detail_url = web_url_for('personal_access_token_detail', _id=self.token._id)
def test_url_has_not_broken(self):
assert_equal(self.token.url, self.detail_url)
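# The password-change tests below exercise validation, rate limiting (three failed
# old-password attempts before throttling), and the reset of the failure counter on
# a successful change.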
class TestUserAccount(OsfTestCase):
def setUp(self):
super(TestUserAccount, self).setUp()
self.user = AuthUserFactory()
self.user.set_password('password')
self.user.auth = (self.user.username, 'password')
self.user.save()
def test_password_change_valid(self,
old_password='password',
new_password='Pa$$w0rd',
confirm_password='Pa$$w0rd'):
url = web_url_for('user_account_password')
post_data = {
'old_password': old_password,
'new_password': new_password,
'confirm_password': confirm_password,
}
res = self.app.post(url, post_data, auth=(self.user.username, old_password))
assert_equal(res.status_code, 302)
res = res.follow(auth=(self.user.username, new_password))
assert_equal(res.status_code, 200)
self.user.reload()
assert_true(self.user.check_password(new_password))
@mock.patch('website.profile.views.push_status_message')
def test_user_account_password_reset_query_params(self, mock_push_status_message):
url = web_url_for('user_account') + '?password_reset=True'
res = self.app.get(url, auth=(self.user.auth))
assert_true(mock_push_status_message.called)
assert_in('Password updated successfully', mock_push_status_message.mock_calls[0][1][0])
@mock.patch('website.profile.views.push_status_message')
def test_password_change_invalid(self, mock_push_status_message, old_password='', new_password='',
confirm_password='', error_message='Old password is invalid'):
url = web_url_for('user_account_password')
post_data = {
'old_password': old_password,
'new_password': new_password,
'confirm_password': confirm_password,
}
res = self.app.post(url, post_data, auth=self.user.auth)
assert_equal(res.status_code, 302)
res = res.follow(auth=self.user.auth)
assert_equal(res.status_code, 200)
self.user.reload()
assert_false(self.user.check_password(new_password))
assert_true(mock_push_status_message.called)
error_strings = [e[1][0] for e in mock_push_status_message.mock_calls]
assert_in(error_message, error_strings)
@mock.patch('website.profile.views.push_status_message')
def test_password_change_rate_limiting(self, mock_push_status_message):
assert self.user.change_password_last_attempt is None
assert self.user.old_password_invalid_attempts == 0
url = web_url_for('user_account_password')
post_data = {
'old_password': 'invalid old password',
'new_password': 'this is a new password',
'confirm_password': 'this is a new password',
}
res = self.app.post(url, post_data, auth=self.user.auth)
self.user.reload()
assert self.user.change_password_last_attempt is not None
assert self.user.old_password_invalid_attempts == 1
assert_equal(res.status_code, 302)
# Make a second request
res = self.app.post(url, post_data, auth=self.user.auth, expect_errors=True)
assert_equal(len(mock_push_status_message.mock_calls), 2)
assert_equal(mock_push_status_message.mock_calls[1][1][0], 'Old password is invalid')
self.user.reload()
assert self.user.change_password_last_attempt is not None
assert self.user.old_password_invalid_attempts == 2
# Make a third request
res = self.app.post(url, post_data, auth=self.user.auth, expect_errors=True)
assert_equal(len(mock_push_status_message.mock_calls), 3)
assert_equal(mock_push_status_message.mock_calls[2][1][0], 'Old password is invalid')
self.user.reload()
assert self.user.change_password_last_attempt is not None
assert self.user.old_password_invalid_attempts == 3
# Make a fourth request
res = self.app.post(url, post_data, auth=self.user.auth, expect_errors=True)
assert_true(mock_push_status_message.called)
error_strings = mock_push_status_message.mock_calls[3][2]
assert_in('Too many failed attempts', error_strings['message'])
self.user.reload()
# Too many failed requests within a short window. Throttled.
assert self.user.change_password_last_attempt is not None
assert self.user.old_password_invalid_attempts == 3
@mock.patch('website.profile.views.push_status_message')
def test_password_change_rate_limiting_not_imposed_if_old_password_correct(self, mock_push_status_message):
assert self.user.change_password_last_attempt is None
assert self.user.old_password_invalid_attempts == 0
url = web_url_for('user_account_password')
post_data = {
'old_password': 'password',
'new_password': 'short',
'confirm_password': 'short',
}
res = self.app.post(url, post_data, auth=self.user.auth)
self.user.reload()
assert self.user.change_password_last_attempt is None
assert self.user.old_password_invalid_attempts == 0
assert_equal(res.status_code, 302)
# Make a second request
res = self.app.post(url, post_data, auth=self.user.auth, expect_errors=True)
assert_equal(len(mock_push_status_message.mock_calls), 2)
assert_equal(mock_push_status_message.mock_calls[1][1][0], 'Password should be at least eight characters')
self.user.reload()
assert self.user.change_password_last_attempt is None
assert self.user.old_password_invalid_attempts == 0
# Make a third request
res = self.app.post(url, post_data, auth=self.user.auth, expect_errors=True)
assert_equal(len(mock_push_status_message.mock_calls), 3)
assert_equal(mock_push_status_message.mock_calls[2][1][0], 'Password should be at least eight characters')
self.user.reload()
assert self.user.change_password_last_attempt is None
assert self.user.old_password_invalid_attempts == 0
# Make a fourth request
res = self.app.post(url, post_data, auth=self.user.auth, expect_errors=True)
assert_true(mock_push_status_message.called)
assert_equal(len(mock_push_status_message.mock_calls), 4)
assert_equal(mock_push_status_message.mock_calls[3][1][0], 'Password should be at least eight characters')
self.user.reload()
assert self.user.change_password_last_attempt is None
assert self.user.old_password_invalid_attempts == 0
@mock.patch('website.profile.views.push_status_message')
def test_old_password_invalid_attempts_reset_if_password_successfully_reset(self, mock_push_status_message):
assert self.user.change_password_last_attempt is None
assert self.user.old_password_invalid_attempts == 0
url = web_url_for('user_account_password')
post_data = {
'old_password': 'invalid old password',
'new_password': 'this is a new password',
'confirm_password': 'this is a new password',
}
correct_post_data = {
'old_password': 'password',
'new_password': 'thisisanewpassword',
'confirm_password': 'thisisanewpassword',
}
res = self.app.post(url, post_data, auth=self.user.auth)
assert_equal(len(mock_push_status_message.mock_calls), 1)
assert_equal(mock_push_status_message.mock_calls[0][1][0], 'Old password is invalid')
self.user.reload()
assert self.user.change_password_last_attempt is not None
assert self.user.old_password_invalid_attempts == 1
assert_equal(res.status_code, 302)
# Make a second request that successfully changes password
res = self.app.post(url, correct_post_data, auth=self.user.auth, expect_errors=True)
self.user.reload()
assert self.user.change_password_last_attempt is not None
assert self.user.old_password_invalid_attempts == 0
def test_password_change_invalid_old_password(self):
self.test_password_change_invalid(
old_password='invalid old password',
new_password='new password',
confirm_password='new password',
error_message='Old password is invalid',
)
def test_password_change_invalid_confirm_password(self):
self.test_password_change_invalid(
old_password='password',
new_password='new password',
confirm_password='invalid confirm password',
error_message='Password does not match the confirmation',
)
def test_password_change_invalid_new_password_length(self):
self.test_password_change_invalid(
old_password='password',
new_password='1234567',
confirm_password='1234567',
error_message='Password should be at least eight characters',
)
def test_password_change_valid_new_password_length(self):
self.test_password_change_valid(
old_password='password',
new_password='12345678',
confirm_password='12345678',
)
def test_password_change_invalid_blank_password(self, old_password='', new_password='', confirm_password=''):
self.test_password_change_invalid(
old_password=old_password,
new_password=new_password,
confirm_password=confirm_password,
error_message='Passwords cannot be blank',
)
def test_password_change_invalid_empty_string_new_password(self):
self.test_password_change_invalid_blank_password('password', '', 'new password')
def test_password_change_invalid_blank_new_password(self):
self.test_password_change_invalid_blank_password('password', ' ', 'new password')
def test_password_change_invalid_empty_string_confirm_password(self):
self.test_password_change_invalid_blank_password('password', 'new password', '')
def test_password_change_invalid_blank_confirm_password(self):
self.test_password_change_invalid_blank_password('password', 'new password', ' ')
@mock.patch('framework.auth.views.mails.send_mail')
def test_user_cannot_request_account_export_before_throttle_expires(self, send_mail):
url = api_url_for('request_export')
self.app.post(url, auth=self.user.auth)
assert_true(send_mail.called)
res = self.app.post(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(send_mail.call_count, 1)
@mock.patch('framework.auth.views.mails.send_mail')
def test_user_cannot_request_account_deactivation_before_throttle_expires(self, send_mail):
url = api_url_for('request_deactivation')
self.app.post(url, auth=self.user.auth)
assert_true(send_mail.called)
res = self.app.post(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(send_mail.call_count, 1)
def test_get_unconfirmed_emails_exclude_external_identity(self):
external_identity = {
'service': {
'AFI': 'LINK'
}
}
self.user.add_unconfirmed_email('james@steward.com')
self.user.add_unconfirmed_email('steward@james.com', external_identity=external_identity)
self.user.save()
unconfirmed_emails = self.user.get_unconfirmed_emails_exclude_external_identity()
assert_in('james@steward.com', unconfirmed_emails)
assert_not_in('steward@james.com', unconfirmed_emails)
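# Contributor-adding payloads below mix registered users (serialized with
# add_contributor_json) and unregistered "pseudousers" (plain dicts with id=None);
# the contributor_added signal is connected to notify_added_contributor so the
# email side effects can be asserted.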
@pytest.mark.enable_implicit_clean
class TestAddingContributorViews(OsfTestCase):
def setUp(self):
super(TestAddingContributorViews, self).setUp()
self.creator = AuthUserFactory()
self.project = ProjectFactory(creator=self.creator)
self.auth = Auth(self.project.creator)
# Authenticate all requests
self.app.authenticate(*self.creator.auth)
contributor_added.connect(notify_added_contributor)
def test_serialize_unregistered_without_record(self):
name, email = fake.name(), fake_email()
res = serialize_unregistered(fullname=name, email=email)
assert_equal(res['fullname'], name)
assert_equal(res['email'], email)
assert_equal(res['id'], None)
assert_false(res['registered'])
assert_true(res['profile_image_url'])
assert_false(res['active'])
def test_deserialize_contributors(self):
contrib = UserFactory()
unreg = UnregUserFactory()
name, email = fake.name(), fake_email()
unreg_no_record = serialize_unregistered(name, email)
contrib_data = [
add_contributor_json(contrib),
serialize_unregistered(fake.name(), unreg.username),
unreg_no_record
]
contrib_data[0]['permission'] = 'admin'
contrib_data[1]['permission'] = 'write'
contrib_data[2]['permission'] = 'read'
contrib_data[0]['visible'] = True
contrib_data[1]['visible'] = True
contrib_data[2]['visible'] = True
res = deserialize_contributors(
self.project,
contrib_data,
auth=Auth(self.creator))
assert_equal(len(res), len(contrib_data))
assert_true(res[0]['user'].is_registered)
assert_false(res[1]['user'].is_registered)
assert_true(res[1]['user']._id)
assert_false(res[2]['user'].is_registered)
assert_true(res[2]['user']._id)
def test_deserialize_contributors_validates_fullname(self):
name = '<img src=1 onerror=console.log(1)>'
email = fake_email()
unreg_no_record = serialize_unregistered(name, email)
contrib_data = [unreg_no_record]
contrib_data[0]['permission'] = 'admin'
contrib_data[0]['visible'] = True
with assert_raises(ValidationError):
deserialize_contributors(
self.project,
contrib_data,
auth=Auth(self.creator),
validate=True)
def test_deserialize_contributors_validates_email(self):
name = fake.name()
email = '!@#$%%^&*'
unreg_no_record = serialize_unregistered(name, email)
contrib_data = [unreg_no_record]
contrib_data[0]['permission'] = 'admin'
contrib_data[0]['visible'] = True
with assert_raises(ValidationError):
deserialize_contributors(
self.project,
contrib_data,
auth=Auth(self.creator),
validate=True)
def test_serialize_unregistered_with_record(self):
name, email = fake.name(), fake_email()
user = self.project.add_unregistered_contributor(fullname=name,
email=email, auth=Auth(self.project.creator))
self.project.save()
res = serialize_unregistered(
fullname=name,
email=email
)
assert_false(res['active'])
assert_false(res['registered'])
assert_equal(res['id'], user._primary_key)
assert_true(res['profile_image_url'])
assert_equal(res['fullname'], name)
assert_equal(res['email'], email)
def test_add_contributor_with_unreg_contribs_and_reg_contribs(self):
n_contributors_pre = len(self.project.contributors)
reg_user = UserFactory()
name, email = fake.name(), fake_email()
pseudouser = {
'id': None,
'registered': False,
'fullname': name,
'email': email,
'permission': 'admin',
'visible': True,
}
reg_dict = add_contributor_json(reg_user)
reg_dict['permission'] = 'admin'
reg_dict['visible'] = True
payload = {
'users': [reg_dict, pseudouser],
'node_ids': []
}
url = self.project.api_url_for('project_contributors_post')
self.app.post_json(url, payload).maybe_follow()
self.project.reload()
assert_equal(len(self.project.contributors),
n_contributors_pre + len(payload['users']))
new_unreg = auth.get_user(email=email)
assert_false(new_unreg.is_registered)
# unclaimed record was added
new_unreg.reload()
assert_in(self.project._primary_key, new_unreg.unclaimed_records)
rec = new_unreg.get_unclaimed_record(self.project._primary_key)
assert_equal(rec['name'], name)
assert_equal(rec['email'], email)
@mock.patch('website.project.views.contributor.send_claim_email')
def test_add_contributors_post_only_sends_one_email_to_unreg_user(
self, mock_send_claim_email):
# Project has components
comp1, comp2 = NodeFactory(
creator=self.creator), NodeFactory(creator=self.creator)
NodeRelation.objects.create(parent=self.project, child=comp1)
NodeRelation.objects.create(parent=self.project, child=comp2)
self.project.save()
# An unreg user is added to the project AND its components
unreg_user = {  # dict because user has no previous unreg record
'id': None,
'registered': False,
'fullname': fake.name(),
'email': fake_email(),
'permission': 'admin',
'visible': True,
}
payload = {
'users': [unreg_user],
'node_ids': [comp1._primary_key, comp2._primary_key]
}
# send request
url = self.project.api_url_for('project_contributors_post')
assert_true(self.project.can_edit(user=self.creator))
self.app.post_json(url, payload, auth=self.creator.auth)
# finalize_invitation should only have been called once
assert_equal(mock_send_claim_email.call_count, 1)
@mock.patch('website.mails.send_mail')
def test_add_contributors_post_only_sends_one_email_to_registered_user(self, mock_send_mail):
# Project has components
comp1 = NodeFactory(creator=self.creator, parent=self.project)
comp2 = NodeFactory(creator=self.creator, parent=self.project)
# A registered user is added to the project AND its components
user = UserFactory()
user_dict = {
'id': user._id,
'fullname': user.fullname,
'email': user.username,
'permission': 'write',
'visible': True}
payload = {
'users': [user_dict],
'node_ids': [comp1._primary_key, comp2._primary_key]
}
# send request
url = self.project.api_url_for('project_contributors_post')
assert self.project.can_edit(user=self.creator)
self.app.post_json(url, payload, auth=self.creator.auth)
# send_mail should only have been called once
assert_equal(mock_send_mail.call_count, 1)
@mock.patch('website.mails.send_mail')
def test_add_contributors_post_sends_email_if_user_not_contributor_on_parent_node(self, mock_send_mail):
# Project has a component with a sub-component
component = NodeFactory(creator=self.creator, parent=self.project)
sub_component = NodeFactory(creator=self.creator, parent=component)
# A registered user is added to the project and the sub-component, but NOT the component
user = UserFactory()
user_dict = {
'id': user._id,
'fullname': user.fullname,
'email': user.username,
'permission': 'write',
'visible': True}
payload = {
'users': [user_dict],
'node_ids': [sub_component._primary_key]
}
# send request
url = self.project.api_url_for('project_contributors_post')
assert self.project.can_edit(user=self.creator)
self.app.post_json(url, payload, auth=self.creator.auth)
# send_mail is called for both the project and the sub-component
assert_equal(mock_send_mail.call_count, 2)
@mock.patch('website.project.views.contributor.send_claim_email')
def test_email_sent_when_unreg_user_is_added(self, send_mail):
name, email = fake.name(), fake_email()
pseudouser = {
'id': None,
'registered': False,
'fullname': name,
'email': email,
'permission': 'admin',
'visible': True,
}
payload = {
'users': [pseudouser],
'node_ids': []
}
url = self.project.api_url_for('project_contributors_post')
self.app.post_json(url, payload).maybe_follow()
assert_true(send_mail.called)
call_args, call_kwargs = send_mail.call_args
assert_in(email, list(call_args) + list(call_kwargs.values()))
@mock.patch('website.mails.send_mail')
def test_email_sent_when_reg_user_is_added(self, send_mail):
contributor = UserFactory()
contributors = [{
'user': contributor,
'visible': True,
'permissions': ['read', 'write']
}]
project = ProjectFactory(creator=self.auth.user)
project.add_contributors(contributors, auth=self.auth)
project.save()
assert_true(send_mail.called)
send_mail.assert_called_with(
contributor.username,
mails.CONTRIBUTOR_ADDED_DEFAULT,
user=contributor,
node=project,
mimetype='html',
referrer_name=self.auth.user.fullname,
all_global_subscriptions_none=False,
branded_service=None,
can_change_preferences=False,
logo=settings.OSF_LOGO,
osf_contact_email=settings.OSF_CONTACT_EMAIL,
published_preprints=[]
)
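# the throttle record stores the send time, so it should be within a second of now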
assert_almost_equal(contributor.contributor_added_email_records[project._id]['last_sent'], int(time.time()), delta=1)
@mock.patch('website.mails.send_mail')
def test_contributor_added_email_sent_to_unreg_user(self, send_mail):
unreg_user = UnregUserFactory()
project = ProjectFactory()
project.add_unregistered_contributor(fullname=unreg_user.fullname, email=unreg_user.email, auth=Auth(project.creator))
project.save()
assert_true(send_mail.called)
@mock.patch('website.mails.send_mail')
def test_forking_project_does_not_send_contributor_added_email(self, send_mail):
project = ProjectFactory()
project.fork_node(auth=Auth(project.creator))
assert_false(send_mail.called)
@mock.patch('website.mails.send_mail')
def test_templating_project_does_not_send_contributor_added_email(self, send_mail):
project = ProjectFactory()
project.use_as_template(auth=Auth(project.creator))
assert_false(send_mail.called)
@mock.patch('website.archiver.tasks.archive')
@mock.patch('website.mails.send_mail')
def test_registering_project_does_not_send_contributor_added_email(self, send_mail, mock_archive):
project = ProjectFactory()
provider = RegistrationProviderFactory()
project.register_node(get_default_metaschema(), Auth(user=project.creator), '', None, provider=provider)
assert_false(send_mail.called)
@mock.patch('website.mails.send_mail')
def test_notify_contributor_email_does_not_send_before_throttle_expires(self, send_mail):
contributor = UserFactory()
project = ProjectFactory()
auth = Auth(project.creator)
notify_added_contributor(project, contributor, auth)
assert_true(send_mail.called)
# 2nd call does not send email because throttle period has not expired
notify_added_contributor(project, contributor, auth)
assert_equal(send_mail.call_count, 1)
@mock.patch('website.mails.send_mail')
def test_notify_contributor_email_sends_after_throttle_expires(self, send_mail):
throttle = 0.5
contributor = UserFactory()
project = ProjectFactory()
auth = Auth(project.creator)
notify_added_contributor(project, contributor, auth, throttle=throttle)
assert_true(send_mail.called)
time.sleep(1) # throttle period expires
notify_added_contributor(project, contributor, auth, throttle=throttle)
assert_equal(send_mail.call_count, 2)
@mock.patch('website.mails.send_mail')
def test_add_contributor_to_fork_sends_email(self, send_mail):
contributor = UserFactory()
fork = self.project.fork_node(auth=Auth(self.creator))
fork.add_contributor(contributor, auth=Auth(self.creator))
fork.save()
assert_true(send_mail.called)
assert_equal(send_mail.call_count, 1)
@mock.patch('website.mails.send_mail')
def test_add_contributor_to_template_sends_email(self, send_mail):
contributor = UserFactory()
template = self.project.use_as_template(auth=Auth(self.creator))
template.add_contributor(contributor, auth=Auth(self.creator))
template.save()
assert_true(send_mail.called)
assert_equal(send_mail.call_count, 1)
@mock.patch('website.mails.send_mail')
def test_creating_fork_does_not_email_creator(self, send_mail):
contributor = UserFactory()
fork = self.project.fork_node(auth=Auth(self.creator))
assert_false(send_mail.called)
@mock.patch('website.mails.send_mail')
def test_creating_template_does_not_email_creator(self, send_mail):
contributor = UserFactory()
template = self.project.use_as_template(auth=Auth(self.creator))
assert_false(send_mail.called)
def test_add_multiple_contributors_only_adds_one_log(self):
n_logs_pre = self.project.logs.count()
reg_user = UserFactory()
name = fake.name()
pseudouser = {
'id': None,
'registered': False,
'fullname': name,
'email': fake_email(),
'permission': 'write',
'visible': True,
}
reg_dict = add_contributor_json(reg_user)
reg_dict['permission'] = 'admin'
reg_dict['visible'] = True
payload = {
'users': [reg_dict, pseudouser],
'node_ids': []
}
url = self.project.api_url_for('project_contributors_post')
self.app.post_json(url, payload).maybe_follow()
self.project.reload()
assert_equal(self.project.logs.count(), n_logs_pre + 1)
def test_add_contribs_to_multiple_nodes(self):
child = NodeFactory(parent=self.project, creator=self.creator)
n_contributors_pre = child.contributors.count()
reg_user = UserFactory()
name, email = fake.name(), fake_email()
pseudouser = {
'id': None,
'registered': False,
'fullname': name,
'email': email,
'permission': 'admin',
'visible': True,
}
reg_dict = add_contributor_json(reg_user)
reg_dict['permission'] = 'admin'
reg_dict['visible'] = True
payload = {
'users': [reg_dict, pseudouser],
'node_ids': [self.project._primary_key, child._primary_key]
}
url = '/api/v1/project/{0}/contributors/'.format(self.project._id)
self.app.post_json(url, payload).maybe_follow()
child.reload()
assert_equal(child.contributors.count(),
n_contributors_pre + len(payload['users']))
def tearDown(self):
super(TestAddingContributorViews, self).tearDown()
contributor_added.disconnect(notify_added_contributor)
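# Invite flow: POSTing {fullname, email} serializes an unregistered contributor;
# claim emails go out via send_claim_email and repeat sends are throttled.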
class TestUserInviteViews(OsfTestCase):
def setUp(self):
super(TestUserInviteViews, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
self.invite_url = '/api/v1/project/{0}/invite_contributor/'.format(
self.project._primary_key)
def test_invite_contributor_post_if_not_in_db(self):
name, email = fake.name(), fake_email()
res = self.app.post_json(
self.invite_url,
{'fullname': name, 'email': email},
auth=self.user.auth,
)
contrib = res.json['contributor']
assert_true(contrib['id'] is None)
assert_equal(contrib['fullname'], name)
assert_equal(contrib['email'], email)
def test_invite_contributor_post_if_unreg_already_in_db(self):
# An unreg user is added to a different project
name, email = fake.name(), fake_email()
project2 = ProjectFactory()
unreg_user = project2.add_unregistered_contributor(fullname=name, email=email,
auth=Auth(project2.creator))
project2.save()
res = self.app.post_json(self.invite_url,
{'fullname': name, 'email': email}, auth=self.user.auth)
expected = add_contributor_json(unreg_user)
expected['fullname'] = name
expected['email'] = email
assert_equal(res.json['contributor'], expected)
def test_invite_contributor_post_if_email_already_registered(self):
reg_user = UserFactory()
name, email = fake.name(), reg_user.username
# Tries to invite a user who is already registered - this is now permitted.
res = self.app.post_json(self.invite_url,
{'fullname': name, 'email': email},
auth=self.user.auth)
contrib = res.json['contributor']
assert_equal(contrib['id'], reg_user._id)
assert_equal(contrib['fullname'], name)
assert_equal(contrib['email'], email)
def test_invite_contributor_post_if_user_is_already_contributor(self):
unreg_user = self.project.add_unregistered_contributor(
fullname=fake.name(), email=fake_email(),
auth=Auth(self.project.creator)
)
self.project.save()
# Tries to invite unreg user that is already a contributor
res = self.app.post_json(self.invite_url,
{'fullname': fake.name(), 'email': unreg_user.username},
auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, http.BAD_REQUEST)
def test_invite_contributor_with_no_email(self):
name = fake.name()
res = self.app.post_json(self.invite_url,
{'fullname': name, 'email': None}, auth=self.user.auth)
assert_equal(res.status_code, http.OK)
data = res.json
assert_equal(data['status'], 'success')
assert_equal(data['contributor']['fullname'], name)
assert_true(data['contributor']['email'] is None)
assert_false(data['contributor']['registered'])
def test_invite_contributor_requires_fullname(self):
res = self.app.post_json(self.invite_url,
{'email': 'brian@queen.com', 'fullname': ''}, auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, http.BAD_REQUEST)
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_send_claim_email_to_given_email(self, send_mail):
project = ProjectFactory()
given_email = fake_email()
unreg_user = project.add_unregistered_contributor(
fullname=fake.name(),
email=given_email,
auth=Auth(project.creator),
)
project.save()
send_claim_email(email=given_email, unclaimed_user=unreg_user, node=project)
assert_true(send_mail.called)
call_args, call_kwargs = send_mail.call_args
assert_equal(call_args[0], given_email)
assert_equal(call_args[1], mails.INVITE_DEFAULT)
assert_false(call_kwargs.get('can_change_preferences'))
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_send_claim_email_to_referrer(self, send_mail):
project = ProjectFactory()
referrer = project.creator
given_email, real_email = fake_email(), fake_email()
unreg_user = project.add_unregistered_contributor(fullname=fake.name(),
email=given_email, auth=Auth(
referrer)
)
project.save()
send_claim_email(email=real_email, unclaimed_user=unreg_user, node=project)
assert_true(send_mail.called)
# email was sent to referrer
send_mail.assert_called_with(
referrer.username,
mails.FORWARD_INVITE,
user=unreg_user,
referrer=referrer,
claim_url=unreg_user.get_claim_url(project._id, external=True),
email=real_email.lower().strip(),
fullname=unreg_user.get_unclaimed_record(project._id)['name'],
node=project,
branded_service=None,
can_change_preferences=False,
logo=settings.OSF_LOGO,
osf_contact_email=settings.OSF_CONTACT_EMAIL
)
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_send_claim_email_before_throttle_expires(self, send_mail):
project = ProjectFactory()
given_email = fake_email()
unreg_user = project.add_unregistered_contributor(
fullname=fake.name(),
email=given_email,
auth=Auth(project.creator),
)
project.save()
send_claim_email(email=fake_email(), unclaimed_user=unreg_user, node=project)
send_mail.reset_mock()
# 2nd call raises error because throttle hasn't expired
with assert_raises(HTTPError):
send_claim_email(email=fake_email(), unclaimed_user=unreg_user, node=project)
assert_false(send_mail.called)
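# Claim flow: an unregistered contributor follows a tokenized claim URL; the tests
# cover already-registered claimers, ORCiD login, token invalidation, and name
# handling on a successful claim.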
@pytest.mark.enable_implicit_clean
@pytest.mark.enable_quickfiles_creation
class TestClaimViews(OsfTestCase):
def setUp(self):
super(TestClaimViews, self).setUp()
self.referrer = AuthUserFactory()
self.project = ProjectFactory(creator=self.referrer, is_public=True)
self.given_name = fake.name()
self.given_email = fake_email()
self.user = self.project.add_unregistered_contributor(
fullname=self.given_name,
email=self.given_email,
auth=Auth(user=self.referrer)
)
self.project.save()
@mock.patch('website.project.views.contributor.send_claim_email')
def test_claim_user_already_registered_redirects_to_claim_user_registered(self, claim_email):
name = fake.name()
email = fake_email()
# project contributor adds an unregistered contributor (without an email) on public project
unregistered_user = self.project.add_unregistered_contributor(
fullname=name,
email=None,
auth=Auth(user=self.referrer)
)
assert_in(unregistered_user, self.project.contributors)
# unregistered user comes along and claims themselves on the public project, entering an email
invite_url = self.project.api_url_for('claim_user_post', uid='undefined')
self.app.post_json(invite_url, {
'pk': unregistered_user._primary_key,
'value': email
})
assert_equal(claim_email.call_count, 1)
# set unregistered record email since we are mocking send_claim_email()
unclaimed_record = unregistered_user.get_unclaimed_record(self.project._primary_key)
unclaimed_record.update({'email': email})
unregistered_user.save()
# unregistered user then goes and makes an account with same email, before claiming themselves as contributor
UserFactory(username=email, fullname=name)
# claim link for the now registered email is accessed while not logged in
token = unregistered_user.get_unclaimed_record(self.project._primary_key)['token']
claim_url = '/user/{uid}/{pid}/claim/?token={token}'.format(
uid=unregistered_user._id,
pid=self.project._id,
token=token
)
res = self.app.get(claim_url)
# should redirect to 'claim_user_registered' view
claim_registered_url = '/user/{uid}/{pid}/claim/verify/{token}/'.format(
uid=unregistered_user._id,
pid=self.project._id,
token=token
)
assert_equal(res.status_code, 302)
assert_in(claim_registered_url, res.headers.get('Location'))
@mock.patch('website.project.views.contributor.send_claim_email')
def test_claim_user_already_registered_secondary_email_redirects_to_claim_user_registered(self, claim_email):
name = fake.name()
email = fake_email()
secondary_email = fake_email()
# project contributor adds an unregistered contributor (without an email) on public project
unregistered_user = self.project.add_unregistered_contributor(
fullname=name,
email=None,
auth=Auth(user=self.referrer)
)
assert_in(unregistered_user, self.project.contributors)
# unregistered user comes along and claims themselves on the public project, entering an email
invite_url = self.project.api_url_for('claim_user_post', uid='undefined')
self.app.post_json(invite_url, {
'pk': unregistered_user._primary_key,
'value': secondary_email
})
assert_equal(claim_email.call_count, 1)
# set unregistered record email since we are mocking send_claim_email()
unclaimed_record = unregistered_user.get_unclaimed_record(self.project._primary_key)
unclaimed_record.update({'email': secondary_email})
unregistered_user.save()
# unregistered user then goes and makes an account with same email, before claiming themselves as contributor
registered_user = UserFactory(username=email, fullname=name)
registered_user.emails.create(address=secondary_email)
registered_user.save()
# claim link for the now registered email is accessed while not logged in
token = unregistered_user.get_unclaimed_record(self.project._primary_key)['token']
claim_url = '/user/{uid}/{pid}/claim/?token={token}'.format(
uid=unregistered_user._id,
pid=self.project._id,
token=token
)
res = self.app.get(claim_url)
# should redirect to 'claim_user_registered' view
claim_registered_url = '/user/{uid}/{pid}/claim/verify/{token}/'.format(
uid=unregistered_user._id,
pid=self.project._id,
token=token
)
assert_equal(res.status_code, 302)
assert_in(claim_registered_url, res.headers.get('Location'))
def test_claim_user_invited_with_no_email_posts_to_claim_form(self):
given_name = fake.name()
invited_user = self.project.add_unregistered_contributor(
fullname=given_name,
email=None,
auth=Auth(user=self.referrer)
)
self.project.save()
url = invited_user.get_claim_url(self.project._primary_key)
res = self.app.post(url, {
'password': 'bohemianrhap',
'password2': 'bohemianrhap'
}, expect_errors=True)
assert_equal(res.status_code, 400)
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_claim_user_post_with_registered_user_id(self, send_mail):
# registered user who is attempting to claim the unclaimed contributor
reg_user = UserFactory()
payload = {
# pk of unreg user record
'pk': self.user._primary_key,
'claimerId': reg_user._primary_key
}
url = '/api/v1/user/{uid}/{pid}/claim/email/'.format(
uid=self.user._primary_key,
pid=self.project._primary_key,
)
res = self.app.post_json(url, payload)
# mail was sent
assert_equal(send_mail.call_count, 2)
# ... to the correct address
referrer_call = send_mail.call_args_list[0]
claimer_call = send_mail.call_args_list[1]
args, _ = referrer_call
assert_equal(args[0], self.referrer.username)
args, _ = claimer_call
assert_equal(args[0], reg_user.username)
# view returns the correct JSON
assert_equal(res.json, {
'status': 'success',
'email': reg_user.username,
'fullname': self.given_name,
})
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_send_claim_registered_email(self, mock_send_mail):
reg_user = UserFactory()
send_claim_registered_email(
claimer=reg_user,
unclaimed_user=self.user,
node=self.project
)
assert_equal(mock_send_mail.call_count, 2)
first_call_args = mock_send_mail.call_args_list[0][0]
assert_equal(first_call_args[0], self.referrer.username)
second_call_args = mock_send_mail.call_args_list[1][0]
assert_equal(second_call_args[0], reg_user.username)
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_send_claim_registered_email_before_throttle_expires(self, mock_send_mail):
reg_user = UserFactory()
send_claim_registered_email(
claimer=reg_user,
unclaimed_user=self.user,
node=self.project,
)
mock_send_mail.reset_mock()
# second call raises an error because the throttle period has not yet expired
with assert_raises(HTTPError):
send_claim_registered_email(
claimer=reg_user,
unclaimed_user=self.user,
node=self.project,
)
assert_false(mock_send_mail.called)
@mock.patch('website.project.views.contributor.send_claim_registered_email')
def test_claim_user_post_with_email_already_registered_sends_correct_email(
self, send_claim_registered_email):
reg_user = UserFactory()
payload = {
'value': reg_user.username,
'pk': self.user._primary_key
}
url = self.project.api_url_for('claim_user_post', uid=self.user._id)
self.app.post_json(url, payload)
assert_true(send_claim_registered_email.called)
def test_user_with_removed_unclaimed_url_claiming(self):
""" Tests that when an unclaimed user is removed from a project, the
unregistered user object does not retain the token.
"""
self.project.remove_contributor(self.user, Auth(user=self.referrer))
assert_not_in(
self.project._primary_key,
self.user.unclaimed_records.keys()
)
def test_user_with_claim_url_cannot_claim_twice(self):
""" Tests that when an unclaimed user is replaced on a project with a
claimed user, the unregistered user object does not retain the token.
"""
reg_user = AuthUserFactory()
self.project.replace_contributor(self.user, reg_user)
assert_not_in(
self.project._primary_key,
self.user.unclaimed_records.keys()
)
def test_claim_user_form_redirects_to_password_confirm_page_if_user_is_logged_in(self):
reg_user = AuthUserFactory()
url = self.user.get_claim_url(self.project._primary_key)
res = self.app.get(url, auth=reg_user.auth)
assert_equal(res.status_code, 302)
res = res.follow(auth=reg_user.auth)
token = self.user.get_unclaimed_record(self.project._primary_key)['token']
expected = self.project.web_url_for(
'claim_user_registered',
uid=self.user._id,
token=token,
)
assert_equal(res.request.path, expected)
@mock.patch('framework.auth.cas.make_response_from_ticket')
def test_claim_user_when_user_is_registered_with_orcid(self, mock_response_from_ticket):
token = self.user.get_unclaimed_record(self.project._primary_key)['token']
url = '/user/{uid}/{pid}/claim/verify/{token}/'.format(
uid=self.user._id,
pid=self.project._id,
token=token
)
# logged out user gets redirected to cas login
res = self.app.get(url)
assert res.status_code == 302
res = res.follow()
service_url = 'http://localhost:80{}'.format(url)
expected = cas.get_logout_url(service_url=cas.get_login_url(service_url=service_url))
assert res.request.url == expected
# user logged in with orcid automatically becomes a contributor
orcid_user, validated_credentials, cas_resp = generate_external_user_with_resp(url)
mock_response_from_ticket.return_value = authenticate(
orcid_user,
cas_resp.attributes.get('accessToken', ''),
redirect(url)
)
orcid_user.set_unusable_password()
orcid_user.save()
ticket = fake.md5()
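# any ticket value works here because make_response_from_ticket is mocked above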
url += '?ticket={}'.format(ticket)
res = self.app.get(url)
res = res.follow()
assert res.status_code == 302
assert self.project.is_contributor(orcid_user)
assert self.project.url in res.headers.get('Location')
def test_get_valid_form(self):
url = self.user.get_claim_url(self.project._primary_key)
res = self.app.get(url).maybe_follow()
assert_equal(res.status_code, 200)
def test_invalid_claim_form_raise_400(self):
uid = self.user._primary_key
pid = self.project._primary_key
url = '/user/{uid}/{pid}/claim/?token=badtoken'.format(**locals())
res = self.app.get(url, expect_errors=True).maybe_follow()
assert_equal(res.status_code, 400)
@mock.patch('osf.models.OSFUser.update_search_nodes')
def test_posting_to_claim_form_with_valid_data(self, mock_update_search_nodes):
url = self.user.get_claim_url(self.project._primary_key)
res = self.app.post(url, {
'username': self.user.username,
'password': 'killerqueen',
'password2': 'killerqueen'
})
assert_equal(res.status_code, 302)
location = res.headers.get('Location')
assert_in('login?service=', location)
assert_in('username', location)
assert_in('verification_key', location)
assert_in(self.project._primary_key, location)
self.user.reload()
assert_true(self.user.is_registered)
assert_true(self.user.is_active)
assert_not_in(self.project._primary_key, self.user.unclaimed_records)
@mock.patch('osf.models.OSFUser.update_search_nodes')
def test_posting_to_claim_form_removes_all_unclaimed_data(self, mock_update_search_nodes):
# user has multiple unclaimed records
p2 = ProjectFactory(creator=self.referrer)
self.user.add_unclaimed_record(p2, referrer=self.referrer,
given_name=fake.name())
self.user.save()
assert_true(len(self.user.unclaimed_records.keys()) > 1) # sanity check
url = self.user.get_claim_url(self.project._primary_key)
self.app.post(url, {
'username': self.given_email,
'password': 'bohemianrhap',
'password2': 'bohemianrhap'
})
self.user.reload()
assert_equal(self.user.unclaimed_records, {})
@mock.patch('osf.models.OSFUser.update_search_nodes')
def test_posting_to_claim_form_sets_fullname_to_given_name(self, mock_update_search_nodes):
# User is created with a full name
original_name = fake.name()
unreg = UnregUserFactory(fullname=original_name)
# User invited with a different name
different_name = fake.name()
new_user = self.project.add_unregistered_contributor(
email=unreg.username,
fullname=different_name,
auth=Auth(self.project.creator),
)
self.project.save()
# Goes to claim url
claim_url = new_user.get_claim_url(self.project._id)
self.app.post(claim_url, {
'username': unreg.username,
'password': 'killerqueen', 'password2': 'killerqueen'
})
unreg.reload()
# Full name was set correctly
assert_equal(unreg.fullname, different_name)
# CSL names were set correctly
parsed_name = impute_names_model(different_name)
assert_equal(unreg.given_name, parsed_name['given_name'])
assert_equal(unreg.family_name, parsed_name['family_name'])
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_claim_user_post_returns_fullname(self, send_mail):
url = '/api/v1/user/{0}/{1}/claim/email/'.format(self.user._primary_key,
self.project._primary_key)
res = self.app.post_json(url,
{'value': self.given_email,
'pk': self.user._primary_key},
auth=self.referrer.auth)
assert_equal(res.json['fullname'], self.given_name)
assert_true(send_mail.called)
        # Mock's `called_with` is not an assertion; inspect the recorded call instead.
        args, kwargs = send_mail.call_args
        assert_true(kwargs.get('to_addr') == self.given_email or self.given_email in args)
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_claim_user_post_if_email_is_different_from_given_email(self, send_mail):
email = fake_email() # email that is different from the one the referrer gave
url = '/api/v1/user/{0}/{1}/claim/email/'.format(self.user._primary_key,
self.project._primary_key)
self.app.post_json(url,
{'value': email, 'pk': self.user._primary_key}
)
assert_true(send_mail.called)
assert_equal(send_mail.call_count, 2)
        # `call.called_with(...)` builds a new call object and is always truthy;
        # assert against the recorded calls instead.
        invited_args, invited_kwargs = send_mail.call_args_list[0]
        assert_true(invited_kwargs.get('to_addr') == email or email in invited_args)
        referrer_args, referrer_kwargs = send_mail.call_args_list[1]
        assert_true(referrer_kwargs.get('to_addr') == self.given_email or self.given_email in referrer_args)
def test_claim_url_with_bad_token_returns_400(self):
url = self.project.web_url_for(
'claim_user_registered',
uid=self.user._id,
token='badtoken',
)
        res = self.app.get(url, auth=self.referrer.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_cannot_claim_user_with_user_who_is_already_contributor(self):
        # user who is already a contributor to the project
contrib = AuthUserFactory()
self.project.add_contributor(contrib, auth=Auth(self.project.creator))
self.project.save()
# Claiming user goes to claim url, but contrib is already logged in
url = self.user.get_claim_url(self.project._primary_key)
res = self.app.get(
url,
auth=contrib.auth,
).follow(
auth=contrib.auth,
expect_errors=True,
)
# Response is a 400
assert_equal(res.status_code, 400)
@pytest.mark.enable_bookmark_creation
class TestPointerViews(OsfTestCase):
def setUp(self):
super(TestPointerViews, self).setUp()
self.user = AuthUserFactory()
self.consolidate_auth = Auth(user=self.user)
self.project = ProjectFactory(creator=self.user)
def _make_pointer_only_user_can_see(self, user, project, save=False):
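        # create a private node owned by `user` and link it into `project`;
        # other contributors see the pointer entry but not the node itself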
node = ProjectFactory(creator=user)
project.add_pointer(node, auth=Auth(user=user), save=save)
def test_pointer_list_write_contributor_can_remove_private_component_entry(self):
"""Ensure that write contributors see the button to delete a pointer,
even if they cannot see what it is pointing at"""
url = web_url_for('view_project', pid=self.project._id)
user2 = AuthUserFactory()
self.project.add_contributor(user2,
auth=Auth(self.project.creator),
permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS)
self._make_pointer_only_user_can_see(user2, self.project)
self.project.save()
res = self.app.get(url, auth=self.user.auth).maybe_follow()
assert_equal(res.status_code, 200)
has_controls = res.lxml.xpath('//li[@node_id]/p[starts-with(normalize-space(text()), "Private Link")]//i[contains(@class, "remove-pointer")]')
assert_true(has_controls)
def test_pointer_list_write_contributor_can_remove_public_component_entry(self):
url = web_url_for('view_project', pid=self.project._id)
for i in range(3):
self.project.add_pointer(ProjectFactory(creator=self.user),
auth=Auth(user=self.user))
self.project.save()
res = self.app.get(url, auth=self.user.auth).maybe_follow()
assert_equal(res.status_code, 200)
has_controls = res.lxml.xpath(
'//li[@node_id]//i[contains(@class, "remove-pointer")]')
assert_equal(len(has_controls), 3)
def test_pointer_list_read_contributor_cannot_remove_private_component_entry(self):
url = web_url_for('view_project', pid=self.project._id)
user2 = AuthUserFactory()
self.project.add_contributor(user2,
auth=Auth(self.project.creator),
permissions=[permissions.READ])
self._make_pointer_only_user_can_see(user2, self.project)
self.project.save()
res = self.app.get(url, auth=user2.auth).maybe_follow()
assert_equal(res.status_code, 200)
pointer_nodes = res.lxml.xpath('//li[@node_id]')
has_controls = res.lxml.xpath('//li[@node_id]/p[starts-with(normalize-space(text()), "Private Link")]//i[contains(@class, "remove-pointer")]')
assert_equal(len(pointer_nodes), 1)
assert_false(has_controls)
def test_pointer_list_read_contributor_cannot_remove_public_component_entry(self):
url = web_url_for('view_project', pid=self.project._id)
self.project.add_pointer(ProjectFactory(creator=self.user,
is_public=True),
auth=Auth(user=self.user))
user2 = AuthUserFactory()
self.project.add_contributor(user2,
auth=Auth(self.project.creator),
permissions=[permissions.READ])
self.project.save()
res = self.app.get(url, auth=user2.auth).maybe_follow()
assert_equal(res.status_code, 200)
pointer_nodes = res.lxml.xpath('//li[@node_id]')
has_controls = res.lxml.xpath(
'//li[@node_id]//i[contains(@class, "remove-pointer")]')
assert_equal(len(pointer_nodes), 1)
assert_equal(len(has_controls), 0)
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/1109
def test_get_pointed_excludes_folders(self):
pointer_project = ProjectFactory(is_public=True) # project that points to another project
pointed_project = ProjectFactory(creator=self.user) # project that other project points to
pointer_project.add_pointer(pointed_project, Auth(pointer_project.creator), save=True)
# Project is in an organizer collection
collection = CollectionFactory(creator=pointed_project.creator)
collection.collect_object(pointed_project, self.user)
url = pointed_project.api_url_for('get_pointed')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
# pointer_project's id is included in response, but folder's id is not
pointer_ids = [each['id'] for each in res.json['pointed']]
assert_in(pointer_project._id, pointer_ids)
assert_not_in(collection._id, pointer_ids)
def test_add_pointers(self):
url = self.project.api_url + 'pointer/'
node_ids = [
NodeFactory()._id
for _ in range(5)
]
self.app.post_json(
url,
{'nodeIds': node_ids},
auth=self.user.auth,
).maybe_follow()
self.project.reload()
assert_equal(
self.project.nodes_active.count(),
5
)
def test_add_the_same_pointer_more_than_once(self):
url = self.project.api_url + 'pointer/'
double_node = NodeFactory()
self.app.post_json(
url,
{'nodeIds': [double_node._id]},
auth=self.user.auth,
)
res = self.app.post_json(
url,
{'nodeIds': [double_node._id]},
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
    def test_add_pointers_no_user_logged_in(self):
url = self.project.api_url_for('add_pointers')
node_ids = [
NodeFactory()._id
for _ in range(5)
]
res = self.app.post_json(
url,
{'nodeIds': node_ids},
auth=None,
expect_errors=True
)
assert_equal(res.status_code, 401)
def test_add_pointers_public_non_contributor(self):
project2 = ProjectFactory()
project2.set_privacy('public')
project2.save()
url = self.project.api_url_for('add_pointers')
self.app.post_json(
url,
{'nodeIds': [project2._id]},
auth=self.user.auth,
).maybe_follow()
self.project.reload()
assert_equal(
self.project.nodes_active.count(),
1
)
def test_add_pointers_contributor(self):
user2 = AuthUserFactory()
self.project.add_contributor(user2)
self.project.save()
url = self.project.api_url_for('add_pointers')
node_ids = [
NodeFactory()._id
for _ in range(5)
]
self.app.post_json(
url,
{'nodeIds': node_ids},
auth=user2.auth,
).maybe_follow()
self.project.reload()
assert_equal(
self.project.linked_nodes.count(),
5
)
def test_add_pointers_not_provided(self):
url = self.project.api_url + 'pointer/'
res = self.app.post_json(url, {}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_remove_pointer(self):
url = self.project.api_url + 'pointer/'
node = NodeFactory()
pointer = self.project.add_pointer(node, auth=self.consolidate_auth)
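        # add_pointer returns the NodeRelation; its `.node` is the linked node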
self.app.delete_json(
url,
{'pointerId': pointer.node._id},
auth=self.user.auth,
)
self.project.reload()
assert_equal(
len(list(self.project.nodes)),
0
)
def test_remove_pointer_not_provided(self):
url = self.project.api_url + 'pointer/'
res = self.app.delete_json(url, {}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_remove_pointer_not_found(self):
url = self.project.api_url + 'pointer/'
res = self.app.delete_json(
url,
{'pointerId': None},
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
def test_remove_pointer_not_in_nodes(self):
url = self.project.api_url + 'pointer/'
res = self.app.delete_json(
url,
{'pointerId': 'somefakeid'},
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
def test_forking_pointer_works(self):
url = self.project.api_url + 'pointer/fork/'
linked_node = NodeFactory(creator=self.user)
pointer = self.project.add_pointer(linked_node, auth=self.consolidate_auth)
        assert_equal(linked_node.id, pointer.child.id)
res = self.app.post_json(url, {'nodeId': pointer.child._id}, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_in('node', res.json['data'])
fork = res.json['data']['node']
assert_equal(fork['title'], 'Fork of {}'.format(linked_node.title))
def test_fork_pointer_not_provided(self):
url = self.project.api_url + 'pointer/fork/'
res = self.app.post_json(url, {}, auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, 400)
def test_fork_pointer_not_found(self):
url = self.project.api_url + 'pointer/fork/'
res = self.app.post_json(
url,
{'nodeId': None},
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
def test_fork_pointer_not_in_nodes(self):
url = self.project.api_url + 'pointer/fork/'
res = self.app.post_json(
url,
{'nodeId': 'somefakeid'},
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
    def test_before_fork_with_pointer(self):
        # Assert that the link warning appears in the before-fork callback.
node = NodeFactory()
self.project.add_pointer(node, auth=self.consolidate_auth)
url = self.project.api_url + 'fork/before/'
res = self.app.get(url, auth=self.user.auth).maybe_follow()
prompts = [
prompt
for prompt in res.json['prompts']
if 'Links will be copied into your fork' in prompt
]
assert_equal(len(prompts), 1)
    def test_before_register_with_pointer(self):
        """Assert that the link warning appears in the before-register callback."""
node = NodeFactory()
self.project.add_pointer(node, auth=self.consolidate_auth)
url = self.project.api_url + 'beforeregister/'
res = self.app.get(url, auth=self.user.auth).maybe_follow()
prompts = [
prompt
for prompt in res.json['prompts']
if 'These links will be copied into your registration,' in prompt
]
assert_equal(len(prompts), 1)
    def test_before_fork_no_pointer(self):
        """Assert that the link warning does not appear in the before-fork callback."""
url = self.project.api_url + 'fork/before/'
res = self.app.get(url, auth=self.user.auth).maybe_follow()
prompts = [
prompt
for prompt in res.json['prompts']
if 'Links will be copied into your fork' in prompt
]
assert_equal(len(prompts), 0)
    def test_before_register_no_pointer(self):
        """Assert that the link warning does not appear in the before-register callback."""
url = self.project.api_url + 'beforeregister/'
res = self.app.get(url, auth=self.user.auth).maybe_follow()
prompts = [
prompt
for prompt in res.json['prompts']
if 'Links will be copied into your registration' in prompt
]
assert_equal(len(prompts), 0)
def test_get_pointed(self):
pointing_node = ProjectFactory(creator=self.user)
pointing_node.add_pointer(self.project, auth=Auth(self.user))
url = self.project.api_url_for('get_pointed')
res = self.app.get(url, auth=self.user.auth)
pointed = res.json['pointed']
assert_equal(len(pointed), 1)
assert_equal(pointed[0]['url'], pointing_node.url)
assert_equal(pointed[0]['title'], pointing_node.title)
assert_equal(pointed[0]['authorShort'], abbrev_authors(pointing_node))
def test_get_pointed_private(self):
secret_user = UserFactory()
pointing_node = ProjectFactory(creator=secret_user)
pointing_node.add_pointer(self.project, auth=Auth(secret_user))
url = self.project.api_url_for('get_pointed')
res = self.app.get(url, auth=self.user.auth)
pointed = res.json['pointed']
assert_equal(len(pointed), 1)
assert_equal(pointed[0]['url'], None)
assert_equal(pointed[0]['title'], 'Private Component')
assert_equal(pointed[0]['authorShort'], 'Private Author(s)')
def test_can_template_project_linked_to_each_other(self):
project2 = ProjectFactory(creator=self.user)
self.project.add_pointer(project2, auth=Auth(user=self.user))
project2.add_pointer(self.project, auth=Auth(user=self.user))
template = self.project.use_as_template(auth=Auth(user=self.user))
assert_true(template)
assert_equal(template.title, 'Templated from ' + self.project.title)
assert_not_in(project2, template.linked_nodes)
class TestPublicViews(OsfTestCase):
def test_explore(self):
res = self.app.get('/explore/').maybe_follow()
assert_equal(res.status_code, 200)
@pytest.mark.enable_quickfiles_creation
class TestAuthViews(OsfTestCase):
def setUp(self):
super(TestAuthViews, self).setUp()
self.user = AuthUserFactory()
self.auth = self.user.auth
@mock.patch('framework.auth.views.mails.send_mail')
def test_register_ok(self, _):
url = api_url_for('register_user')
name, email, password = fake.name(), fake_email(), 'underpressure'
self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': email,
'password': password,
}
)
user = OSFUser.objects.get(username=email)
assert_equal(user.fullname, name)
assert_equal(user.accepted_terms_of_service, None)
# Regression test for https://github.com/CenterForOpenScience/osf.io/issues/2902
@mock.patch('framework.auth.views.mails.send_mail')
def test_register_email_case_insensitive(self, _):
url = api_url_for('register_user')
name, email, password = fake.name(), fake_email(), 'underpressure'
self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': str(email).upper(),
'password': password,
}
)
user = OSFUser.objects.get(username=email)
assert_equal(user.fullname, name)
@mock.patch('framework.auth.views.mails.send_mail')
def test_register_email_with_accepted_tos(self, _):
url = api_url_for('register_user')
name, email, password = fake.name(), fake_email(), 'underpressure'
self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': email,
'password': password,
'acceptedTermsOfService': True
}
)
user = OSFUser.objects.get(username=email)
assert_true(user.accepted_terms_of_service)
@mock.patch('framework.auth.views.mails.send_mail')
def test_register_email_without_accepted_tos(self, _):
url = api_url_for('register_user')
name, email, password = fake.name(), fake_email(), 'underpressure'
self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': email,
'password': password,
'acceptedTermsOfService': False
}
)
user = OSFUser.objects.get(username=email)
assert_equal(user.accepted_terms_of_service, None)
@mock.patch('framework.auth.views.send_confirm_email')
def test_register_scrubs_username(self, _):
url = api_url_for('register_user')
name = "<i>Eunice</i> O' \"Cornwallis\"<script type='text/javascript' src='http://www.cornify.com/js/cornify.js'></script><script type='text/javascript'>cornify_add()</script>"
email, password = fake_email(), 'underpressure'
res = self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': email,
'password': password,
}
)
expected_scrub_username = "Eunice O' \"Cornwallis\"cornify_add()"
user = OSFUser.objects.get(username=email)
assert_equal(res.status_code, http.OK)
assert_equal(user.fullname, expected_scrub_username)
def test_register_email_mismatch(self):
url = api_url_for('register_user')
name, email, password = fake.name(), fake_email(), 'underpressure'
res = self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': email + 'lol',
'password': password,
},
expect_errors=True,
)
assert_equal(res.status_code, http.BAD_REQUEST)
users = OSFUser.objects.filter(username=email)
assert_equal(users.count(), 0)
def test_register_email_already_registered(self):
url = api_url_for('register_user')
name, email, password = fake.name(), fake_email(), fake.password()
existing_user = UserFactory(
username=email,
)
res = self.app.post_json(
url, {
'fullName': name,
'email1': email,
'email2': email,
'password': password
},
expect_errors=True
)
assert_equal(res.status_code, http.CONFLICT)
users = OSFUser.objects.filter(username=email)
assert_equal(users.count(), 1)
def test_register_blacklisted_email_domain(self):
BlacklistedEmailDomain.objects.get_or_create(domain='mailinator.com')
url = api_url_for('register_user')
name, email, password = fake.name(), 'bad@mailinator.com', 'agreatpasswordobviously'
res = self.app.post_json(
url, {
'fullName': name,
'email1': email,
'email2': email,
'password': password
},
expect_errors=True
)
assert_equal(res.status_code, http.BAD_REQUEST)
users = OSFUser.objects.filter(username=email)
assert_equal(users.count(), 0)
@mock.patch('framework.auth.views.validate_recaptcha', return_value=True)
@mock.patch('framework.auth.views.mails.send_mail')
def test_register_good_captcha(self, _, validate_recaptcha):
url = api_url_for('register_user')
name, email, password = fake.name(), fake_email(), 'underpressure'
captcha = 'some valid captcha'
with mock.patch.object(settings, 'RECAPTCHA_SITE_KEY', 'some_value'):
resp = self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': str(email).upper(),
'password': password,
'g-recaptcha-response': captcha,
}
)
validate_recaptcha.assert_called_with(captcha, remote_ip=None)
assert_equal(resp.status_code, http.OK)
user = OSFUser.objects.get(username=email)
assert_equal(user.fullname, name)
@mock.patch('framework.auth.views.validate_recaptcha', return_value=False)
@mock.patch('framework.auth.views.mails.send_mail')
def test_register_missing_captcha(self, _, validate_recaptcha):
url = api_url_for('register_user')
name, email, password = fake.name(), fake_email(), 'underpressure'
with mock.patch.object(settings, 'RECAPTCHA_SITE_KEY', 'some_value'):
resp = self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': str(email).upper(),
'password': password,
# 'g-recaptcha-response': 'supposed to be None',
},
expect_errors=True
)
validate_recaptcha.assert_called_with(None, remote_ip=None)
assert_equal(resp.status_code, http.BAD_REQUEST)
@mock.patch('framework.auth.views.validate_recaptcha', return_value=False)
@mock.patch('framework.auth.views.mails.send_mail')
def test_register_bad_captcha(self, _, validate_recaptcha):
url = api_url_for('register_user')
name, email, password = fake.name(), fake_email(), 'underpressure'
with mock.patch.object(settings, 'RECAPTCHA_SITE_KEY', 'some_value'):
resp = self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': str(email).upper(),
'password': password,
'g-recaptcha-response': 'bad captcha',
},
expect_errors=True
)
assert_equal(resp.status_code, http.BAD_REQUEST)
@mock.patch('osf.models.OSFUser.update_search_nodes')
def test_register_after_being_invited_as_unreg_contributor(self, mock_update_search_nodes):
# Regression test for:
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/861
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/1021
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/1026
# A user is invited as an unregistered contributor
project = ProjectFactory()
name, email = fake.name(), fake_email()
project.add_unregistered_contributor(fullname=name, email=email, auth=Auth(project.creator))
project.save()
# The new, unregistered user
new_user = OSFUser.objects.get(username=email)
# Instead of following the invitation link, they register at the regular
# registration page
# They use a different name when they register, but same email
real_name = fake.name()
password = 'myprecious'
url = api_url_for('register_user')
payload = {
'fullName': real_name,
'email1': email,
'email2': email,
'password': password,
}
# Send registration request
self.app.post_json(url, payload)
new_user.reload()
# New user confirms by following confirmation link
confirm_url = new_user.get_confirmation_url(email, external=False)
self.app.get(confirm_url)
new_user.reload()
# Password and fullname should be updated
assert_true(new_user.is_confirmed)
assert_true(new_user.check_password(password))
assert_equal(new_user.fullname, real_name)
@mock.patch('framework.auth.views.send_confirm_email')
def test_register_sends_user_registered_signal(self, mock_send_confirm_email):
url = api_url_for('register_user')
name, email, password = fake.name(), fake_email(), 'underpressure'
with capture_signals() as mock_signals:
self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': email,
'password': password,
}
)
assert_equal(mock_signals.signals_sent(), set([auth.signals.user_registered,
auth.signals.unconfirmed_user_created]))
assert_true(mock_send_confirm_email.called)
@mock.patch('framework.auth.views.mails.send_mail')
def test_resend_confirmation(self, send_mail):
email = 'test@mail.com'
token = self.user.add_unconfirmed_email(email)
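        # add_unconfirmed_email returns a confirmation token that is stored in
        # user.email_verifications until the address is confirmed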
self.user.save()
url = api_url_for('resend_confirmation')
header = {'address': email, 'primary': False, 'confirmed': False}
self.app.put_json(url, {'id': self.user._id, 'email': header}, auth=self.user.auth)
assert_true(send_mail.called)
        # Mock's `called_with` is not an assertion; inspect the recorded call instead.
        args, kwargs = send_mail.call_args
        assert_true(kwargs.get('to_addr') == email or email in args)
self.user.reload()
assert_not_equal(token, self.user.get_confirmation_token(email))
with assert_raises(InvalidTokenError):
self.user.get_unconfirmed_email_for_token(token)
@mock.patch('framework.auth.views.mails.send_mail')
def test_click_confirmation_email(self, send_mail):
email = 'test@mail.com'
token = self.user.add_unconfirmed_email(email)
self.user.save()
self.user.reload()
assert_equal(self.user.email_verifications[token]['confirmed'], False)
        url = '/confirm/{}/{}/?logout=1'.format(self.user._id, token)
res = self.app.get(url)
self.user.reload()
assert_equal(self.user.email_verifications[token]['confirmed'], True)
assert_equal(res.status_code, 302)
login_url = 'login?service'
assert_in(login_url, res.body)
def test_get_email_to_add_no_email(self):
email_verifications = self.user.unconfirmed_email_info
assert_equal(email_verifications, [])
def test_get_unconfirmed_email(self):
email = 'test@mail.com'
self.user.add_unconfirmed_email(email)
self.user.save()
self.user.reload()
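        # unconfirmed_email_info only lists addresses whose confirmation link has
        # been visited, so a freshly added email does not appear here yet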
email_verifications = self.user.unconfirmed_email_info
assert_equal(email_verifications, [])
def test_get_email_to_add(self):
email = 'test@mail.com'
token = self.user.add_unconfirmed_email(email)
self.user.save()
self.user.reload()
assert_equal(self.user.email_verifications[token]['confirmed'], False)
        url = '/confirm/{}/{}/?logout=1'.format(self.user._id, token)
self.app.get(url)
self.user.reload()
assert_equal(self.user.email_verifications[token]['confirmed'], True)
email_verifications = self.user.unconfirmed_email_info
assert_equal(email_verifications[0]['address'], 'test@mail.com')
def test_add_email(self):
email = 'test@mail.com'
token = self.user.add_unconfirmed_email(email)
self.user.save()
self.user.reload()
assert_equal(self.user.email_verifications[token]['confirmed'], False)
url = '/confirm/{}/{}/?logout=1'.format(self.user._id, token)
self.app.get(url)
self.user.reload()
email_verifications = self.user.unconfirmed_email_info
put_email_url = api_url_for('unconfirmed_email_add')
res = self.app.put_json(put_email_url, email_verifications[0], auth=self.user.auth)
self.user.reload()
assert_equal(res.json_body['status'], 'success')
assert_equal(self.user.emails.last().address, 'test@mail.com')
def test_remove_email(self):
email = 'test@mail.com'
token = self.user.add_unconfirmed_email(email)
self.user.save()
self.user.reload()
url = '/confirm/{}/{}/?logout=1'.format(self.user._id, token)
self.app.get(url)
self.user.reload()
email_verifications = self.user.unconfirmed_email_info
remove_email_url = api_url_for('unconfirmed_email_remove')
remove_res = self.app.delete_json(remove_email_url, email_verifications[0], auth=self.user.auth)
self.user.reload()
assert_equal(remove_res.json_body['status'], 'success')
assert_equal(self.user.unconfirmed_email_info, [])
def test_add_expired_email(self):
        # Expired tokens are not returned and are removed from user.email_verifications
email = 'test@mail.com'
token = self.user.add_unconfirmed_email(email)
self.user.email_verifications[token]['expiration'] = timezone.now() - dt.timedelta(days=100)
self.user.save()
self.user.reload()
assert_equal(self.user.email_verifications[token]['email'], email)
self.user.clean_email_verifications(given_token=token)
unconfirmed_emails = self.user.unconfirmed_email_info
assert_equal(unconfirmed_emails, [])
assert_equal(self.user.email_verifications, {})
def test_clean_email_verifications(self):
        # Bad tokens are not returned and are removed from user.email_verifications
email = 'test@mail.com'
token = 'blahblahblah'
self.user.email_verifications[token] = {'expiration': timezone.now() + dt.timedelta(days=1),
'email': email,
'confirmed': False }
self.user.save()
self.user.reload()
assert_equal(self.user.email_verifications[token]['email'], email)
self.user.clean_email_verifications(given_token=token)
unconfirmed_emails = self.user.unconfirmed_email_info
assert_equal(unconfirmed_emails, [])
assert_equal(self.user.email_verifications, {})
def test_clean_email_verifications_when_email_verifications_is_an_empty_dict(self):
self.user.email_verifications = {}
self.user.save()
ret = self.user.clean_email_verifications()
assert_equal(ret, None)
assert_equal(self.user.email_verifications, {})
    def test_add_invalid_email(self):
        # An address containing illegal control characters raises a ValidationError
        email = u'\u0000\u0008\u000b\u000c\u000e\u001f\ufffe\uffffHello@yourmom.com'
        with assert_raises(ValidationError):
            self.user.add_unconfirmed_email(email)
def test_add_email_merge(self):
email = 'copy@cat.com'
dupe = UserFactory(
username=email,
)
dupe.save()
token = self.user.add_unconfirmed_email(email)
self.user.save()
self.user.reload()
assert_equal(self.user.email_verifications[token]['confirmed'], False)
url = '/confirm/{}/{}/?logout=1'.format(self.user._id, token)
self.app.get(url)
self.user.reload()
email_verifications = self.user.unconfirmed_email_info
put_email_url = api_url_for('unconfirmed_email_add')
res = self.app.put_json(put_email_url, email_verifications[0], auth=self.user.auth)
self.user.reload()
assert_equal(res.json_body['status'], 'success')
assert_equal(self.user.emails.last().address, 'copy@cat.com')
def test_resend_confirmation_without_user_id(self):
email = 'test@mail.com'
url = api_url_for('resend_confirmation')
header = {'address': email, 'primary': False, 'confirmed': False}
res = self.app.put_json(url, {'email': header}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['message_long'], '"id" is required')
def test_resend_confirmation_without_email(self):
url = api_url_for('resend_confirmation')
res = self.app.put_json(url, {'id': self.user._id}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_resend_confirmation_not_work_for_primary_email(self):
email = 'test@mail.com'
url = api_url_for('resend_confirmation')
header = {'address': email, 'primary': True, 'confirmed': False}
res = self.app.put_json(url, {'id': self.user._id, 'email': header}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
        # the triple-n 'Cannnot' matches the server-side message verbatim
        assert_equal(res.json['message_long'], 'Cannnot resend confirmation for confirmed emails')
def test_resend_confirmation_not_work_for_confirmed_email(self):
email = 'test@mail.com'
url = api_url_for('resend_confirmation')
header = {'address': email, 'primary': False, 'confirmed': True}
res = self.app.put_json(url, {'id': self.user._id, 'email': header}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['message_long'], 'Cannnot resend confirmation for confirmed emails')
@mock.patch('framework.auth.views.mails.send_mail')
def test_resend_confirmation_does_not_send_before_throttle_expires(self, send_mail):
email = 'test@mail.com'
self.user.save()
url = api_url_for('resend_confirmation')
header = {'address': email, 'primary': False, 'confirmed': False}
self.app.put_json(url, {'id': self.user._id, 'email': header}, auth=self.user.auth)
assert_true(send_mail.called)
# 2nd call does not send email because throttle period has not expired
res = self.app.put_json(url, {'id': self.user._id, 'email': header}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_confirm_email_clears_unclaimed_records_and_revokes_token(self):
unclaimed_user = UnconfirmedUserFactory()
# unclaimed user has been invited to a project.
referrer = UserFactory()
project = ProjectFactory(creator=referrer)
unclaimed_user.add_unclaimed_record(project, referrer, 'foo')
unclaimed_user.save()
# sanity check
assert_equal(len(unclaimed_user.email_verifications.keys()), 1)
# user goes to email confirmation link
token = unclaimed_user.get_confirmation_token(unclaimed_user.username)
url = web_url_for('confirm_email_get', uid=unclaimed_user._id, token=token)
res = self.app.get(url)
assert_equal(res.status_code, 302)
# unclaimed records and token are cleared
unclaimed_user.reload()
assert_equal(unclaimed_user.unclaimed_records, {})
assert_equal(len(unclaimed_user.email_verifications.keys()), 0)
def test_confirmation_link_registers_user(self):
user = OSFUser.create_unconfirmed('brian@queen.com', 'bicycle123', 'Brian May')
assert_false(user.is_registered) # sanity check
user.save()
confirmation_url = user.get_confirmation_url('brian@queen.com', external=False)
res = self.app.get(confirmation_url)
assert_equal(res.status_code, 302, 'redirects to settings page')
res = res.follow()
user.reload()
assert_true(user.is_registered)
class TestAuthLoginAndRegisterLogic(OsfTestCase):
def setUp(self):
super(TestAuthLoginAndRegisterLogic, self).setUp()
self.no_auth = Auth()
self.user_auth = AuthUserFactory()
self.auth = Auth(user=self.user_auth)
self.next_url = web_url_for('my_projects', _absolute=True)
self.invalid_campaign = 'invalid_campaign'
def test_osf_login_with_auth(self):
# login: user with auth
data = login_and_register_handler(self.auth)
assert_equal(data.get('status_code'), http.FOUND)
assert_equal(data.get('next_url'), web_url_for('dashboard', _absolute=True))
def test_osf_login_without_auth(self):
# login: user without auth
data = login_and_register_handler(self.no_auth)
assert_equal(data.get('status_code'), http.FOUND)
assert_equal(data.get('next_url'), web_url_for('dashboard', _absolute=True))
def test_osf_register_with_auth(self):
# register: user with auth
data = login_and_register_handler(self.auth, login=False)
assert_equal(data.get('status_code'), http.FOUND)
assert_equal(data.get('next_url'), web_url_for('dashboard', _absolute=True))
def test_osf_register_without_auth(self):
# register: user without auth
data = login_and_register_handler(self.no_auth, login=False)
assert_equal(data.get('status_code'), http.OK)
assert_equal(data.get('next_url'), web_url_for('dashboard', _absolute=True))
def test_next_url_login_with_auth(self):
# next_url login: user with auth
data = login_and_register_handler(self.auth, next_url=self.next_url)
assert_equal(data.get('status_code'), http.FOUND)
assert_equal(data.get('next_url'), self.next_url)
def test_next_url_login_without_auth(self):
# login: user without auth
request.url = web_url_for('auth_login', next=self.next_url, _absolute=True)
data = login_and_register_handler(self.no_auth, next_url=self.next_url)
assert_equal(data.get('status_code'), http.FOUND)
assert_equal(data.get('next_url'), get_login_url(request.url))
def test_next_url_register_with_auth(self):
# register: user with auth
data = login_and_register_handler(self.auth, login=False, next_url=self.next_url)
assert_equal(data.get('status_code'), http.FOUND)
assert_equal(data.get('next_url'), self.next_url)
def test_next_url_register_without_auth(self):
# register: user without auth
data = login_and_register_handler(self.no_auth, login=False, next_url=self.next_url)
assert_equal(data.get('status_code'), http.OK)
assert_equal(data.get('next_url'), request.url)
def test_institution_login_and_register(self):
pass
def test_institution_login_with_auth(self):
# institution login: user with auth
data = login_and_register_handler(self.auth, campaign='institution')
assert_equal(data.get('status_code'), http.FOUND)
assert_equal(data.get('next_url'), web_url_for('dashboard', _absolute=True))
def test_institution_login_without_auth(self):
# institution login: user without auth
data = login_and_register_handler(self.no_auth, campaign='institution')
assert_equal(data.get('status_code'), http.FOUND)
assert_equal(
data.get('next_url'),
get_login_url(web_url_for('dashboard', _absolute=True), campaign='institution'))
def test_institution_login_next_url_with_auth(self):
# institution login: user with auth and next url
data = login_and_register_handler(self.auth, next_url=self.next_url, campaign='institution')
assert_equal(data.get('status_code'), http.FOUND)
assert_equal(data.get('next_url'), self.next_url)
def test_institution_login_next_url_without_auth(self):
# institution login: user without auth and next url
        data = login_and_register_handler(self.no_auth, next_url=self.next_url, campaign='institution')
assert_equal(data.get('status_code'), http.FOUND)
assert_equal(
data.get('next_url'),
get_login_url(self.next_url, campaign='institution'))
    def test_institution_register_with_auth(self):
# institution register: user with auth
data = login_and_register_handler(self.auth, login=False, campaign='institution')
assert_equal(data.get('status_code'), http.FOUND)
assert_equal(data.get('next_url'), web_url_for('dashboard', _absolute=True))
def test_institution_register_without_auth(self):
# institution register: user without auth
data = login_and_register_handler(self.no_auth, login=False, campaign='institution')
assert_equal(data.get('status_code'), http.FOUND)
assert_equal(
data.get('next_url'),
get_login_url(web_url_for('dashboard', _absolute=True), campaign='institution')
)
def test_campaign_login_with_auth(self):
for campaign in get_campaigns():
if is_institution_login(campaign):
continue
# campaign login: user with auth
data = login_and_register_handler(self.auth, campaign=campaign)
assert_equal(data.get('status_code'), http.FOUND)
assert_equal(data.get('next_url'), campaign_url_for(campaign))
def test_campaign_login_without_auth(self):
for campaign in get_campaigns():
if is_institution_login(campaign):
continue
# campaign login: user without auth
data = login_and_register_handler(self.no_auth, campaign=campaign)
assert_equal(data.get('status_code'), http.FOUND)
assert_equal(
data.get('next_url'),
web_url_for('auth_register', campaign=campaign, next=campaign_url_for(campaign))
)
def test_campaign_register_with_auth(self):
for campaign in get_campaigns():
if is_institution_login(campaign):
continue
# campaign register: user with auth
data = login_and_register_handler(self.auth, login=False, campaign=campaign)
assert_equal(data.get('status_code'), http.FOUND)
assert_equal(data.get('next_url'), campaign_url_for(campaign))
def test_campaign_register_without_auth(self):
for campaign in get_campaigns():
if is_institution_login(campaign):
continue
# campaign register: user without auth
data = login_and_register_handler(self.no_auth, login=False, campaign=campaign)
assert_equal(data.get('status_code'), http.OK)
if is_native_login(campaign):
# native campaign: prereg and erpc
assert_equal(data.get('next_url'), campaign_url_for(campaign))
elif is_proxy_login(campaign):
# proxy campaign: preprints and branded ones
assert_equal(
data.get('next_url'),
web_url_for('auth_login', next=campaign_url_for(campaign), _absolute=True)
)
def test_campaign_next_url_login_with_auth(self):
for campaign in get_campaigns():
if is_institution_login(campaign):
continue
# campaign login: user with auth
next_url = campaign_url_for(campaign)
data = login_and_register_handler(self.auth, campaign=campaign, next_url=next_url)
assert_equal(data.get('status_code'), http.FOUND)
assert_equal(data.get('next_url'), next_url)
def test_campaign_next_url_login_without_auth(self):
for campaign in get_campaigns():
if is_institution_login(campaign):
continue
# campaign login: user without auth
next_url = campaign_url_for(campaign)
data = login_and_register_handler(self.no_auth, campaign=campaign, next_url=next_url)
assert_equal(data.get('status_code'), http.FOUND)
assert_equal(
data.get('next_url'),
web_url_for('auth_register', campaign=campaign, next=next_url)
)
def test_campaign_next_url_register_with_auth(self):
for campaign in get_campaigns():
if is_institution_login(campaign):
continue
# campaign register: user with auth
next_url = campaign_url_for(campaign)
data = login_and_register_handler(self.auth, login=False, campaign=campaign, next_url=next_url)
assert_equal(data.get('status_code'), http.FOUND)
assert_equal(data.get('next_url'), next_url)
def test_campaign_next_url_register_without_auth(self):
for campaign in get_campaigns():
if is_institution_login(campaign):
continue
# campaign register: user without auth
next_url = campaign_url_for(campaign)
data = login_and_register_handler(self.no_auth, login=False, campaign=campaign, next_url=next_url)
assert_equal(data.get('status_code'), http.OK)
if is_native_login(campaign):
# native campaign: prereg and erpc
assert_equal(data.get('next_url'), next_url)
elif is_proxy_login(campaign):
# proxy campaign: preprints and branded ones
assert_equal(
data.get('next_url'),
                    web_url_for('auth_login', next=next_url, _absolute=True)
)
def test_invalid_campaign_login_without_auth(self):
data = login_and_register_handler(
self.no_auth,
login=True,
campaign=self.invalid_campaign,
next_url=self.next_url
)
redirect_url = web_url_for('auth_login', campaigns=None, next=self.next_url)
assert_equal(data['status_code'], http.FOUND)
assert_equal(data['next_url'], redirect_url)
assert_equal(data['campaign'], None)
def test_invalid_campaign_register_without_auth(self):
data = login_and_register_handler(
self.no_auth,
login=False,
campaign=self.invalid_campaign,
next_url=self.next_url
)
redirect_url = web_url_for('auth_register', campaigns=None, next=self.next_url)
assert_equal(data['status_code'], http.FOUND)
assert_equal(data['next_url'], redirect_url)
assert_equal(data['campaign'], None)
    # The following two tests handle the special case for `claim_user_registered`.
    # When an authenticated user clicks the claim confirmation link, there are two ways to trigger this flow:
    # 1. If the authenticated user is already a contributor to the project, OSF asks the user to sign out
    #    by providing a "logout" link.
    # 2. If the authenticated user is not a contributor but decides not to claim contributorship under this
    #    account, OSF provides a "not <username>?" link for the user to log out.
    # Both links land the user on the register page with a "MUST LOGIN" push notification.
def test_register_logout_flag_with_auth(self):
        # when the user clicks the "logout" or "not <username>?" link, the first step is to log the user out
data = login_and_register_handler(self.auth, login=False, campaign=None, next_url=self.next_url, logout=True)
assert_equal(data.get('status_code'), 'auth_logout')
assert_equal(data.get('next_url'), self.next_url)
    def test_register_logout_flag_without_auth(self):
        # the second step is to land the user on the register page with the "MUST LOGIN" warning
data = login_and_register_handler(self.no_auth, login=False, campaign=None, next_url=self.next_url, logout=True)
assert_equal(data.get('status_code'), http.OK)
assert_equal(data.get('next_url'), self.next_url)
assert_true(data.get('must_login_warning'))
class TestAuthLogout(OsfTestCase):
def setUp(self):
super(TestAuthLogout, self).setUp()
self.goodbye_url = web_url_for('goodbye', _absolute=True)
self.redirect_url = web_url_for('forgot_password_get', _absolute=True)
self.valid_next_url = web_url_for('dashboard', _absolute=True)
self.invalid_next_url = 'http://localhost:1234/abcde'
self.auth_user = AuthUserFactory()
def tearDown(self):
super(TestAuthLogout, self).tearDown()
OSFUser.objects.all().delete()
assert_equal(OSFUser.objects.count(), 0)
def test_logout_with_valid_next_url_logged_in(self):
logout_url = web_url_for('auth_logout', _absolute=True, next=self.valid_next_url)
resp = self.app.get(logout_url, auth=self.auth_user.auth)
assert_equal(resp.status_code, http.FOUND)
assert_equal(cas.get_logout_url(logout_url), resp.headers['Location'])
def test_logout_with_valid_next_url_logged_out(self):
logout_url = web_url_for('auth_logout', _absolute=True, next=self.valid_next_url)
resp = self.app.get(logout_url, auth=None)
assert_equal(resp.status_code, http.FOUND)
assert_equal(self.valid_next_url, resp.headers['Location'])
def test_logout_with_invalid_next_url_logged_in(self):
logout_url = web_url_for('auth_logout', _absolute=True, next=self.invalid_next_url)
resp = self.app.get(logout_url, auth=self.auth_user.auth)
assert_equal(resp.status_code, http.FOUND)
assert_equal(cas.get_logout_url(self.goodbye_url), resp.headers['Location'])
def test_logout_with_invalid_next_url_logged_out(self):
logout_url = web_url_for('auth_logout', _absolute=True, next=self.invalid_next_url)
resp = self.app.get(logout_url, auth=None)
assert_equal(resp.status_code, http.FOUND)
assert_equal(cas.get_logout_url(self.goodbye_url), resp.headers['Location'])
def test_logout_with_redirect_url(self):
logout_url = web_url_for('auth_logout', _absolute=True, redirect_url=self.redirect_url)
resp = self.app.get(logout_url, auth=self.auth_user.auth)
assert_equal(resp.status_code, http.FOUND)
assert_equal(cas.get_logout_url(self.redirect_url), resp.headers['Location'])
def test_logout_with_no_parameter(self):
logout_url = web_url_for('auth_logout', _absolute=True)
resp = self.app.get(logout_url, auth=None)
assert_equal(resp.status_code, http.FOUND)
assert_equal(cas.get_logout_url(self.goodbye_url), resp.headers['Location'])
class TestExternalAuthViews(OsfTestCase):
def setUp(self):
super(TestExternalAuthViews, self).setUp()
name, email = fake.name(), fake_email()
self.provider_id = fake.ean()
external_identity = {
'orcid': {
self.provider_id: 'CREATE'
}
}
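        # external_identity maps provider -> {provider_id: status}; status is
        # 'CREATE' (new account flow), 'LINK' (attach to an existing account), or
        # 'VERIFIED' (confirmation completed), as exercised by the tests below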
self.user = OSFUser.create_unconfirmed(
username=email,
password=str(fake.password()),
fullname=name,
external_identity=external_identity,
)
self.user.save()
self.auth = Auth(self.user)
def test_external_login_email_get_with_invalid_session(self):
url = web_url_for('external_login_email_get')
resp = self.app.get(url, expect_errors=True)
assert_equal(resp.status_code, 401)
def test_external_login_confirm_email_get_with_another_user_logged_in(self):
another_user = AuthUserFactory()
url = self.user.get_confirmation_url(self.user.username, external_id_provider='orcid', destination='dashboard')
res = self.app.get(url, auth=another_user.auth)
assert_equal(res.status_code, 302, 'redirects to cas logout')
assert_in('/logout?service=', res.location)
assert_in(url, res.location)
def test_external_login_confirm_email_get_without_destination(self):
url = self.user.get_confirmation_url(self.user.username, external_id_provider='orcid')
res = self.app.get(url, auth=self.auth, expect_errors=True)
assert_equal(res.status_code, 400, 'bad request')
@mock.patch('website.mails.send_mail')
def test_external_login_confirm_email_get_create(self, mock_welcome):
assert_false(self.user.is_registered)
url = self.user.get_confirmation_url(self.user.username, external_id_provider='orcid', destination='dashboard')
res = self.app.get(url, auth=self.auth)
assert_equal(res.status_code, 302, 'redirects to cas login')
assert_in('/login?service=', res.location)
assert_in('new=true', res.location)
assert_equal(mock_welcome.call_count, 1)
self.user.reload()
assert_equal(self.user.external_identity['orcid'][self.provider_id], 'VERIFIED')
assert_true(self.user.is_registered)
assert_true(self.user.has_usable_password())
@mock.patch('website.mails.send_mail')
def test_external_login_confirm_email_get_link(self, mock_link_confirm):
self.user.external_identity['orcid'][self.provider_id] = 'LINK'
self.user.save()
assert_false(self.user.is_registered)
url = self.user.get_confirmation_url(self.user.username, external_id_provider='orcid', destination='dashboard')
res = self.app.get(url, auth=self.auth)
assert_equal(res.status_code, 302, 'redirects to cas login')
assert_in('/login?service=', res.location)
assert_not_in('new=true', res.location)
assert_equal(mock_link_confirm.call_count, 1)
self.user.reload()
assert_equal(self.user.external_identity['orcid'][self.provider_id], 'VERIFIED')
assert_true(self.user.is_registered)
assert_true(self.user.has_usable_password())
@mock.patch('website.mails.send_mail')
def test_external_login_confirm_email_get_duped_id(self, mock_confirm):
dupe_user = UserFactory(external_identity={'orcid': {self.provider_id: 'CREATE'}})
assert_equal(dupe_user.external_identity, self.user.external_identity)
url = self.user.get_confirmation_url(self.user.username, external_id_provider='orcid', destination='dashboard')
res = self.app.get(url, auth=self.auth)
assert_equal(res.status_code, 302, 'redirects to cas login')
assert_in('/login?service=', res.location)
assert_equal(mock_confirm.call_count, 1)
self.user.reload()
dupe_user.reload()
assert_equal(self.user.external_identity['orcid'][self.provider_id], 'VERIFIED')
assert_equal(dupe_user.external_identity, {})
@mock.patch('website.mails.send_mail')
def test_external_login_confirm_email_get_duping_id(self, mock_confirm):
dupe_user = UserFactory(external_identity={'orcid': {self.provider_id: 'VERIFIED'}})
url = self.user.get_confirmation_url(self.user.username, external_id_provider='orcid', destination='dashboard')
res = self.app.get(url, auth=self.auth, expect_errors=True)
assert_equal(res.status_code, 403, 'only allows one user to link an id')
assert_equal(mock_confirm.call_count, 0)
self.user.reload()
dupe_user.reload()
assert_equal(dupe_user.external_identity['orcid'][self.provider_id], 'VERIFIED')
assert_equal(self.user.external_identity, {})
def test_ensure_external_identity_uniqueness_unverified(self):
dupe_user = UserFactory(external_identity={'orcid': {self.provider_id: 'CREATE'}})
assert_equal(dupe_user.external_identity, self.user.external_identity)
ensure_external_identity_uniqueness('orcid', self.provider_id, self.user)
dupe_user.reload()
self.user.reload()
assert_equal(dupe_user.external_identity, {})
assert_equal(self.user.external_identity, {'orcid': {self.provider_id: 'CREATE'}})
def test_ensure_external_identity_uniqueness_verified(self):
dupe_user = UserFactory(external_identity={'orcid': {self.provider_id: 'VERIFIED'}})
assert_equal(dupe_user.external_identity, {'orcid': {self.provider_id: 'VERIFIED'}})
assert_not_equal(dupe_user.external_identity, self.user.external_identity)
with assert_raises(ValidationError):
ensure_external_identity_uniqueness('orcid', self.provider_id, self.user)
dupe_user.reload()
self.user.reload()
assert_equal(dupe_user.external_identity, {'orcid': {self.provider_id: 'VERIFIED'}})
assert_equal(self.user.external_identity, {})
def test_ensure_external_identity_uniqueness_multiple(self):
dupe_user = UserFactory(external_identity={'orcid': {self.provider_id: 'CREATE'}})
assert_equal(dupe_user.external_identity, self.user.external_identity)
ensure_external_identity_uniqueness('orcid', self.provider_id)
dupe_user.reload()
self.user.reload()
assert_equal(dupe_user.external_identity, {})
assert_equal(self.user.external_identity, {})
# TODO: Use mock add-on
class TestAddonUserViews(OsfTestCase):
def setUp(self):
super(TestAddonUserViews, self).setUp()
self.user = AuthUserFactory()
def test_choose_addons_add(self):
"""Add add-ons; assert that add-ons are attached to project.
"""
url = '/api/v1/settings/addons/'
self.app.post_json(
url,
{'github': True},
auth=self.user.auth,
).maybe_follow()
self.user.reload()
assert_true(self.user.get_addon('github'))
def test_choose_addons_remove(self):
# Add, then delete, add-ons; assert that add-ons are not attached to
# project.
url = '/api/v1/settings/addons/'
self.app.post_json(
url,
{'github': True},
auth=self.user.auth,
).maybe_follow()
self.app.post_json(
url,
{'github': False},
auth=self.user.auth
).maybe_follow()
self.user.reload()
assert_false(self.user.get_addon('github'))
@pytest.mark.enable_enqueue_task
class TestConfigureMailingListViews(OsfTestCase):
@classmethod
def setUpClass(cls):
super(TestConfigureMailingListViews, cls).setUpClass()
cls._original_enable_email_subscriptions = settings.ENABLE_EMAIL_SUBSCRIPTIONS
settings.ENABLE_EMAIL_SUBSCRIPTIONS = True
def test_user_unsubscribe_and_subscribe_help_mailing_list(self):
user = AuthUserFactory()
url = api_url_for('user_choose_mailing_lists')
payload = {settings.OSF_HELP_LIST: False}
res = self.app.post_json(url, payload, auth=user.auth)
user.reload()
assert_false(user.osf_mailing_lists[settings.OSF_HELP_LIST])
payload = {settings.OSF_HELP_LIST: True}
res = self.app.post_json(url, payload, auth=user.auth)
user.reload()
assert_true(user.osf_mailing_lists[settings.OSF_HELP_LIST])
def test_get_notifications(self):
user = AuthUserFactory()
mailing_lists = dict(user.osf_mailing_lists.items() + user.mailchimp_mailing_lists.items())
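        # (Python 2 idiom: dict.items() returns lists, so the two can be concatenated)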
url = api_url_for('user_notifications')
res = self.app.get(url, auth=user.auth)
assert_equal(mailing_lists, res.json['mailing_lists'])
def test_osf_help_mails_subscribe(self):
user = UserFactory()
user.osf_mailing_lists[settings.OSF_HELP_LIST] = False
user.save()
update_osf_help_mails_subscription(user, True)
assert_true(user.osf_mailing_lists[settings.OSF_HELP_LIST])
def test_osf_help_mails_unsubscribe(self):
user = UserFactory()
user.osf_mailing_lists[settings.OSF_HELP_LIST] = True
user.save()
update_osf_help_mails_subscription(user, False)
assert_false(user.osf_mailing_lists[settings.OSF_HELP_LIST])
@unittest.skipIf(settings.USE_CELERY, 'Subscription must happen synchronously for this test')
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_user_choose_mailing_lists_updates_user_dict(self, mock_get_mailchimp_api):
user = AuthUserFactory()
list_name = 'OSF General'
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]}
list_id = mailchimp_utils.get_list_id_from_name(list_name)
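        # resolves the list id through the mocked client's lists.list response above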
payload = {settings.MAILCHIMP_GENERAL_LIST: True}
url = api_url_for('user_choose_mailing_lists')
res = self.app.post_json(url, payload, auth=user.auth)
# the test app doesn't have celery handlers attached, so we need to call this manually.
handlers.celery_teardown_request()
user.reload()
# check user.mailing_lists is updated
assert_true(user.mailchimp_mailing_lists[settings.MAILCHIMP_GENERAL_LIST])
assert_equal(
user.mailchimp_mailing_lists[settings.MAILCHIMP_GENERAL_LIST],
payload[settings.MAILCHIMP_GENERAL_LIST]
)
# check that user is subscribed
mock_client.lists.subscribe.assert_called_with(id=list_id,
email={'email': user.username},
merge_vars={
'fname': user.given_name,
'lname': user.family_name,
},
double_optin=False,
update_existing=True)
def test_get_mailchimp_get_endpoint_returns_200(self):
url = api_url_for('mailchimp_get_endpoint')
res = self.app.get(url)
assert_equal(res.status_code, 200)
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
    def test_mailchimp_webhook_subscribe_action_updates_user(self, mock_get_mailchimp_api):
        """ Test that 'subscribe' actions sent to the OSF via mailchimp
        webhooks update the OSF database.
        """
list_id = '12345'
list_name = 'OSF General'
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': list_id, 'name': list_name}]}
# user is not subscribed to a list
user = AuthUserFactory()
user.mailchimp_mailing_lists = {'OSF General': False}
user.save()
# user subscribes and webhook sends request to OSF
data = {
'type': 'subscribe',
'data[list_id]': list_id,
'data[email]': user.username
}
url = api_url_for('sync_data_from_mailchimp') + '?key=' + settings.MAILCHIMP_WEBHOOK_SECRET_KEY
res = self.app.post(url,
data,
content_type='application/x-www-form-urlencoded',
auth=user.auth)
# user field is updated on the OSF
user.reload()
assert_true(user.mailchimp_mailing_lists[list_name])
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_mailchimp_webhook_profile_action_does_not_change_user(self, mock_get_mailchimp_api):
""" Test that 'profile' actions sent to the OSF via mailchimp
webhooks do not cause any database changes.
"""
list_id = '12345'
list_name = 'OSF General'
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': list_id, 'name': list_name}]}
# user is subscribed to a list
user = AuthUserFactory()
user.mailchimp_mailing_lists = {'OSF General': True}
user.save()
# user hits subscribe again, which will update the user's existing info on mailchimp
# webhook sends request (when configured to update on changes made through the API)
data = {
'type': 'profile',
'data[list_id]': list_id,
'data[email]': user.username
}
url = api_url_for('sync_data_from_mailchimp') + '?key=' + settings.MAILCHIMP_WEBHOOK_SECRET_KEY
res = self.app.post(url,
data,
content_type='application/x-www-form-urlencoded',
auth=user.auth)
# user field does not change
user.reload()
assert_true(user.mailchimp_mailing_lists[list_name])
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_sync_data_from_mailchimp_unsubscribes_user(self, mock_get_mailchimp_api):
list_id = '12345'
list_name = 'OSF General'
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': list_id, 'name': list_name}]}
# user is subscribed to a list
user = AuthUserFactory()
user.mailchimp_mailing_lists = {'OSF General': True}
user.save()
# user unsubscribes through mailchimp and webhook sends request
data = {
'type': 'unsubscribe',
'data[list_id]': list_id,
'data[email]': user.username
}
url = api_url_for('sync_data_from_mailchimp') + '?key=' + settings.MAILCHIMP_WEBHOOK_SECRET_KEY
res = self.app.post(url,
data,
content_type='application/x-www-form-urlencoded',
auth=user.auth)
# user field is updated on the OSF
user.reload()
assert_false(user.mailchimp_mailing_lists[list_name])
def test_sync_data_from_mailchimp_fails_without_secret_key(self):
user = AuthUserFactory()
payload = {'values': {'type': 'unsubscribe',
'data': {'list_id': '12345',
'email': 'freddie@cos.io'}}}
url = api_url_for('sync_data_from_mailchimp')
res = self.app.post_json(url, payload, auth=user.auth, expect_errors=True)
assert_equal(res.status_code, http.UNAUTHORIZED)
@classmethod
def tearDownClass(cls):
super(TestConfigureMailingListViews, cls).tearDownClass()
settings.ENABLE_EMAIL_SUBSCRIPTIONS = cls._original_enable_email_subscriptions
# TODO: Move to OSF Storage
class TestFileViews(OsfTestCase):
def setUp(self):
super(TestFileViews, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user, is_public=True)
self.project.add_contributor(self.user)
self.project.save()
def test_grid_data(self):
url = self.project.api_url_for('grid_data')
res = self.app.get(url, auth=self.user.auth).maybe_follow()
assert_equal(res.status_code, http.OK)
expected = rubeus.to_hgrid(self.project, auth=Auth(self.user))
data = res.json['data']
assert_equal(len(data), len(expected))
class TestTagViews(OsfTestCase):
def setUp(self):
super(TestTagViews, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
@unittest.skip('Tags endpoint disabled for now.')
def test_tag_get_returns_200(self):
url = web_url_for('project_tag', tag='foo')
res = self.app.get(url)
assert_equal(res.status_code, 200)
class TestReorderComponents(OsfTestCase):
def setUp(self):
super(TestReorderComponents, self).setUp()
self.creator = AuthUserFactory()
self.contrib = AuthUserFactory()
# Project is public
self.project = ProjectFactory.create(creator=self.creator, is_public=True)
self.project.add_contributor(self.contrib, auth=Auth(self.creator))
# subcomponent that only creator can see
self.public_component = NodeFactory(creator=self.creator, is_public=True)
self.private_component = NodeFactory(creator=self.creator, is_public=False)
NodeRelation.objects.create(parent=self.project, child=self.public_component)
NodeRelation.objects.create(parent=self.project, child=self.private_component)
self.project.save()
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/489
def test_reorder_components_with_private_component(self):
# contrib tries to reorder components
payload = {
'new_list': [
'{0}'.format(self.private_component._id),
'{0}'.format(self.public_component._id),
]
}
url = self.project.api_url_for('project_reorder_components')
res = self.app.post_json(url, payload, auth=self.contrib.auth)
assert_equal(res.status_code, 200)
class TestWikiWidgetViews(OsfTestCase):
def setUp(self):
super(TestWikiWidgetViews, self).setUp()
# project with no home wiki page
self.project = ProjectFactory()
self.read_only_contrib = AuthUserFactory()
self.project.add_contributor(self.read_only_contrib, permissions='read')
self.noncontributor = AuthUserFactory()
# project with no home wiki content
self.project2 = ProjectFactory(creator=self.project.creator)
self.project2.add_contributor(self.read_only_contrib, permissions='read')
WikiPage.objects.create_for_node(self.project2, 'home', '', Auth(self.project.creator))
def test_show_wiki_for_contributors_when_no_wiki_or_content(self):
contrib = self.project.contributor_set.get(user=self.project.creator)
assert_true(_should_show_wiki_widget(self.project, contrib))
assert_true(_should_show_wiki_widget(self.project2, contrib))
def test_show_wiki_is_false_for_read_contributors_when_no_wiki_or_content(self):
contrib = self.project.contributor_set.get(user=self.read_only_contrib)
assert_false(_should_show_wiki_widget(self.project, contrib))
assert_false(_should_show_wiki_widget(self.project2, contrib))
def test_show_wiki_is_false_for_noncontributors_when_no_wiki_or_content(self):
assert_false(_should_show_wiki_widget(self.project, None))
@pytest.mark.enable_implicit_clean
@pytest.mark.enable_bookmark_creation
class TestProjectCreation(OsfTestCase):
def setUp(self):
super(TestProjectCreation, self).setUp()
self.creator = AuthUserFactory()
self.url = api_url_for('project_new_post')
self.user1 = AuthUserFactory()
self.user2 = AuthUserFactory()
self.project = ProjectFactory(creator=self.user1)
self.project.add_contributor(self.user2, auth=Auth(self.user1))
self.project.save()
def tearDown(self):
super(TestProjectCreation, self).tearDown()
def test_needs_title(self):
res = self.app.post_json(self.url, {}, auth=self.creator.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_create_component_strips_html(self):
user = AuthUserFactory()
project = ProjectFactory(creator=user)
url = web_url_for('project_new_node', pid=project._id)
post_data = {'title': '<b>New <blink>Component</blink> Title</b>', 'category': ''}
request = self.app.post(url, post_data, auth=user.auth).follow()
project.reload()
child = project.nodes[0]
# HTML has been stripped
assert_equal(child.title, 'New Component Title')
def test_strip_html_from_title(self):
payload = {
'title': 'no html <b>here</b>'
}
res = self.app.post_json(self.url, payload, auth=self.creator.auth)
node = AbstractNode.load(res.json['projectUrl'].replace('/', ''))
assert_true(node)
assert_equal('no html here', node.title)
def test_only_needs_title(self):
payload = {
'title': 'Im a real title'
}
res = self.app.post_json(self.url, payload, auth=self.creator.auth)
assert_equal(res.status_code, 201)
def test_title_must_be_one_long(self):
payload = {
'title': ''
}
res = self.app.post_json(
self.url, payload, auth=self.creator.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_title_must_be_less_than_200(self):
payload = {
'title': ''.join([str(x) for x in range(0, 250)])
}
res = self.app.post_json(
self.url, payload, auth=self.creator.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_fails_to_create_project_with_whitespace_title(self):
payload = {
'title': ' '
}
res = self.app.post_json(
self.url, payload, auth=self.creator.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_creates_a_project(self):
payload = {
'title': 'Im a real title'
}
res = self.app.post_json(self.url, payload, auth=self.creator.auth)
assert_equal(res.status_code, 201)
node = AbstractNode.load(res.json['projectUrl'].replace('/', ''))
assert_true(node)
assert_equal(node.title, 'Im a real title')
def test_create_component_add_contributors_admin(self):
url = web_url_for('project_new_node', pid=self.project._id)
post_data = {'title': 'New Component With Contributors Title', 'category': '', 'inherit_contributors': True}
res = self.app.post(url, post_data, auth=self.user1.auth)
self.project.reload()
child = self.project.nodes[0]
assert_equal(child.title, 'New Component With Contributors Title')
assert_in(self.user1, child.contributors)
assert_in(self.user2, child.contributors)
# check redirect url
assert_in('/contributors/', res.location)
def test_create_component_with_contributors_read_write(self):
url = web_url_for('project_new_node', pid=self.project._id)
non_admin = AuthUserFactory()
self.project.add_contributor(non_admin, permissions=['read', 'write'])
self.project.save()
post_data = {'title': 'New Component With Contributors Title', 'category': '', 'inherit_contributors': True}
res = self.app.post(url, post_data, auth=non_admin.auth)
self.project.reload()
child = self.project.nodes[0]
assert_equal(child.title, 'New Component With Contributors Title')
assert_in(non_admin, child.contributors)
assert_in(self.user1, child.contributors)
assert_in(self.user2, child.contributors)
assert_equal(child.get_permissions(non_admin), ['read', 'write', 'admin'])
# check redirect url
assert_in('/contributors/', res.location)
def test_create_component_with_contributors_read(self):
url = web_url_for('project_new_node', pid=self.project._id)
non_admin = AuthUserFactory()
self.project.add_contributor(non_admin, permissions=['read'])
self.project.save()
post_data = {'title': 'New Component With Contributors Title', 'category': '', 'inherit_contributors': True}
res = self.app.post(url, post_data, auth=non_admin.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_create_component_add_no_contributors(self):
url = web_url_for('project_new_node', pid=self.project._id)
post_data = {'title': 'New Component With Contributors Title', 'category': ''}
res = self.app.post(url, post_data, auth=self.user1.auth)
self.project.reload()
child = self.project.nodes[0]
assert_equal(child.title, 'New Component With Contributors Title')
assert_in(self.user1, child.contributors)
assert_not_in(self.user2, child.contributors)
# check redirect url
assert_not_in('/contributors/', res.location)
def test_new_project_returns_serialized_node_data(self):
payload = {
'title': 'Im a real title'
}
res = self.app.post_json(self.url, payload, auth=self.creator.auth)
assert_equal(res.status_code, 201)
node = res.json['newNode']
assert_true(node)
assert_equal(node['title'], 'Im a real title')
def test_description_works(self):
payload = {
'title': 'Im a real title',
'description': 'I describe things!'
}
res = self.app.post_json(self.url, payload, auth=self.creator.auth)
assert_equal(res.status_code, 201)
node = AbstractNode.load(res.json['projectUrl'].replace('/', ''))
assert_true(node)
assert_equal(node.description, 'I describe things!')
def test_can_template(self):
other_node = ProjectFactory(creator=self.creator)
payload = {
'title': 'Im a real title',
'template': other_node._id
}
res = self.app.post_json(self.url, payload, auth=self.creator.auth)
assert_equal(res.status_code, 201)
node = AbstractNode.load(res.json['projectUrl'].replace('/', ''))
assert_true(node)
assert_equal(node.template_node, other_node)
def test_project_before_template_no_addons(self):
project = ProjectFactory()
res = self.app.get(project.api_url_for('project_before_template'), auth=project.creator.auth)
assert_equal(res.json['prompts'], [])
def test_project_before_template_with_addons(self):
project = ProjectWithAddonFactory(addon='box')
res = self.app.get(project.api_url_for('project_before_template'), auth=project.creator.auth)
assert_in('Box', res.json['prompts'])
def test_project_new_from_template_non_user(self):
project = ProjectFactory()
url = api_url_for('project_new_from_template', nid=project._id)
res = self.app.post(url, auth=None)
assert_equal(res.status_code, 302)
res2 = res.follow(expect_errors=True)
assert_equal(res2.status_code, 301)
assert_equal(res2.request.path, '/login')
def test_project_new_from_template_public_non_contributor(self):
non_contributor = AuthUserFactory()
project = ProjectFactory(is_public=True)
url = api_url_for('project_new_from_template', nid=project._id)
res = self.app.post(url, auth=non_contributor.auth)
assert_equal(res.status_code, 201)
def test_project_new_from_template_contributor(self):
contributor = AuthUserFactory()
project = ProjectFactory(is_public=False)
project.add_contributor(contributor)
project.save()
url = api_url_for('project_new_from_template', nid=project._id)
res = self.app.post(url, auth=contributor.auth)
assert_equal(res.status_code, 201)
class TestUnconfirmedUserViews(OsfTestCase):
def test_can_view_profile(self):
user = UnconfirmedUserFactory()
url = web_url_for('profile_view_id', uid=user._id)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, http.BAD_REQUEST)
class TestStaticFileViews(OsfTestCase):
def test_robots_dot_txt(self):
res = self.app.get('/robots.txt')
assert_equal(res.status_code, 200)
assert_in('User-agent', res)
assert_in('html', res.headers['Content-Type'])
def test_favicon(self):
res = self.app.get('/favicon.ico')
assert_equal(res.status_code, 200)
assert_in('image/vnd.microsoft.icon', res.headers['Content-Type'])
def test_getting_started_page(self):
res = self.app.get('/getting-started/')
assert_equal(res.status_code, 302)
assert_equal(res.location, 'https://openscience.zendesk.com/hc/en-us')
def test_help_redirect(self):
res = self.app.get('/help/')
assert_equal(res.status_code, 302)
class TestUserConfirmSignal(OsfTestCase):
def test_confirm_user_signal_called_when_user_claims_account(self):
unclaimed_user = UnconfirmedUserFactory()
# unclaimed user has been invited to a project.
referrer = UserFactory()
project = ProjectFactory(creator=referrer)
unclaimed_user.add_unclaimed_record(project, referrer, 'foo', email=fake_email())
unclaimed_user.save()
token = unclaimed_user.get_unclaimed_record(project._primary_key)['token']
with capture_signals() as mock_signals:
url = web_url_for('claim_user_form', pid=project._id, uid=unclaimed_user._id, token=token)
payload = {'username': unclaimed_user.username,
'password': 'password',
'password2': 'password'}
res = self.app.post(url, payload)
assert_equal(res.status_code, 302)
assert_equal(mock_signals.signals_sent(), set([auth.signals.user_confirmed]))
def test_confirm_user_signal_called_when_user_confirms_email(self):
unconfirmed_user = UnconfirmedUserFactory()
unconfirmed_user.save()
# user goes to email confirmation link
token = unconfirmed_user.get_confirmation_token(unconfirmed_user.username)
with capture_signals() as mock_signals:
url = web_url_for('confirm_email_get', uid=unconfirmed_user._id, token=token)
res = self.app.get(url)
assert_equal(res.status_code, 302)
assert_equal(mock_signals.signals_sent(), set([auth.signals.user_confirmed]))
# copied from tests/test_comments.py
class TestCommentViews(OsfTestCase):
def setUp(self):
super(TestCommentViews, self).setUp()
self.project = ProjectFactory(is_public=True)
self.user = AuthUserFactory()
self.project.add_contributor(self.user)
self.project.save()
self.user.save()
def test_view_project_comments_updates_user_comments_view_timestamp(self):
url = self.project.api_url_for('update_comments_timestamp')
res = self.app.put_json(url, {
'page': 'node',
'rootId': self.project._id
}, auth=self.user.auth)
self.user.reload()
user_timestamp = self.user.comments_viewed_timestamp[self.project._id]
view_timestamp = timezone.now()
assert_datetime_equal(user_timestamp, view_timestamp)
def test_confirm_non_contrib_viewers_dont_have_pid_in_comments_view_timestamp(self):
non_contributor = AuthUserFactory()
url = self.project.api_url_for('update_comments_timestamp')
res = self.app.put_json(url, {
'page': 'node',
'rootId': self.project._id
}, auth=self.user.auth)
non_contributor.reload()
assert_not_in(self.project._id, non_contributor.comments_viewed_timestamp)
def test_view_comments_updates_user_comments_view_timestamp_files(self):
osfstorage = self.project.get_addon('osfstorage')
root_node = osfstorage.get_root()
test_file = root_node.append_file('test_file')
test_file.create_version(self.user, {
'object': '06d80e',
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
}, {
'size': 1337,
'contentType': 'img/png'
}).save()
url = self.project.api_url_for('update_comments_timestamp')
res = self.app.put_json(url, {
'page': 'files',
'rootId': test_file._id
}, auth=self.user.auth)
self.user.reload()
user_timestamp = self.user.comments_viewed_timestamp[test_file._id]
view_timestamp = timezone.now()
assert_datetime_equal(user_timestamp, view_timestamp)
# Regression test for https://openscience.atlassian.net/browse/OSF-5193
# moved from tests/test_comments.py
def test_find_unread_includes_edited_comments(self):
project = ProjectFactory()
user = AuthUserFactory()
project.add_contributor(user, save=True)
comment = CommentFactory(node=project, user=project.creator)
n_unread = Comment.find_n_unread(user=user, node=project, page='node')
assert n_unread == 1
url = project.api_url_for('update_comments_timestamp')
payload = {'page': 'node', 'rootId': project._id}
self.app.put_json(url, payload, auth=user.auth)
user.reload()
n_unread = Comment.find_n_unread(user=user, node=project, page='node')
assert n_unread == 0
# Edit previously read comment
comment.edit(
auth=Auth(project.creator),
content='edited',
save=True
)
n_unread = Comment.find_n_unread(user=user, node=project, page='node')
assert n_unread == 1
class TestResetPassword(OsfTestCase):
def setUp(self):
super(TestResetPassword, self).setUp()
self.user = AuthUserFactory()
self.another_user = AuthUserFactory()
self.osf_key_v2 = generate_verification_key(verification_type='password')
self.user.verification_key_v2 = self.osf_key_v2
self.user.verification_key = None
self.user.save()
self.get_url = web_url_for(
'reset_password_get',
uid=self.user._id,
token=self.osf_key_v2['token']
)
self.get_url_invalid_key = web_url_for(
'reset_password_get',
uid=self.user._id,
token=generate_verification_key()
)
self.get_url_invalid_user = web_url_for(
'reset_password_get',
uid=self.another_user._id,
token=self.osf_key_v2['token']
)
# successfully load reset password page
def test_reset_password_view_returns_200(self):
res = self.app.get(self.get_url)
assert_equal(res.status_code, 200)
# raise http 400 error
def test_reset_password_view_raises_400(self):
res = self.app.get(self.get_url_invalid_key, expect_errors=True)
assert_equal(res.status_code, 400)
res = self.app.get(self.get_url_invalid_user, expect_errors=True)
assert_equal(res.status_code, 400)
self.user.verification_key_v2['expires'] = timezone.now()
self.user.save()
res = self.app.get(self.get_url, expect_errors=True)
assert_equal(res.status_code, 400)
# successfully reset password
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_can_reset_password_if_form_success(self, mock_service_validate):
# load reset password page and submit email
res = self.app.get(self.get_url)
form = res.forms['resetPasswordForm']
form['password'] = 'newpassword'
form['password2'] = 'newpassword'
res = form.submit()
# check request URL is /resetpassword with username and new verification_key_v2 token
request_url_path = res.request.path
assert_in('resetpassword', request_url_path)
assert_in(self.user._id, request_url_path)
assert_not_in(self.user.verification_key_v2['token'], request_url_path)
# check verification_key_v2 for OSF is destroyed and verification_key for CAS is in place
self.user.reload()
assert_equal(self.user.verification_key_v2, {})
assert_not_equal(self.user.verification_key, None)
# check redirection to CAS login with username and the new verification_key(CAS)
assert_equal(res.status_code, 302)
location = res.headers.get('Location')
assert_true('login?service=' in location)
assert_true('username={}'.format(urllib.quote(self.user.username, safe='@')) in location)
assert_true('verification_key={}'.format(self.user.verification_key) in location)
# check if password was updated
self.user.reload()
assert_true(self.user.check_password('newpassword'))
# check if verification_key is destroyed after service validation
mock_service_validate.return_value = cas.CasResponse(
authenticated=True,
user=self.user._id,
attributes={'accessToken': fake.md5()}
)
ticket = fake.md5()
service_url = 'http://accounts.osf.io/?ticket=' + ticket
cas.make_response_from_ticket(ticket, service_url)
self.user.reload()
assert_equal(self.user.verification_key, None)
# log users out before they land on reset password page
def test_reset_password_logs_out_user(self):
# visit reset password link while another user is logged in
res = self.app.get(self.get_url, auth=self.another_user.auth)
# check redirection to CAS logout
assert_equal(res.status_code, 302)
location = res.headers.get('Location')
assert_not_in('reauth', location)
assert_in('logout?service=', location)
assert_in('resetpassword', location)
@pytest.mark.enable_quickfiles_creation
@mock.patch('website.views.PROXY_EMBER_APPS', False)
class TestResolveGuid(OsfTestCase):
def setUp(self):
super(TestResolveGuid, self).setUp()
def test_preprint_provider_without_domain(self):
provider = PreprintProviderFactory(domain='')
preprint = PreprintFactory(provider=provider)
url = web_url_for('resolve_guid', _guid=True, guid=preprint._id)
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_equal(
res.request.path,
'/{}/'.format(preprint._id)
)
def test_preprint_provider_with_domain_without_redirect(self):
domain = 'https://test.com/'
provider = PreprintProviderFactory(_id='test', domain=domain, domain_redirect_enabled=False)
preprint = PreprintFactory(provider=provider)
url = web_url_for('resolve_guid', _guid=True, guid=preprint._id)
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_equal(
res.request.path,
'/{}/'.format(preprint._id)
)
def test_preprint_provider_with_domain_with_redirect(self):
domain = 'https://test.com/'
provider = PreprintProviderFactory(_id='test', domain=domain, domain_redirect_enabled=True)
preprint = PreprintFactory(provider=provider)
url = web_url_for('resolve_guid', _guid=True, guid=preprint._id)
res = self.app.get(url)
assert_is_redirect(res)
assert_equal(res.status_code, 301)
assert_equal(
res.headers['location'],
'{}{}/'.format(domain, preprint._id)
)
assert_equal(
res.request.path,
'/{}/'.format(preprint._id)
)
def test_preprint_provider_with_osf_domain(self):
provider = PreprintProviderFactory(_id='osf', domain='https://osf.io/')
preprint = PreprintFactory(provider=provider)
url = web_url_for('resolve_guid', _guid=True, guid=preprint._id)
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_equal(
res.request.path,
'/{}/'.format(preprint._id)
)
def test_deleted_quick_file_gone(self):
user = AuthUserFactory()
quickfiles = QuickFilesNode.objects.get(creator=user)
osfstorage = quickfiles.get_addon('osfstorage')
root = osfstorage.get_root()
test_file = root.append_file('soon_to_be_deleted.txt')
guid = test_file.get_guid(create=True)._id
test_file.delete()
url = web_url_for('resolve_guid', _guid=True, guid=guid)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, http.GONE)
assert_equal(res.request.path, '/{}/'.format(guid))
class TestConfirmationViewBlockBingPreview(OsfTestCase):
def setUp(self):
super(TestConfirmationViewBlockBingPreview, self).setUp()
self.user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534+ (KHTML, like Gecko) BingPreview/1.0b'
# reset password link should fail with BingPreview
def test_reset_password_get_returns_403(self):
user = UserFactory()
osf_key_v2 = generate_verification_key(verification_type='password')
user.verification_key_v2 = osf_key_v2
user.verification_key = None
user.save()
reset_password_get_url = web_url_for(
'reset_password_get',
uid=user._id,
token=osf_key_v2['token']
)
res = self.app.get(
reset_password_get_url,
expect_errors=True,
headers={
'User-Agent': self.user_agent,
}
)
assert_equal(res.status_code, 403)
# new user confirm account should fail with BingPreview
def test_confirm_email_get_new_user_returns_403(self):
user = OSFUser.create_unconfirmed('unconfirmed@cos.io', 'abCD12#$', 'Unconfirmed User')
user.save()
confirm_url = user.get_confirmation_url('unconfirmed@cos.io', external=False)
res = self.app.get(
confirm_url,
expect_errors=True,
headers={
'User-Agent': self.user_agent,
}
)
assert_equal(res.status_code, 403)
# confirmation for adding new email should fail with BingPreview
def test_confirm_email_add_email_returns_403(self):
user = UserFactory()
user.add_unconfirmed_email('unconfirmed@cos.io')
user.save()
confirm_url = user.get_confirmation_url('unconfirmed@cos.io', external=False) + '?logout=1'
res = self.app.get(
confirm_url,
expect_errors=True,
headers={
'User-Agent': self.user_agent,
}
)
assert_equal(res.status_code, 403)
# confirmation for merging accounts should fail with BingPreview
def test_confirm_email_merge_account_returns_403(self):
user = UserFactory()
user_to_be_merged = UserFactory()
user.add_unconfirmed_email(user_to_be_merged.username)
user.save()
confirm_url = user.get_confirmation_url(user_to_be_merged.username, external=False) + '?logout=1'
res = self.app.get(
confirm_url,
expect_errors=True,
headers={
'User-Agent': self.user_agent,
}
)
assert_equal(res.status_code, 403)
# confirmation for new user claiming contributor should fail with BingPreview
def test_claim_user_form_new_user(self):
referrer = AuthUserFactory()
project = ProjectFactory(creator=referrer, is_public=True)
given_name = fake.name()
given_email = fake_email()
user = project.add_unregistered_contributor(
fullname=given_name,
email=given_email,
auth=Auth(user=referrer)
)
project.save()
claim_url = user.get_claim_url(project._primary_key)
res = self.app.get(
claim_url,
expect_errors=True,
headers={
'User-Agent': self.user_agent,
}
)
assert_equal(res.status_code, 403)
# confirmation for existing user claiming contributor should fail with BingPreview
def test_claim_user_form_existing_user(self):
referrer = AuthUserFactory()
project = ProjectFactory(creator=referrer, is_public=True)
auth_user = AuthUserFactory()
pending_user = project.add_unregistered_contributor(
fullname=auth_user.fullname,
email=None,
auth=Auth(user=referrer)
)
project.save()
claim_url = pending_user.get_claim_url(project._primary_key)
res = self.app.get(
claim_url,
auth=auth_user.auth,
expect_errors=True,
headers={
'User-Agent': self.user_agent,
}
)
assert_equal(res.status_code, 403)
# account creation confirmation for ORCiD login should fail with BingPreview
def test_external_login_confirm_email_get_create_user(self):
name, email = fake.name(), fake_email()
provider_id = fake.ean()
external_identity = {
'service': {
provider_id: 'CREATE'
}
}
user = OSFUser.create_unconfirmed(
username=email,
password=str(fake.password()),
fullname=name,
external_identity=external_identity,
)
user.save()
create_url = user.get_confirmation_url(
user.username,
external_id_provider='service',
destination='dashboard'
)
res = self.app.get(
create_url,
expect_errors=True,
headers={
'User-Agent': self.user_agent,
}
)
assert_equal(res.status_code, 403)
# account linking confirmation for ORCiD login should fail with BingPreview
def test_external_login_confirm_email_get_link_user(self):
user = UserFactory()
provider_id = fake.ean()
user.external_identity = {
'service': {
provider_id: 'LINK'
}
}
user.add_unconfirmed_email(user.username, external_identity='service')
user.save()
link_url = user.get_confirmation_url(
user.username,
external_id_provider='service',
destination='dashboard'
)
res = self.app.get(
link_url,
expect_errors=True,
headers={
'User-Agent': self.user_agent,
}
)
assert_equal(res.status_code, 403)
if __name__ == '__main__':
unittest.main()
|
pattisdr/osf.io
|
tests/test_views.py
|
Python
|
apache-2.0
| 212,089
|
[
"Brian",
"VisIt"
] |
50054c84e42246df4ac8376dbf4eb5e78e93d0a964a509de6315a949b30622be
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RSeqlogo(RPackage):
"""seqLogo takes the position weight matrix of a DNA sequence motif and
plots the corresponding sequence logo as introduced by Schneider and
Stephens (1990)."""
homepage = "https://bioconductor.org/packages/seqLogo/"
url = "https://git.bioconductor.org/packages/seqLogo"
version('1.44.0', git='https://git.bioconductor.org/packages/seqLogo', commit='4cac14ff29f413d6de1a9944eb5d21bfe5045fac')
depends_on('r@3.4.3:3.4.9', when='@1.44.0')
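# Hedged usage sketch (not part of the original recipe): with this package file in
# a Spack repo, the library would typically be installed from the command line:
#   spack install r-seqlogo@1.44.0
# The exact spec syntax depends on the local Spack configuration.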
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/r-seqlogo/package.py
|
Python
|
lgpl-2.1
| 1,762
|
[
"Bioconductor"
] |
e11bb2863bdcfd351a088d17679aad9d8128229e2e20bfaaca968abc6cb3720e
|
"""
=================
Lorentzian Fitter
=================
"""
import numpy
from numpy.ma import median
from numpy import pi
from ...mpfit import mpfit
from . import fitter
from astropy.extern.six.moves import xrange
class LorentzianFitter(fitter.SimpleFitter):
def __init__(self):
self.npars = 3
self.npeaks = 1
self.onepeaklorentzfit = self._fourparfitter(self.onepeaklorentzian)
def __call__(self,*args,**kwargs):
return self.multilorentzfit(*args,**kwargs)
def onedlorentzian(x,H,A,dx,w):
"""
Returns a 1-dimensional lorentzian of form
H+A/(2*pi)*w/((x-dx)**2 + (w/2.0)**2)
"""
return H+A/(2*pi)*w/((x-dx)**2 + (w/2.0)**2)
def n_lorentzian(pars=None,a=None,dx=None,width=None):
"""
Returns a function that sums over N lorentzians, where N is the length of
a,dx,sigma *OR* N = len(pars) / 3
The background "height" is assumed to be zero (you must "baseline" your
spectrum before fitting)
pars - a list with len(pars) = 3n, assuming a,dx,sigma repeated
dx - offset (velocity center) values
width - line widths (Lorentzian FWHM)
a - amplitudes
"""
if pars is not None and len(pars) % 3 == 0:
a = [pars[ii] for ii in xrange(0,len(pars),3)]
dx = [pars[ii] for ii in xrange(1,len(pars),3)]
width = [pars[ii] for ii in xrange(2,len(pars),3)]
elif not(len(dx) == len(width) == len(a)):
raise ValueError("Wrong array lengths! dx: %i width %i a: %i" % (len(dx),len(width),len(a)))
def L(x):
v = numpy.zeros(len(x))
for i in range(len(dx)):
v += a[i] / (2*pi) * width[i] / ((x-dx[i])**2 + (width[i]/2.0)**2)
return v
return L
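# Hedged example (illustrative, not from the original source): summing two
# Lorentzian components via the closure returned by n_lorentzian; parameter
# triplets are (amplitude, center, FWHM).
#   >>> import numpy
#   >>> L = n_lorentzian(pars=[1.0, 0.0, 0.5, 0.5, 2.0, 1.0])
#   >>> x = numpy.linspace(-5, 5, 101)
#   >>> profile = L(x)  # ndarray with both components summed at each x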
def multilorentzfit(self):
"""
not implemented
"""
print "Not implemented"
|
mikelum/pyspeckit
|
pyspeckit/spectrum/models/lorentzian.py
|
Python
|
mit
| 1,902
|
[
"Gaussian"
] |
440c79e90e0ed2519ca73f5bb613476a56470ad8242c8ab25d1c85c595693a31
|
# Copyright (c) 2015, Scott J Maddox. All rights reserved.
# Use of this source code is governed by the BSD-3-Clause
# license that can be found in the LICENSE file.
'''
Combine everything into `_fdint.pyx` and `_fdint.pxd`, in order to take advantage
of inlining.
'''
import os
import sys
fdint_dir = os.path.join(os.path.dirname(__file__), '../fdint/')
templates_dir = os.path.join(os.path.dirname(__file__), 'templates')
fdint_pyx_path = os.path.join(fdint_dir, '_fdint.pyx')
fdint_pxd_path = os.path.join(fdint_dir, '_fdint.pxd')
bases = [
'_fd',
'_fd_whole',
'_vfd',
'_dfd',
'_vdfd',
'_ifd',
'_vifd',
'_gfd',
'_vgfd',
'_dgfd',
'_vdgfd',
'_nonparabolic',
'_vnonparabolic',
'_dnonparabolic',
'_vdnonparabolic',
]
with open(fdint_pyx_path, 'w') as f:
f.write("""# Copyright (c) 2015, Scott J Maddox. All rights reserved.
# Use of this source code is governed by the BSD-3-Clause
# license that can be found in the LICENSE file.
# This file was generated by `scripts/gen__fdint.py`.
# Do not edit this file directly, or your changes will be lost.
'''
Precise and fast Fermi-Dirac integrals of integer and half integer order.
[1] T. Fukushima, "Precise and fast computation of Fermi-Dirac integral of
integer and half integer order by piecewise minimax rational approximation,"
Applied Mathematics and Computation, vol. 259, pp. 708-729, May 2015.
DOI: 10.1016/j.amc.2015.03.009
[2] T. Fukushima, "Precise and fast computation of inverse Fermi-Dirac
integral of order 1/2 by minimax rational function approximation,"
Applied Mathematics and Computation, vol. 259, pp. 698-707, May 2015.
DOI: 10.1016/j.amc.2015.03.015
[3] T. Fukushima, "Precise and fast computation of generalized Fermi-Dirac
integral by parameter polynomial approximation," 2014.
DOI: 10.13140/2.1.1094.6566
Almost all of the low-level functions for this package are contained in this
module in order to take advantage of inlining.
'''
""")
f.write('cimport cython\n')
f.write('from libc.math cimport exp, log, sqrt\n')
f.write('cimport numpy as np\n')
f.write('import numpy\n')
f.write('import warnings\n')
f.write('\n')
for base in bases:
path = os.path.join(fdint_dir, base+'.pyx')
with open(path, 'r') as fin:
for line in fin:
f.write(line)
os.remove(path)
with open(fdint_pxd_path, 'w') as f:
f.write('cimport numpy as np\n')
for base in bases:
path = os.path.join(fdint_dir, base+'.pxd')
with open(path, 'r') as fin:
for line in fin:
f.write(line)
os.remove(path)
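# Hedged usage note (assumption, not stated in the original source): this generator
# is normally run from the repository root before building, e.g.
#   python scripts/gen__fdint.py
# after which fdint/_fdint.pyx and fdint/_fdint.pxd exist and the per-base
# _fd*.pyx/_fd*.pxd fragments have been consumed (removed).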
|
scott-maddox/fdint
|
scripts/gen__fdint.py
|
Python
|
bsd-3-clause
| 2,706
|
[
"DIRAC"
] |
e3f60b31b675b78735370f25671cbc35986c28d9b438415edc909f1b329417dd
|
import warnings
warnings.simplefilter('always', DeprecationWarning)
import os
import functools
import os.path
import re
import urllib
import urllib2
import json
import imp
import random
import tabulate
from connection import H2OConnection
from job import H2OJob
from expr import ExprNode
from frame import H2OFrame, _py_tmp_key
from model import H2OBinomialModel,H2OAutoEncoderModel,H2OClusteringModel,H2OMultinomialModel,H2ORegressionModel
import h2o_model_builder
__PROGRESS_BAR__ = True # display & update progress bar while polling
def lazy_import(path):
"""
Import a single file or collection of files.
:param path: A path to a data file (remote or local).
:return: A new H2OFrame
"""
if isinstance(path,(list,tuple)): return [_import(p)[0] for p in path]
elif os.path.isdir(path): return _import(path)
else: return [_import(path)[0]]
def _import(path):
j = H2OConnection.get_json(url_suffix="ImportFiles", path=path)
if j['fails']: raise ValueError("ImportFiles of " + path + " failed on " + str(j['fails']))
return j['destination_frames']
def upload_file(path, destination_frame=""):
"""
Upload a dataset at the path given from the local machine to the H2O cluster.
:param path: A path specifying the location of the data to upload.
:param destination_frame: The name of the H2O Frame in the H2O Cluster.
:return: A new H2OFrame
"""
fui = {"file": os.path.abspath(path)}
destination_frame = _py_tmp_key() if destination_frame == "" else destination_frame
H2OConnection.post_json(url_suffix="PostFile", file_upload_info=fui,destination_frame=destination_frame)
return H2OFrame(raw_id=destination_frame)
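# Hedged example (the path and frame name below are hypothetical): pushing a local
# CSV into the cluster and binding it to a named frame.
#   >>> fr = upload_file("/tmp/iris.csv", destination_frame="iris")
#   >>> fr.dim  # [rows, cols] of the uploaded frame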
def import_file(path=None):
"""
Import a frame from a file (remote or local machine). If you run H2O on Hadoop, you can access HDFS.
:param path: A path specifying the location of the data to import.
:return: A new H2OFrame
"""
return H2OFrame(file_path=path)
def parse_setup(raw_frames):
"""
:param raw_frames: A collection of imported file frames
:return: A ParseSetup "object"
"""
# The H2O backend only accepts things that are quoted
if isinstance(raw_frames, unicode): raw_frames = [raw_frames]
j = H2OConnection.post_json(url_suffix="ParseSetup", source_frames=[_quoted(id) for id in raw_frames])
return j
def parse(setup, h2o_name, first_line_is_header=(-1, 0, 1)):
"""
Trigger a parse; blocking; removeFrame just keeps the Vecs.
:param setup: The result of calling parse_setup.
:param h2o_name: The name of the H2O Frame on the back end.
:param first_line_is_header: -1 means data, 0 means guess, 1 means header.
:return: A new parsed object
"""
# Parse parameters (None values provided by setup)
p = { 'destination_frame' : h2o_name,
'parse_type' : None,
'separator' : None,
'single_quotes' : None,
'check_header' : None,
'number_columns' : None,
'chunk_size' : None,
'delete_on_done' : True,
'blocking' : False,
}
if isinstance(first_line_is_header, tuple):
first_line_is_header = setup["check_header"]
if setup["column_names"]:
setup["column_names"] = [_quoted(name) for name in setup["column_names"]]
p["column_names"] = None
if setup["column_types"]:
setup["column_types"] = [_quoted(name) for name in setup["column_types"]]
p["column_types"] = None
if setup["na_strings"]:
setup["na_strings"] = [[_quoted(na) for na in col] if col is not None else [] for col in setup["na_strings"]]
p["na_strings"] = None
# update the parse parameters with the parse_setup values
p.update({k: v for k, v in setup.iteritems() if k in p})
p["check_header"] = first_line_is_header
# Extract only 'name' from each src in the array of srcs
p['source_frames'] = [_quoted(src['name']) for src in setup['source_frames']]
# Request blocking parse
j = H2OJob(H2OConnection.post_json(url_suffix="Parse", **p), "Parse").poll()
return j.jobs
def parse_raw(setup, id=None, first_line_is_header=(-1,0,1)):
"""
Used in conjunction with lazy_import and parse_setup in order to make alterations before parsing.
:param setup: Result of h2o.parse_setup
:param id: An optional id for the frame.
:param first_line_is_header: -1,0,1 if the first line is to be used as the header
:return: An H2OFrame object
"""
id = setup["destination_frame"]
fr = H2OFrame()
parsed = parse(setup, id, first_line_is_header)
fr._nrows = parsed['rows']
fr._col_names = parsed['column_names']
fr._ncols = len(fr._col_names)
fr._computed = True
fr._id = id
fr._keep = True
return fr
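# Hedged example of the lazy import -> setup -> parse pipeline above (the path and
# the column coercion are hypothetical):
#   >>> raw = lazy_import("/data/airlines.csv")
#   >>> setup = parse_setup(raw)
#   >>> setup["column_types"][0] = "enum"  # edit setup before the actual parse
#   >>> fr = parse_raw(setup)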
def _quoted(key):
if key == None: return "\"\""
is_quoted = len(re.findall(r'\"(.+?)\"', key)) != 0
key = key if is_quoted else "\"" + key + "\""
return key
def assign(data,id):
rapids(ExprNode(",", ExprNode("gput", id, data), ExprNode("removeframe", data))._eager())
data._id = id
return data
def which(condition):
"""
:param condition: A conditional statement.
:return: A H2OFrame of 1 column filled with 0-based indices for which the condition is True
"""
return H2OFrame(expr=ExprNode("h2o.which",condition,False))._frame()
def ifelse(test,yes,no):
"""
Semantically equivalent to R's ifelse.
Based on the booleans in the test vector, the output has the values of the yes and no
vectors interleaved (or merged together).
:param test: A "test" H2OFrame
:param yes: A "yes" H2OFrame
:param no: A "no" H2OFrame
:return: An H2OFrame
"""
return H2OFrame(expr=ExprNode("ifelse",test,yes,no))._frame()
def get_future_model(future_model):
"""
Waits for the future model to finish building, and then returns the model.
:param future_model: an H2OModelFuture object
:return: a resolved model (i.e. an H2OBinomialModel, H2ORegressionModel, H2OMultinomialModel, ...)
"""
return h2o_model_builder._resolve_model(future_model)
def get_model(model_id):
"""
Return the specified model
:param model_id: The model identification in h2o
"""
model_json = H2OConnection.get_json("Models/"+model_id)["models"][0]
model_type = model_json["output"]["model_category"]
if model_type=="Binomial": return H2OBinomialModel(model_id, model_json)
elif model_type=="Clustering": return H2OClusteringModel(model_id, model_json)
elif model_type=="Regression": return H2ORegressionModel(model_id, model_json)
elif model_type=="Multinomial": return H2OMultinomialModel(model_id, model_json)
elif model_type=="AutoEncoder": return H2OAutoEncoderModel(model_id, model_json)
else: raise NotImplementedError(model_type)
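# Hedged example (the model id below is hypothetical): fetching a built model by id
# and letting the reported model_category dispatch pick the wrapper class.
#   >>> m = get_model("deeplearning_model_1")
#   >>> isinstance(m, (H2OBinomialModel, H2ORegressionModel, H2OMultinomialModel))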
def get_frame(frame_id):
"""
Obtain a handle to the frame in H2O with the frame_id key.
:return: An H2OFrame
"""
return H2OFrame.get_frame(frame_id)
"""
Here are some testing utilities for running the pyunit tests in conjunction with run.py.
run.py issues an ip and port as a string: "<ip>:<port>".
The expected value of sys_args[1] is "<ip>:<port>"
"""
"""
All tests MUST have the following structure:
import sys
sys.path.insert(1, "..") # may vary depending on this test's position relative to h2o-py
import h2o
def my_test(ip=None, port=None):
...test filling...
if __name__ == "__main__":
h2o.run_test(sys.argv, my_test)
So each test must have an ip and port
"""
# TODO/FIXME: need to create an internal testing framework for python ... internal IP addresses should NOT be published as part of package!
# HDFS helpers
def get_h2o_internal_hdfs_name_node():
return "172.16.2.176"
def is_running_internal_to_h2o():
url = "http://{0}:50070".format(get_h2o_internal_hdfs_name_node())
try:
urllib2.urlopen(urllib2.Request(url))
internal = True
except:
internal = False
return internal
def check_models(model1, model2, use_cross_validation=False, op='e'):
"""
Check that the given models are equivalent
:param model1:
:param model2:
:param use_cross_validation: boolean. if True, use validation metrics to determine model equality. Otherwise, use
training metrics.
:param op: comparison operator to use. 'e':==, 'g':>, 'ge':>=
:return: None. Throw meaningful error messages if the check fails
"""
# 1. Check model types
model1_type = type(model1)
model2_type = type(model2)
assert model1_type == model2_type, "The model types differ. The first model is of type {0} and the second " \
"models is of type {1}.".format(model1_type, model2_type)
# 2. Check model metrics
if isinstance(model1,H2OBinomialModel): # 2a. Binomial
# F1
f1_1 = model1.F1(xval=use_cross_validation)
f1_2 = model2.F1(xval=use_cross_validation)
if op == 'e': assert f1_1[0][1] == f1_2[0][1], "The first model has an F1 of {0} and the second model has an F1 of " \
"{1}. Expected the first to be == to the second.".format(f1_1[0][1], f1_2[0][1])
elif op == 'g': assert f1_1[0][1] > f1_2[0][1], "The first model has an F1 of {0} and the second model has an F1 of " \
"{1}. Expected the first to be > than the second.".format(f1_1[0][1], f1_2[0][1])
elif op == 'ge': assert f1_1[0][1] >= f1_2[0][1], "The first model has an F1 of {0} and the second model has an F1 of " \
"{1}. Expected the first to be >= than the second.".format(f1_1[0][1], f1_2[0][1])
elif isinstance(model1,H2ORegressionModel): # 2b. Regression
# MSE
mse1 = model1.mse(xval=use_cross_validation)
mse2 = model2.mse(xval=use_cross_validation)
if op == 'e': assert mse1 == mse2, "The first model has an MSE of {0} and the second model has an MSE of " \
"{1}. Expected the first to be == to the second.".format(mse1, mse2)
elif op == 'g': assert mse1 > mse2, "The first model has an MSE of {0} and the second model has an MSE of " \
"{1}. Expected the first to be > than the second.".format(mse1, mse2)
elif op == 'ge': assert mse1 >= mse2, "The first model has an MSE of {0} and the second model has an MSE of " \
"{1}. Expected the first to be >= than the second.".format(mse1, mse2)
elif isinstance(model1,H2OMultinomialModel): # 2c. Multinomial
# hit-ratio
pass
elif isinstance(model1,H2OClusteringModel): # 2d. Clustering
# totss
totss1 = model1.totss(xval=use_cross_validation)
totss2 = model2.totss(xval=use_cross_validation)
if op == 'e': assert totss1 == totss2, "The first model has an TOTSS of {0} and the second model has an " \
"TOTSS of {1}. Expected the first to be == to the second.".format(totss1,
totss2)
elif op == 'g': assert totss1 > totss2, "The first model has an TOTSS of {0} and the second model has an " \
"TOTSS of {1}. Expected the first to be > than the second.".format(totss1,
totss2)
elif op == 'ge': assert totss1 >= totss2, "The first model has an TOTSS of {0} and the second model has an " \
"TOTSS of {1}. Expected the first to be >= than the second." \
"".format(totss1, totss2)
def check_dims_values(python_obj, h2o_frame, rows, cols):
"""
Check that the dimensions and values of the python object and H2OFrame are equivalent. Assumes that the python object
conforms to the rules specified in the h2o frame documentation.
:param python_obj: a (nested) list, tuple, dictionary, numpy.ndarray, ,or pandas.DataFrame
:param h2o_frame: an H2OFrame
:param rows: number of rows
:param cols: number of columns
:return: None
"""
h2o_rows, h2o_cols = h2o_frame.dim
assert h2o_rows == rows and h2o_cols == cols, "failed dim check! h2o_rows:{0} rows:{1} h2o_cols:{2} cols:{3}" \
"".format(h2o_rows, rows, h2o_cols, cols)
if isinstance(python_obj, (list, tuple)):
for r in range(rows):
for c in range(cols):
pval = python_obj[r][c] if rows > 1 else python_obj[c]
hval = h2o_frame[r,c]
assert pval == hval, "expected H2OFrame to have the same values as the python object for row {0} and column " \
"{1}, but h2o got {2} and python got {3}.".format(r, c, hval, pval)
elif isinstance(python_obj, dict):
for r in range(rows):
for k in python_obj.keys():
pval = python_obj[k][r] if hasattr(python_obj[k],'__iter__') else python_obj[k]
hval = h2o_frame[r,k]
assert pval == hval, "expected H2OFrame to have the same values as the python object for row {0} and column " \
"{1}, but h2o got {2} and python got {3}.".format(r, k, hval, pval)
def np_comparison_check(h2o_data, np_data, num_elements):
"""
Check values achieved by h2o against values achieved by numpy
:param h2o_data: an H2OFrame or H2OVec
:param np_data: a numpy array
:param num_elements: number of elements to compare
:return: None
"""
# Check for numpy
try:
imp.find_module('numpy')
except ImportError:
assert False, "failed comparison check because unable to import numpy"
import numpy as np
rows, cols = h2o_data.dim
for i in range(num_elements):
r = random.randint(0,rows-1)
c = random.randint(0,cols-1)
h2o_val = h2o_data[r,c] if isinstance(h2o_data,H2OFrame) else h2o_data[r]
np_val = np_data[r,c] if len(np_data.shape) > 1 else np_data[r]
if isinstance(np_val, np.bool_): np_val = bool(np_val) # numpy haz special bool type :(
assert np.absolute(h2o_val - np_val) < 1e-6, \
"failed comparison check! h2o computed {0} and numpy computed {1}".format(h2o_val, np_val)
def run_test(sys_args, test_to_run):
# import pkg_resources
# ver = pkg_resources.get_distribution("h2o").version
# print "H2O PYTHON PACKAGE VERSION: " + str(ver)
ip, port = sys_args[2].split(":")
init(ip,port,strict_version_check=False)
log_and_echo("------------------------------------------------------------")
log_and_echo("")
log_and_echo("STARTING TEST: "+str(ou()))
log_and_echo("")
log_and_echo("------------------------------------------------------------")
num_keys = store_size()
try:
if len(sys_args) > 3 and sys_args[3] == "--ipynb": ipy_notebook_exec(sys_args[4],save_and_norun=False)
else: test_to_run(ip, port)
finally:
remove_all()
if keys_leaked(num_keys): print "Leaked Keys!"
def ou():
"""
Where is my baguette!?
:return: the name of the baguette. oh uhr uhr huhr
"""
from inspect import stack
return stack()[2][1]
def no_progress():
"""
Disable the progress bar from flushing to stdout. The completed progress bar is printed
when a job is complete so as to demarcate a log file.
:return: None
"""
global __PROGRESS_BAR__
__PROGRESS_BAR__=False
def do_progress():
"""
Enable the progress bar. (Progress bar is enabled by default).
:return: None
"""
global __PROGRESS_BAR__
__PROGRESS_BAR__=True
def log_and_echo(message):
"""
Log a message on the server-side logs
This is helpful when running several pieces of work one after the other on a single H2O
cluster and you want to make a notation in the H2O server side log where one piece of
work ends and the next piece of work begins.
Sends a message to H2O for logging. Generally used for debugging purposes.
:param message: A character string with the message to write to the log.
:return: None
"""
if message is None: message = ""
H2OConnection.post_json("LogAndEcho", message=message)
def ipy_notebook_exec(path,save_and_norun=False):
notebook = json.load(open(path))
program = ''
for block in ipy_blocks(notebook):
for line in ipy_lines(block):
if "h2o.init" not in line:
program += line if '\n' in line else line + '\n'
if save_and_norun:
with open(os.path.basename(path).split('ipynb')[0]+'py',"w") as f:
f.write(program)
else:
d={}
exec program in d # safe, but horrible (exec is horrible)
def ipy_blocks(notebook):
if 'worksheets' in notebook.keys():
return notebook['worksheets'][0]['cells'] # just take the first worksheet
elif 'cells' in notebook.keys():
return notebook['cells']
else:
raise NotImplementedError, "ipython notebook cell/block json format not handled"
def ipy_lines(block):
if 'source' in block.keys():
return block['source']
elif 'input' in block.keys():
return block['input']
else:
raise NotImplementedError, "ipython notebook source/line json format not handled"
def remove(object):
"""
Remove object from H2O. This is a "hard" delete of the object. It removes all subparts.
:param object: The object pointing to the object to be removed.
:return: None
"""
if object is None:
raise ValueError("remove with no object is not supported, for your protection")
if isinstance(object, H2OFrame): H2OConnection.delete("DKV/"+object._id)
if isinstance(object, str): H2OConnection.delete("DKV/"+object)
def remove_all():
"""
Remove all objects from H2O.
:return None
"""
H2OConnection.delete("DKV")
def removeFrameShallow(key):
"""
Do a shallow DKV remove of the frame (does not remove any internal Vecs).
This is a "soft" delete. Just removes the top level pointer, but all big data remains!
:param key: A Frame Key to be removed
:return: None
"""
rapids("(removeframe '"+key+"')")
return None
def rapids(expr, id=None):
"""
Fire off a Rapids expression.
:param expr: The rapids expression (ascii string).
:return: The JSON response of the Rapids execution
"""
if isinstance(expr, list): expr = ExprNode._collapse_sb(expr)
expr = "(= !{} {})".format(id,expr) if id is not None else expr
result = H2OConnection.post_json("Rapids", ast=urllib.quote(expr), _rest_version=99)
if result['error'] is not None:
raise EnvironmentError("rapids expression not evaluated: {0}".format(str(result['error'])))
return result
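# Hedged example (the expression below is illustrative; exact Rapids syntax depends
# on the backend version):
#   >>> res = rapids("(ls)")
#   >>> res["error"] is None  # True on successful evaluation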
def ls():
"""
List Keys on an H2O Cluster
:return: Returns a list of keys in the current H2O instance
"""
return H2OFrame(expr=ExprNode("ls")).as_data_frame()
def frame(frame_id, exclude=""):
"""
Retrieve metadata for a id that points to a Frame.
:param frame_id: A pointer to a Frame in H2O.
:return: Meta information on the frame
"""
return H2OConnection.get_json("Frames/" + urllib.quote(frame_id+exclude))
def frames():
"""
Retrieve all the Frames.
:return: Meta information on the frames
"""
return H2OConnection.get_json("Frames")
def download_pojo(model,path="", get_jar=True):
"""
Download the POJO for this model to the directory specified by path (no trailing slash!).
If path is "", then dump to screen.
:param model: Retrieve this model's scoring POJO.
:param path: An absolute path to the directory where POJO should be saved.
:param get_jar: Retrieve the h2o genmodel jar also.
:return: None
"""
java = H2OConnection.get( "Models.java/"+model._id )
file_path = path + "/" + model._id + ".java"
if path == "": print java.text
else:
with open(file_path, 'w') as f:
f.write(java.text)
if get_jar and path!="":
url = H2OConnection.make_url("h2o-genmodel.jar")
filename = path + "/" + "h2o-genmodel.jar"
response = urllib2.urlopen(url)
with open(filename, "w") as f:
f.write(response.read())
def download_csv(data, filename):
"""
Download an H2O data set to a CSV file on the local disk.
Warning: Files located on the H2O server may be very large! Make sure you have enough hard drive space to accommodate the entire file.
:param data: An H2OFrame object to be downloaded.
:param filename: A string indicating the name that the CSV file should be saved to.
:return: None
"""
data._eager()
if not isinstance(data, H2OFrame): raise ValueError("`data` argument must be an H2OFrame, but got " + str(type(data)))
url = "http://{}:{}/3/DownloadDataset?frame_id={}".format(H2OConnection.ip(),H2OConnection.port(),data._id)
with open(filename, 'w') as f: f.write(urllib2.urlopen(url).read())
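# Hedged example (the local path is hypothetical; fr is an existing H2OFrame):
#   >>> download_csv(fr, "/tmp/frame_dump.csv")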
def download_all_logs(dirname=".",filename=None):
"""
Download H2O Log Files to Disk
:param dirname: (Optional) A character string indicating the directory that the log file should be saved in.
:param filename: (Optional) A string indicating the name that the log file should be saved to.
:return: path of logs written (as a string)
"""
url = 'http://{}:{}/Logs/download'.format(H2OConnection.ip(),H2OConnection.port())
response = urllib2.urlopen(url)
if not os.path.exists(dirname): os.mkdir(dirname)
if filename == None:
for h in response.headers.headers:
if 'filename=' in h:
filename = h.split("filename=")[1].strip()
break
path = os.path.join(dirname,filename)
print "Writing H2O logs to " + path
with open(path, 'w') as f: f.write(urllib2.urlopen(url).read())
return path
def save_model(model, path="", force=False):
"""
Save an H2O Model Object to Disk.
:param model: The model object to save.
:param path: A path to save the model at (hdfs, s3, local)
:param force: Overwrite destination directory in case it exists or throw exception if set to false.
:return: the path of the saved model (string)
"""
path=os.path.join(os.getcwd() if path=="" else path,model._id)
return H2OConnection.get_json("Models.bin/"+model._id,dir=path,force=force,_rest_version=99)["dir"]
def load_model(path):
"""
Load a saved H2O model from disk.
Example:
>>> path = h2o.save_model(my_model,dir=my_path)
>>> h2o.load_model(path) # use the result of save_model
:param path: The full path of the H2O Model to be imported.
:return: the model
"""
res = H2OConnection.post_json("Models.bin/",dir=path,_rest_version=99)
return get_model(res['models'][0]['model_id']['name'])
def cluster_status():
"""
TODO: This isn't really a cluster status... it's a node status check for the node we're connected to.
This is possibly confusing because this can come back without warning,
but if a user tries to do any remoteSend, they will get a "cloud sick warning"
Retrieve information on the status of the cluster running H2O.
:return: None
"""
cluster_json = H2OConnection.get_json("Cloud?skip_ticks=true")
print "Version: {0}".format(cluster_json['version'])
print "Cloud name: {0}".format(cluster_json['cloud_name'])
print "Cloud size: {0}".format(cluster_json['cloud_size'])
if cluster_json['locked']: print "Cloud is locked\n"
else: print "Accepting new members\n"
if cluster_json['nodes'] == None or len(cluster_json['nodes']) == 0:
print "No nodes found"
return
status = []
for node in cluster_json['nodes']:
for k, v in zip(node.keys(),node.values()):
if k in ["h2o", "healthy", "last_ping", "num_cpus", "sys_load", "mem_value_size", "total_value_size",
"free_mem", "tot_mem", "max_mem", "free_disk", "max_disk", "pid", "num_keys", "tcps_active",
"open_fds", "rpcs_active"]: status.append(k+": {0}".format(v))
print ', '.join(status)
print
def init(ip="localhost", port=54321, size=1, start_h2o=False, enable_assertions=False,
license=None, max_mem_size_GB=None, min_mem_size_GB=None, ice_root=None, strict_version_check=False):
"""
Initiate an H2O connection to the specified ip and port.
:param ip: A string representing the hostname or IP address of the server where H2O is running.
:param port: A port, default is 54321
:param size: The expected number of h2o instances (ignored if start_h2o is True)
:param start_h2o: A boolean dictating whether this module should start the H2O jvm. An attempt is made anyways if _connect fails.
:param enable_assertions: If start_h2o, pass `-ea` as a VM option.
:param license: If not None, is a path to a license file.
:param max_mem_size_GB: Maximum heap size (jvm option Xmx) in gigabytes.
:param min_mem_size_GB: Minimum heap size (jvm option Xms) in gigabytes.
:param ice_root: A temporary directory (default location is determined by tempfile.mkdtemp()) to hold H2O log files.
:return: None
"""
H2OConnection(ip=ip, port=port,start_h2o=start_h2o,enable_assertions=enable_assertions,license=license,max_mem_size_GB=max_mem_size_GB,min_mem_size_GB=min_mem_size_GB,ice_root=ice_root,strict_version_check=strict_version_check)
return None
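# Hedged example: connecting to a locally running cluster before any frame work.
#   >>> init(ip="localhost", port=54321)
#   >>> cluster_info()  # prints the node/cluster summary on success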
def export_file(frame,path,force=False):
"""
Export a given H2OFrame to a path on the machine this python session is currently connected to. To view the current session, call h2o.cluster_info().
:param frame: The Frame to save to disk.
:param path: The path to the save point on disk.
:param force: Overwrite any preexisting file with the same path
:return: None
"""
frame._eager()
H2OJob(H2OConnection.get_json("Frames/"+frame._id+"/export/"+path+"/overwrite/"+("true" if force else "false")), "Export File").poll()
def cluster_info():
"""
Display the current H2O cluster information.
:return: None
"""
H2OConnection._cluster_info()
def shutdown(conn=None, prompt=True):
"""
Shut down the specified instance. All data will be lost.
This method checks if H2O is running at the specified IP address and port, and if it is, shuts down that H2O instance.
:param conn: An H2OConnection object containing the IP address and port of the server running H2O.
:param prompt: A logical value indicating whether to prompt the user before shutting down the H2O server.
:return: None
"""
if conn == None: conn = H2OConnection.current_connection()
H2OConnection._shutdown(conn=conn, prompt=prompt)
def deeplearning(x,y=None,validation_x=None,validation_y=None,training_frame=None,model_id=None,
overwrite_with_best_model=None,validation_frame=None,checkpoint=None,autoencoder=None,
use_all_factor_levels=None,activation=None,hidden=None,epochs=None,train_samples_per_iteration=None,
seed=None,adaptive_rate=None,rho=None,epsilon=None,rate=None,rate_annealing=None,rate_decay=None,
momentum_start=None,momentum_ramp=None,momentum_stable=None,nesterov_accelerated_gradient=None,
input_dropout_ratio=None,hidden_dropout_ratios=None,l1=None,l2=None,max_w2=None,initial_weight_distribution=None,
initial_weight_scale=None,loss=None,distribution=None,tweedie_power=None,score_interval=None,score_training_samples=None,
score_validation_samples=None,score_duty_cycle=None,classification_stop=None,regression_stop=None,quiet_mode=None,
max_confusion_matrix_size=None,max_hit_ratio_k=None,balance_classes=None,class_sampling_factors=None,
max_after_balance_size=None,score_validation_sampling=None,diagnostics=None,variable_importances=None,
fast_mode=None,ignore_const_cols=None,force_load_balance=None,replicate_training_data=None,single_node_mode=None,
shuffle_training_data=None,sparse=None,col_major=None,average_activation=None,sparsity_beta=None,
max_categorical_features=None,reproducible=None,export_weights_and_biases=None,offset_column=None,weights_column=None,
nfolds=None,fold_column=None,fold_assignment=None,keep_cross_validation_predictions=None):
"""
Build a supervised Deep Learning model
Performs Deep Learning neural networks on an H2OFrame
:param x: An H2OFrame containing the predictors in the model.
:param y: An H2OFrame of the response variable in the model.
:param training_frame: (Optional) An H2OFrame. Only used to retrieve weights, offset, or nfolds columns, if they aren't already provided in x.
:param model_id: (Optional) The unique id assigned to the resulting model. If none is given, an id will automatically be generated.
:param overwrite_with_best_model: Logical. If True, overwrite the final model with the best model found during training. Defaults to True.
:param validation_frame: (Optional) An H2OFrame object indicating the validation dataset used to construct the confusion matrix. If left blank, this defaults to the training data when nfolds = 0
:param checkpoint: Model checkpoint (either key or H2ODeepLearningModel) to resume training with.
:param autoencoder: Enable auto-encoder for model building.
:param use_all_factor_levels: Logical. Use all factor levels of categorical variables. Otherwise the first factor level is omitted (without loss of accuracy). Useful for variable importances and auto-enabled for autoencoder.
:param activation: A string indicating the activation function to use. Must be either "Tanh", "TanhWithDropout", "Rectifier", "RectifierWithDropout", "Maxout", or "MaxoutWithDropout"
:param hidden: Hidden layer sizes (e.g. [100,100])
:param epochs: How many times the dataset should be iterated (streamed), can be fractional
:param train_samples_per_iteration: Number of training samples (globally) per MapReduce iteration. Special values are: 0 one epoch; -1 all available data (e.g., replicated training data); or -2 auto-tuning (default)
:param seed: Seed for random numbers (affects sampling) - Note: only reproducible when running single threaded
:param adaptive_rate: Logical. Adaptive learning rate (ADADELTA)
:param rho: Adaptive learning rate time decay factor (similarity to prior updates)
:param epsilon: Adaptive learning rate parameter, similar to learn rate annealing during initial training phase. Typical values are between 1.0e-10 and 1.0e-4
:param rate: Learning rate (higher => less stable, lower => slower convergence)
:param rate_annealing: Learning rate annealing: rate / (1 + rate_annealing * samples)
:param rate_decay: Learning rate decay factor between layers (N-th layer: rate * alpha^(N-1))
:param momentum_start: Initial momentum at the beginning of training (try 0.5)
:param momentum_ramp: Number of training samples for which momentum increases
:param momentum_stable: Final momentum after the ramp is over (try 0.99)
:param nesterov_accelerated_gradient: Logical. Use Nesterov accelerated gradient (recommended)
:param input_dropout_ratio: A fraction of the features for each training row to be omitted from training in order to improve generalization (dimension sampling).
:param hidden_dropout_ratios: Hidden layer dropout ratios (can improve generalization). Specify one value per hidden layer; defaults to 0.5.
:param l1: L1 regularization (can add stability and improve generalization, causes many weights to become 0)
:param l2: L2 regularization (can add stability and improve generalization, causes many weights to be small)
:param max_w2: Constraint for squared sum of incoming weights per unit (e.g. Rectifier)
:param initial_weight_distribution: Can be "Uniform", "UniformAdaptive", or "Normal"
:param initial_weight_scale: Uniform: -value ... value, Normal: stddev
:param loss: Loss function: "Automatic", "CrossEntropy" (for classification only), "MeanSquare", "Absolute" (experimental) or "Huber" (experimental)
:param distribution: A character string. The distribution function of the response. Must be "AUTO", "bernoulli", "multinomial", "poisson", "gamma", "tweedie", "laplace", "huber" or "gaussian"
:param tweedie_power: Tweedie power (only for Tweedie distribution, must be between 1 and 2)
:param score_interval: Shortest time interval (in secs) between model scoring
:param score_training_samples: Number of training set samples for scoring (0 for all)
:param score_validation_samples: Number of validation set samples for scoring (0 for all)
:param score_duty_cycle: Maximum duty cycle fraction for scoring (lower: more training, higher: more scoring)
:param classification_stop: Stopping criterion for classification error fraction on training data (-1 to disable)
:param regression_stop: Stopping criterion for regression error (MSE) on training data (-1 to disable)
:param quiet_mode: Enable quiet mode for less output to standard output
:param max_confusion_matrix_size: Max. size (number of classes) for confusion matrices to be shown
:param max_hit_ratio_k: Max number (top K) of predictions to use for hit ratio computation (for multi-class only, 0 to disable)
:param balance_classes: Balance training data class counts via over/under-sampling (for imbalanced data)
:param class_sampling_factors: Desired over/under-sampling ratios per class (in lexicographic order). If not specified, sampling factors will be automatically computed to obtain class balance during training. Requires balance_classes.
:param max_after_balance_size: Maximum relative size of the training data after balancing class counts (can be less than 1.0)
:param score_validation_sampling: Method used to sample validation dataset for scoring
:param diagnostics: Enable diagnostics for hidden layers
:param variable_importances: Compute variable importances for input features (Gedeon method); can be slow for large networks
:param fast_mode: Enable fast mode (minor approximations in back-propagation)
:param ignore_const_cols: Ignore constant columns (no information can be gained anyway)
:param force_load_balance: Force extra load balancing to increase training speed for small datasets (to keep all cores busy)
:param replicate_training_data: Replicate the entire training dataset onto every node for faster training
:param single_node_mode: Run on a single node for fine-tuning of model parameters
:param shuffle_training_data: Enable shuffling of training data (recommended if training data is replicated and train_samples_per_iteration is close to numRows*numNodes)
:param sparse: Sparse data handling (Experimental)
:param col_major: Use a column major weight matrix for input layer. Can speed up forward propagation, but might slow down backpropagation (Experimental)
:param average_activation: Average activation for sparse auto-encoder (Experimental)
:param sparsity_beta: Sparsity regularization (Experimental)
:param max_categorical_features: Max. number of categorical features, enforced via hashing (Experimental)
:param reproducible: Force reproducibility on small data (will be slow - only uses 1 thread)
:param export_weights_and_biases: Whether to export Neural Network weights and biases to H2O Frames
:param offset_column: Specify the offset column.
:param weights_column: Specify the weights column.
:param nfolds: (Optional) Number of folds for cross-validation. If nfolds >= 2, then validation must remain empty.
:param fold_column: (Optional) Column with cross-validation fold index assignment per observation
:param fold_assignment: Cross-validation fold assignment scheme, if fold_column is not specified Must be "AUTO", "Random" or "Modulo"
:param keep_cross_validation_predictions: Whether to keep the predictions of the cross-validation models
:return: Return a new classifier or regression model.
"""
parms = {k:v for k,v in locals().items() if k in ["y","training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
parms["algo"]="deeplearning"
return h2o_model_builder.supervised(parms)
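# Example (illustrative sketch, not part of the original module): a minimal
# deeplearning() call, assuming `fr` is an H2OFrame whose fifth column is a
# categorical response; column indices and parameter values are hypothetical.
#
#   model = deeplearning(x=fr[0:4], y=fr[4],
#                        hidden=[20, 20], epochs=5,
#                        activation="Rectifier")
#   model.show()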
def autoencoder(x,training_frame=None,model_id=None,overwrite_with_best_model=None,checkpoint=None,
use_all_factor_levels=None,activation=None,hidden=None,epochs=None,train_samples_per_iteration=None,
seed=None,adaptive_rate=None,rho=None,epsilon=None,rate=None,rate_annealing=None,rate_decay=None,
momentum_start=None,momentum_ramp=None,momentum_stable=None,nesterov_accelerated_gradient=None,
input_dropout_ratio=None,hidden_dropout_ratios=None,l1=None,l2=None,max_w2=None,initial_weight_distribution=None,
initial_weight_scale=None,loss=None,distribution=None,tweedie_power=None,score_interval=None,score_training_samples=None,
score_duty_cycle=None,classification_stop=None,regression_stop=None,quiet_mode=None,
max_confusion_matrix_size=None,max_hit_ratio_k=None,balance_classes=None,class_sampling_factors=None,
max_after_balance_size=None,diagnostics=None,variable_importances=None,
fast_mode=None,ignore_const_cols=None,force_load_balance=None,replicate_training_data=None,single_node_mode=None,
shuffle_training_data=None,sparse=None,col_major=None,average_activation=None,sparsity_beta=None,
max_categorical_features=None,reproducible=None,export_weights_and_biases=None):
"""
Build unsupervised auto encoder using H2O Deeplearning
:param x: An H2OFrame containing the predictors in the model.
:param training_frame: (Optional) An H2OFrame. Only used to retrieve weights, offset, or nfolds columns, if they aren't already provided in x.
:param model_id: (Optional) The unique id assigned to the resulting model. If none is given, an id will automatically be generated.
:param overwrite_with_best_model: Logical. If True, overwrite the final model with the best model found during training. Defaults to True.
:param checkpoint: Model checkpoint (either key or H2ODeepLearningModel) to resume training with.
:param use_all_factor_levels: Logical. Use all factor levels of categorical variables. Otherwise the first factor level is omitted (without loss of accuracy). Useful for variable importances and auto-enabled for autoencoder.
:param activation: A string indicating the activation function to use. Must be either "Tanh", "TanhWithDropout", "Rectifier", "RectifierWithDropout", "Maxout", or "MaxoutWithDropout"
:param hidden: Hidden layer sizes (e.g. [100,100])
:param epochs: How many times the dataset should be iterated (streamed), can be fractional
:param train_samples_per_iteration: Number of training samples (globally) per MapReduce iteration. Special values are: 0 one epoch; -1 all available data (e.g., replicated training data); or -2 auto-tuning (default)
:param seed: Seed for random numbers (affects sampling) - Note: only reproducible when running single threaded
:param adaptive_rate: Logical. Adaptive learning rate (ADADELTA)
:param rho: Adaptive learning rate time decay factor (similarity to prior updates)
:param epsilon: Adaptive learning rate parameter, similar to learn rate annealing during initial training phase. Typical values are between 1.0e-10 and 1.0e-4
:param rate: Learning rate (higher => less stable, lower => slower convergence)
:param rate_annealing: Learning rate annealing: rate / (1 + rate_annealing * samples)
:param rate_decay: Learning rate decay factor between layers (N-th layer: rate * alpha^(N-1))
:param momentum_start: Initial momentum at the beginning of training (try 0.5)
:param momentum_ramp: Number of training samples for which momentum increases
:param momentum_stable: Final momentum after the ramp is over (try 0.99)
:param nesterov_accelerated_gradient: Logical. Use Nesterov accelerated gradient (recommended)
:param input_dropout_ratio: A fraction of the features for each training row to be omitted from training in order to improve generalization (dimension sampling).
:param hidden_dropout_ratios: Hidden layer dropout ratios (can improve generalization). Specify one value per hidden layer; defaults to 0.5.
:param l1: L1 regularization (can add stability and improve generalization, causes many weights to become 0)
:param l2: L2 regularization (can add stability and improve generalization, causes many weights to be small)
:param max_w2: Constraint for squared sum of incoming weights per unit (e.g. Rectifier)
:param initial_weight_distribution: Can be "Uniform", "UniformAdaptive", or "Normal"
:param initial_weight_scale: Uniform: -value ... value, Normal: stddev
:param loss: Loss function: "Automatic", "CrossEntropy" (for classification only), "MeanSquare", "Absolute" (experimental) or "Huber" (experimental)
:param distribution: A character string. The distribution function of the response. Must be "AUTO", "bernoulli", "multinomial", "poisson", "gamma", "tweedie", "laplace", "huber" or "gaussian"
:param tweedie_power: Tweedie power (only for Tweedie distribution, must be between 1 and 2)
:param score_interval: Shortest time interval (in secs) between model scoring
:param score_training_samples: Number of training set samples for scoring (0 for all)
:param score_duty_cycle: Maximum duty cycle fraction for scoring (lower: more training, higher: more scoring)
:param classification_stop: Stopping criterion for classification error fraction on training data (-1 to disable)
:param regression_stop: Stopping criterion for regression error (MSE) on training data (-1 to disable)
:param quiet_mode: Enable quiet mode for less output to standard output
:param max_confusion_matrix_size: Max. size (number of classes) for confusion matrices to be shown
:param max_hit_ratio_k: Max number (top K) of predictions to use for hit ratio computation (for multi-class only, 0 to disable)
:param balance_classes: Balance training data class counts via over/under-sampling (for imbalanced data)
:param class_sampling_factors: Desired over/under-sampling ratios per class (in lexicographic order). If not specified, sampling factors will be automatically computed to obtain class balance during training. Requires balance_classes.
:param max_after_balance_size: Maximum relative size of the training data after balancing class counts (can be less than 1.0)
:param diagnostics: Enable diagnostics for hidden layers
:param variable_importances: Compute variable importances for input features (Gedeon method); can be slow for large networks
:param fast_mode: Enable fast mode (minor approximations in back-propagation)
:param ignore_const_cols: Ignore constant columns (no information can be gained anyway)
:param force_load_balance: Force extra load balancing to increase training speed for small datasets (to keep all cores busy)
:param replicate_training_data: Replicate the entire training dataset onto every node for faster training
:param single_node_mode: Run on a single node for fine-tuning of model parameters
:param shuffle_training_data: Enable shuffling of training data (recommended if training data is replicated and train_samples_per_iteration is close to numRows*numNodes)
:param sparse: Sparse data handling (Experimental)
:param col_major: Use a column major weight matrix for input layer. Can speed up forward propagation, but might slow down backpropagation (Experimental)
:param average_activation: Average activation for sparse auto-encoder (Experimental)
:param sparsity_beta: Sparsity regularization (Experimental)
:param max_categorical_features: Max. number of categorical features, enforced via hashing (Experimental)
:param reproducible: Force reproducibility on small data (will be slow - only uses 1 thread)
:param export_weights_and_biases: Whether to export Neural Network weights and biases to H2O Frames
:return: Return a new autoencoder
"""
parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
parms["algo"]="deeplearning"
parms["autoencoder"]=True
return h2o_model_builder.unsupervised(parms)
def gbm(x,y,validation_x=None,validation_y=None,training_frame=None,model_id=None,
distribution=None,tweedie_power=None,ntrees=None,max_depth=None,min_rows=None,
learn_rate=None,nbins=None,nbins_cats=None,validation_frame=None,
balance_classes=None,max_after_balance_size=None,seed=None,build_tree_one_node=None,
nfolds=None,fold_column=None,fold_assignment=None,keep_cross_validation_predictions=None,
score_each_iteration=None,offset_column=None,weights_column=None,do_future=None,checkpoint=None):
"""
Builds gradient boosted classification trees, and gradient boosted regression trees on a parsed data set.
The default distribution function will guess the model type based on the response column type. To run properly, the
response column must be numeric for "gaussian" or an enum for "bernoulli" or "multinomial".
:param x: An H2OFrame containing the predictors in the model.
:param y: An H2OFrame of the response variable in the model.
:param training_frame: (Optional) An H2OFrame. Only used to retrieve weights, offset, or nfolds columns, if they aren't already provided in x.
:param model_id: (Optional) The unique id assigned to the resulting model. If none is given, an id will automatically be generated.
:param distribution: A character string. The distribution function of the response. Must be "AUTO", "bernoulli", "multinomial", "poisson", "gamma", "tweedie" or "gaussian"
:param tweedie_power: Tweedie power (only for Tweedie distribution, must be between 1 and 2)
:param ntrees: A non-negative integer that determines the number of trees to grow.
:param max_depth: Maximum depth to grow the tree.
:param min_rows: Minimum number of rows to assign to terminal nodes.
:param learn_rate: A number from 0.0 to 1.0 controlling the learning rate
:param nbins: For numerical columns (real/int), build a histogram of this many bins, then split at the best point
:param nbins_cats: For categorical columns (enum), build a histogram of this many bins, then split at the best point. Higher values can lead to more overfitting.
:param validation_frame: An H2OFrame object indicating the validation dataset used to construct the confusion matrix. If left blank, this defaults to the training data when nfolds = 0
:param balance_classes: logical, indicates whether or not to balance training data class counts via over/under-sampling (for imbalanced data)
:param max_after_balance_size: Maximum relative size of the training data after balancing class counts (can be less than 1.0)
:param seed: Seed for random numbers (affects sampling when balance_classes=T)
:param build_tree_one_node: Run on one node only; no network overhead but fewer cpus used. Suitable for small datasets.
:param nfolds: (Optional) Number of folds for cross-validation. If nfolds >= 2, then validation must remain empty.
:param fold_column: (Optional) Column with cross-validation fold index assignment per observation
:param fold_assignment: Cross-validation fold assignment scheme, if fold_column is not specified Must be "AUTO", "Random" or "Modulo"
:param keep_cross_validation_predictions: Whether to keep the predictions of the cross-validation models
:param score_each_iteration: Attempts to score each tree.
:param offset_column: Specify the offset column.
:param weights_column: Specify the weights column.
:return: A new classifier or regression model.
"""
parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
parms["algo"]="gbm"
return h2o_model_builder.supervised(parms)
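# Example (illustrative sketch, not part of the original module): a minimal
# gbm() call for multinomial classification, assuming `fr` holds four numeric
# predictors and an enum response in column 4; all values are hypothetical.
#
#   model = gbm(x=fr[0:4], y=fr[4],
#               distribution="multinomial",
#               ntrees=50, max_depth=5, learn_rate=0.1)
#   model.show()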
def glm(x,y,validation_x=None,validation_y=None,training_frame=None,model_id=None,validation_frame=None,
max_iterations=None,beta_epsilon=None,solver=None,standardize=None,family=None,link=None,
tweedie_variance_power=None,tweedie_link_power=None,alpha=None,prior=None,lambda_search=None,
nlambdas=None,lambda_min_ratio=None,beta_constraints=None,offset_column=None,weights_column=None,
nfolds=None,fold_column=None,fold_assignment=None,keep_cross_validation_predictions=None,
intercept=None, Lambda=None, max_active_predictors=None, do_future=None, checkpoint=None):
"""
Build a Generalized Linear Model
Fit a generalized linear model, specified by a response variable, a set of predictors, and a description of the error
distribution.
:param x: An H2OFrame containing the predictors in the model.
:param y: An H2OFrame of the response variable in the model.
:param training_frame: (Optional) An H2OFrame. Only used to retrieve weights, offset, or nfolds columns, if they aren't already provided in x.
:param model_id: (Optional) The unique id assigned to the resulting model. If none is given, an id will automatically be generated.
:param validation_frame: An H2OFrame object containing the variables in the model.
:param max_iterations: A non-negative integer specifying the maximum number of iterations.
:param beta_epsilon: A non-negative number specifying the magnitude of the maximum difference between the coefficient estimates from successive iterations. Defines the convergence criterion for h2o.glm.
:param solver: A character string specifying the solver used: IRLSM (supports more features), L_BFGS (scales better for datasets with many columns)
:param standardize: A logical value indicating whether the numeric predictors should be standardized to have a mean of 0 and a variance of 1 prior to training the models.
:param family: A character string specifying the distribution of the model: gaussian, binomial, poisson, gamma, tweedie.
:param link: A character string specifying the link function. The default is the canonical link for the family.
The supported links for each of the family specifications are:
"gaussian": "identity", "log", "inverse"
"binomial": "logit", "log"
"poisson": "log", "identity"
"gamma": "inverse", "log", "identity"
"tweedie": "tweedie"
:param tweedie_variance_power: A numeric specifying the power for the variance function when family = "tweedie".
:param tweedie_link_power: A numeric specifying the power for the link function when family = "tweedie".
:param alpha: A numeric in [0, 1] specifying the elastic-net mixing parameter.
The elastic-net penalty is defined to be:
P(alpha, beta) = (1 - alpha)/2 * ||beta||_2^2 + alpha * ||beta||_1 = sum_j [(1 - alpha)/2 * beta_j^2 + alpha * |beta_j|],
making alpha = 1 the lasso penalty and alpha = 0 the ridge penalty.
:param Lambda: A non-negative shrinkage parameter for the elastic-net, which multiplies P(alpha, beta) in the objective function. When Lambda = 0, no elastic-net penalty is applied and ordinary generalized linear models are fit.
:param prior: (Optional) A numeric specifying the prior probability of class 1 in the response when family = "binomial". The default prior is the observational frequency of class 1.
:param lambda_search: A logical value indicating whether to conduct a search over the space of lambda values starting from the lambda max, given lambda is interpreted as lambda min.
:param nlambdas: The number of lambda values to use when lambda_search = TRUE.
:param lambda_min_ratio: Smallest value for lambda as a fraction of lambda.max. By default, if the number of
observations is greater than the number of variables, then lambda_min_ratio = 0.0001; if the number of
observations is less than the number of variables, then lambda_min_ratio = 0.01.
:param beta_constraints: A data.frame or H2OParsedData object with the columns ["names", "lower_bounds",
"upper_bounds", "beta_given"], where each row corresponds to a predictor in the GLM. "names" contains the predictor
names, "lower_bounds"/"upper_bounds" are the lower and upper bounds of beta, and "beta_given" is some supplied
starting values for the coefficients.
:param offset_column: Specify the offset column.
:param weights_column: Specify the weights column.
:param nfolds: (Optional) Number of folds for cross-validation. If nfolds >= 2, then validation must remain empty.
:param fold_column: (Optional) Column with cross-validation fold index assignment per observation
:param fold_assignment: Cross-validation fold assignment scheme, if fold_column is not specified Must be "AUTO", "Random" or "Modulo"
:param keep_cross_validation_predictions: Whether to keep the predictions of the cross-validation models
:param intercept: Logical, include constant term (intercept) in the model
:param max_active_predictors: (Optional) Convergence criteria for number of predictors when using L1 penalty.
:return: A subclass of ModelBase is returned. The specific subclass depends on the machine learning task at hand (if
it's binomial classification, then an H2OBinomialModel is returned, if it's regression then a H2ORegressionModel is
returned). The default print-out of the models is shown, but further GLM-specific information can be queried out of
the object.
Upon completion of the GLM, the resulting object has coefficients, normalized coefficients, residual/null deviance,
aic, and a host of model metrics including MSE, AUC (for logistic regression), degrees of freedom, and confusion
matrices.
"""
parms = {k.lower():v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
if "alpha" in parms and not isinstance(parms["alpha"], (list,tuple)): parms["alpha"] = [parms["alpha"]]
parms["algo"]="glm"
return h2o_model_builder.supervised(parms)
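# Example (illustrative sketch, not part of the original module): an
# elastic-net gaussian GLM, assuming a numeric response in column 3 of a
# hypothetical H2OFrame `fr`. alpha=0.5 mixes the lasso and ridge penalties
# described above.
#
#   model = glm(x=fr[0:3], y=fr[3],
#               family="gaussian", alpha=0.5, Lambda=1e-5)
#   model.show()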
def start_glm_job(x,y,validation_x=None,validation_y=None,**kwargs):
"""
Build a Generalized Linear Model
Note: this function is the same as glm(), but it doesn't block on model-build. Instead, it returns an H2OModelFuture
object immediately. The model can be retrieved from the H2OModelFuture object with get_future_model().
:return: H2OModelFuture
"""
kwargs["do_future"] = True
return glm(x,y,validation_x,validation_y,**kwargs)
def kmeans(x,validation_x=None,k=None,model_id=None,max_iterations=None,standardize=None,init=None,seed=None,
nfolds=None,fold_column=None,fold_assignment=None,training_frame=None,validation_frame=None,
user_points=None,ignored_columns=None,score_each_iteration=None,keep_cross_validation_predictions=None,
ignore_const_cols=None,checkpoint=None):
"""
Performs k-means clustering on an H2O dataset.
:param x: (Optional) A vector containing the data columns on which k-means operates.
:param k: The number of clusters. Must be between 1 and 1e7 inclusive. k may be omitted if the user specifies the
initial centers in the init parameter; if k is not omitted, it must equal the number of
user-specified centers.
:param model_id: (Optional) The unique id assigned to the resulting model. If none is given, an id will automatically be generated.
:param max_iterations: The maximum number of iterations allowed. Must be between 0 and 1e6 inclusive.
:param standardize: Logical, indicates whether the data should be standardized before running k-means.
:param init: A character string that selects the initial set of k cluster centers. Possible values are "Random": for
random initialization, "PlusPlus": for k-means++ initialization, or "Furthest": for initialization at the furthest
point from each successive center. Additionally, the user may specify the initial centers as a matrix, data.frame,
H2OFrame, or list of vectors. For matrices, data.frames, and H2OFrames, each row of the respective structure is an
initial center. For lists of vectors, each vector is an initial center.
:param seed: (Optional) Random seed used to initialize the cluster centroids.
:param nfolds: (Optional) Number of folds for cross-validation. If nfolds >= 2, then validation must remain empty.
:param fold_column: (Optional) Column with cross-validation fold index assignment per observation
:param fold_assignment: Cross-validation fold assignment scheme, if fold_column is not specified Must be "AUTO", "Random" or "Modulo"
:return: Returns an object of class H2OClusteringModel.
"""
parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
parms["algo"]="kmeans"
return h2o_model_builder.unsupervised(parms)
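# Example (illustrative sketch, not part of the original module): clustering
# the first four columns of a hypothetical H2OFrame `fr` into three clusters.
#
#   km = kmeans(x=fr[0:4], k=3, standardize=True, init="PlusPlus", seed=1234)
#   km.show()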
def random_forest(x,y,validation_x=None,validation_y=None,training_frame=None,model_id=None,mtries=None,sample_rate=None,
build_tree_one_node=None,ntrees=None,max_depth=None,min_rows=None,nbins=None,nbins_cats=None,
binomial_double_trees=None,validation_frame=None,balance_classes=None,max_after_balance_size=None,
seed=None,offset_column=None,weights_column=None,nfolds=None,fold_column=None,fold_assignment=None,
keep_cross_validation_predictions=None,checkpoint=None):
"""
Build a Big Data Random Forest Model
Builds a Random Forest Model on an H2OFrame
:param x: An H2OFrame containing the predictors in the model.
:param y: An H2OFrame of the response variable in the model.
:param training_frame: (Optional) An H2OFrame. Only used to retrieve weights, offset, or nfolds columns, if they aren't already provided in x.
:param model_id: (Optional) The unique id assigned to the resulting model. If none is given, an id will automatically be generated.
:param mtries: Number of variables randomly sampled as candidates at each split. If set to -1, defaults to sqrt(p) for classification, and p/3 for regression, where p is the number of predictors.
:param sample_rate: Sample rate, from 0 to 1.0.
:param build_tree_one_node: Run on one node only; no network overhead but fewer cpus used. Suitable for small datasets.
:param ntrees: A nonnegative integer that determines the number of trees to grow.
:param max_depth: Maximum depth to grow the tree.
:param min_rows: Minimum number of rows to assign to terminal nodes.
:param nbins: For numerical columns (real/int), build a histogram of this many bins, then split at the best point.
:param nbins_cats: For categorical columns (enum), build a histogram of this many bins, then split at the best point. Higher values can lead to more overfitting.
:param binomial_double_trees: For binary classification: Build 2x as many trees (one per class) - can lead to higher accuracy.
:param validation_frame: An H2OFrame object containing the variables in the model.
:param balance_classes: logical, indicates whether or not to balance training data class counts via over/under-sampling (for imbalanced data)
:param max_after_balance_size: Maximum relative size of the training data after balancing class counts (can be less than 1.0)
:param seed: Seed for random numbers (affects sampling) - Note: only reproducible when running single threaded
:param offset_column: Specify the offset column.
:param weights_column: Specify the weights column.
:param nfolds: (Optional) Number of folds for cross-validation. If nfolds >= 2, then validation must remain empty.
:param fold_column: (Optional) Column with cross-validation fold index assignment per observation
:param fold_assignment: Cross-validation fold assignment scheme, if fold_column is not specified Must be "AUTO", "Random" or "Modulo"
:param keep_cross_validation_predictions: Whether to keep the predictions of the cross-validation models
:return: A new classifier or regression model.
"""
parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
parms["algo"]="drf"
return h2o_model_builder.supervised(parms)
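# Example (illustrative sketch, not part of the original module): a small
# random forest, assuming a categorical response in column 4 of a hypothetical
# H2OFrame `fr`.
#
#   model = random_forest(x=fr[0:4], y=fr[4], ntrees=50, max_depth=20)
#   model.show()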
def prcomp(x,validation_x=None,k=None,model_id=None,max_iterations=None,transform=None,seed=None,use_all_factor_levels=None,
training_frame=None,validation_frame=None,pca_method=None):
"""
Principal components analysis of a H2O dataset using the power method
to calculate the singular value decomposition of the Gram matrix.
:param k: The number of principal components to be computed. This must be between 1 and min(ncol(training_frame), nrow(training_frame)) inclusive.
:param model_id: (Optional) The unique hex key assigned to the resulting model. Automatically generated if none is provided.
:param max_iterations: The maximum number of iterations to run each power iteration loop. Must be between 1 and 1e6 inclusive.
:param transform: A character string that indicates how the training data should be transformed before running PCA.
Possible values are "NONE": for no transformation, "DEMEAN": for subtracting the mean of each column, "DESCALE":
for dividing by the standard deviation of each column, "STANDARDIZE": for demeaning and descaling, and "NORMALIZE":
for demeaning and dividing each column by its range (max - min).
:param seed: (Optional) Random seed used to initialize the right singular vectors at the beginning of each power method iteration.
:param use_all_factor_levels: (Optional) A logical value indicating whether all factor levels should be included
in each categorical column expansion. If FALSE, the indicator column corresponding to the first factor level of
every categorical variable will be dropped. Defaults to FALSE.
:return: a new dim reduction model
"""
parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
parms["algo"]="pca"
return h2o_model_builder.unsupervised(parms)
def svd(x,validation_x=None,nv=None,max_iterations=None,transform=None,seed=None,use_all_factor_levels=None,
training_frame=None, validation_frame=None):
"""
Singular value decomposition of a H2O dataset using the power method.
:param nv: The number of right singular vectors to be computed. This must be between 1 and min(ncol(training_frame), nrow(training_frame)) inclusive.
:param max_iterations: The maximum number of iterations to run each power iteration loop. Must be between 1 and
1e6 inclusive.
:param transform: A character string that indicates how the training data should be transformed before running SVD.
Possible values are "NONE": for no transformation, "DEMEAN": for subtracting the mean of each column, "DESCALE": for
dividing by the standard deviation of each column, "STANDARDIZE": for demeaning and descaling, and "NORMALIZE": for
demeaning and dividing each column by its range (max - min).
:param seed: (Optional) Random seed used to initialize the right singular vectors at the beginning of each power
method iteration.
:param use_all_factor_levels: (Optional) A logical value indicating whether all factor levels should be included in
each categorical column expansion. If FALSE, the indicator column corresponding to the first factor level of every
categorical variable will be dropped. Defaults to TRUE.
:return: a new dim reduction model
"""
parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
parms["algo"]="svd"
parms['_rest_version']=99
return h2o_model_builder.unsupervised(parms)
def naive_bayes(x,y,validation_x=None,validation_y=None,training_frame=None,validation_frame=None,
laplace=None,threshold=None,eps=None,compute_metrics=None,offset_column=None,weights_column=None,
balance_classes=None,max_after_balance_size=None, nfolds=None,fold_column=None,fold_assignment=None,
keep_cross_validation_predictions=None,checkpoint=None):
"""
The naive Bayes classifier assumes independence between predictor variables conditional on the response, and a
Gaussian distribution of numeric predictors with mean and standard deviation computed from the training dataset.
When building a naive Bayes classifier, every row in the training dataset that contains at least one NA will be
skipped completely. If the test dataset has missing values, then those predictors are omitted in the probability
calculation during prediction.
:param laplace: A positive number controlling Laplace smoothing. The default zero disables smoothing.
:param threshold: The minimum standard deviation to use for observations without enough data. Must be at least 1e-10.
:param eps: A threshold cutoff to deal with numeric instability, must be positive.
:param compute_metrics: A logical value indicating whether model metrics should be computed. Set to FALSE to reduce the runtime of the algorithm.
:param training_frame: Training Frame
:param validation_frame: Validation Frame
:param offset_column: Specify the offset column.
:param weights_column: Specify the weights column.
:param nfolds: (Optional) Number of folds for cross-validation. If nfolds >= 2, then validation must remain empty.
:param fold_column: (Optional) Column with cross-validation fold index assignment per observation
:param fold_assignment: Cross-validation fold assignment scheme, if fold_column is not specified Must be "AUTO", "Random" or "Modulo"
:param keep_cross_validation_predictions: Whether to keep the predictions of the cross-validation models
:return: Returns an H2OBinomialModel if the response has two categorical levels, H2OMultinomialModel otherwise.
"""
parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
parms["algo"]="naivebayes"
return h2o_model_builder.supervised(parms)
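# Example (illustrative sketch, not part of the original module): naive Bayes
# with Laplace smoothing, assuming a categorical response in column 4 of a
# hypothetical H2OFrame `fr`.
#
#   model = naive_bayes(x=fr[0:4], y=fr[4], laplace=1)
#   model.show()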
def create_frame(id = None, rows = 10000, cols = 10, randomize = True, value = 0, real_range = 100,
categorical_fraction = 0.2, factors = 100, integer_fraction = 0.2, integer_range = 100,
binary_fraction = 0.1, binary_ones_fraction = 0.02, missing_fraction = 0.01, response_factors = 2,
has_response = False, seed=None):
"""
Data Frame Creation in H2O.
Creates a data frame in H2O with real-valued, categorical, integer, and binary columns specified by the user.
:param id: A string indicating the destination key. If empty, this will be auto-generated by H2O.
:param rows: The number of rows of data to generate.
:param cols: The number of columns of data to generate. Excludes the response column if has_response == True.
:param randomize: A logical value indicating whether data values should be randomly generated. This must be TRUE if
either categorical_fraction or integer_fraction is non-zero.
:param value: If randomize == FALSE, then all real-valued entries will be set to this value.
:param real_range: The range of randomly generated real values.
:param categorical_fraction: The fraction of total columns that are categorical.
:param factors: The number of (unique) factor levels in each categorical column.
:param integer_fraction: The fraction of total columns that are integer-valued.
:param integer_range: The range of randomly generated integer values.
:param binary_fraction: The fraction of total columns that are binary-valued.
:param binary_ones_fraction: The fraction of values in a binary column that are set to 1.
:param missing_fraction: The fraction of total entries in the data frame that are set to NA.
:param response_factors: If has_response == TRUE, then this is the number of factor levels in the response column.
:param has_response: A logical value indicating whether an additional response column should be prepended to the
final H2O data frame. If set to TRUE, the total number of columns will be cols+1.
:param seed: A seed used to generate random values when randomize = TRUE.
:return: the H2OFrame that was created
"""
parms = {"dest": _py_tmp_key() if id is None else id,
"rows": rows,
"cols": cols,
"randomize": randomize,
"value": value,
"real_range": real_range,
"categorical_fraction": categorical_fraction,
"factors": factors,
"integer_fraction": integer_fraction,
"integer_range": integer_range,
"binary_fraction": binary_fraction,
"binary_ones_fraction": binary_ones_fraction,
"missing_fraction": missing_fraction,
"response_factors": response_factors,
"has_response": has_response,
"seed": -1 if seed is None else seed,
}
H2OJob(H2OConnection.post_json("CreateFrame", **parms), "Create Frame").poll()
return get_frame(parms["dest"])
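# Example (illustrative sketch, not part of the original module): generating a
# random 1000x10 frame with a mix of column types; all parameter values are
# hypothetical.
#
#   fr = create_frame(rows=1000, cols=10, randomize=True,
#                     categorical_fraction=0.3, factors=5,
#                     integer_fraction=0.3, missing_fraction=0.05,
#                     seed=1234)
#   fr.describe()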
def interaction(data, factors, pairwise, max_factors, min_occurrence, destination_frame=None):
"""
Categorical Interaction Feature Creation in H2O.
Creates a frame in H2O with n-th order interaction features between categorical columns, as specified by
the user.
:param data: the H2OFrame that holds the target categorical columns.
:param factors: Factor columns (either indices or column names).
:param pairwise: Whether to create pairwise interactions between factors (otherwise create one
higher-order interaction). Only applicable if there are 3 or more factors.
:param max_factors: Max. number of factor levels in pair-wise interaction terms (if enforced, one extra catch-all
factor will be made)
:param min_occurrence: Min. occurrence threshold for factor levels in pair-wise interaction terms
:param destination_frame: A string indicating the destination key. If empty, this will be auto-generated by H2O.
:return: H2OFrame
"""
data._eager()
factors = [data.names[n] if isinstance(n,int) else n for n in factors]
parms = {"dest": _py_tmp_key() if destination_frame is None else destination_frame,
"source_frame": data._id,
"factor_columns": [_quoted(f) for f in factors],
"pairwise": pairwise,
"max_factors": max_factors,
"min_occurrence": min_occurrence,
}
H2OJob(H2OConnection.post_json("Interaction", **parms), "Interactions").poll()
return get_frame(parms["dest"])
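# Example (illustrative sketch, not part of the original module): pairwise
# interactions between two hypothetical categorical columns "C1" and "C2" of
# an H2OFrame `fr`.
#
#   inter = interaction(fr, factors=["C1", "C2"], pairwise=True,
#                       max_factors=100, min_occurrence=1)
#   inter.show()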
def network_test():
  """
  Test the network connectivity between the nodes of the H2O cloud and display the results.
  :return: None
  """
  res = H2OConnection.get_json(url_suffix="NetworkTest")
  res["table"].show()
def locate(path):
"""
Search for a relative path and turn it into an absolute path.
This is handy when hunting for data files to be passed into h2o and used by import file.
Note: This function is for unit testing purposes only.
:param path: Path to search for
:return: Absolute path if it is found. None otherwise.
"""
tmp_dir = os.path.realpath(os.getcwd())
possible_result = os.path.join(tmp_dir, path)
while (True):
if (os.path.exists(possible_result)):
return possible_result
next_tmp_dir = os.path.dirname(tmp_dir)
if (next_tmp_dir == tmp_dir):
raise ValueError("File not found: " + path)
tmp_dir = next_tmp_dir
possible_result = os.path.join(tmp_dir, path)
def store_size():
"""
Get the H2O store size (current count of keys).
:return: number of keys in H2O cloud
"""
return rapids("(store_size)")["result"]
def keys_leaked(num_keys):
"""
Ask H2O if any keys leaked.
:param num_keys: The number of keys that should be there.
:return: A boolean True/False if keys leaked. If keys leaked, check H2O logs for further detail.
"""
return rapids("(keys_leaked #{})".format(num_keys))["result"]=="TRUE"
def as_list(data, use_pandas=True):
"""
Convert an H2O data object into a python-specific object.
WARNING: This will pull all data local!
If Pandas is available (and use_pandas is True), then pandas will be used to parse the data frame.
Otherwise, a list-of-lists populated by character data will be returned (so the types of data will
all be str).
:param data: An H2O data object.
:param use_pandas: Try to use pandas for reading in the data.
:return: List of list (Rows x Columns).
"""
return H2OFrame.as_data_frame(data, use_pandas)
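# Example (illustrative sketch, not part of the original module): pulling a
# (small!) hypothetical H2OFrame `fr` into local Python memory.
#
#   rows = as_list(fr, use_pandas=False)  # list of lists of str
#   df   = as_list(fr, use_pandas=True)   # pandas.DataFrame, if pandas is available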
def set_timezone(tz):
"""
Set the Time Zone on the H2O Cloud
:param tz: The desired timezone.
:return: None
"""
rapids(ExprNode("setTimeZone", tz)._eager())
def get_timezone():
"""
Get the Time Zone on the H2O Cloud
:return: the time zone (string)
"""
return H2OFrame(expr=ExprNode("getTimeZone"))._scalar()
def list_timezones():
"""
Get a list of all the timezones
:return: the time zones (as an H2OFrame)
"""
return H2OFrame(expr=ExprNode("listTimeZones"))._frame()
class H2ODisplay:
"""
Pretty printing for H2O Objects;
Handles both IPython and vanilla console display
"""
THOUSANDS = "{:,}"
def __init__(self,table=None,header=None,table_header=None,**kwargs):
self.table_header=table_header
self.header=header
self.table=table
self.kwargs=kwargs
self.do_print=True
# one-shot display... never return an H2ODisplay object (or try not to)
# if holding onto a display object, then may have odd printing behavior
# the __repr__ and _repr_html_ methods will try to save you from many prints,
# but just be WARNED that your mileage may vary!
#
# In other words, it's better to just construct a new one of these when you're ready to print out.
if self.table_header is not None:
print
print self.table_header + ":"
print
if H2ODisplay._in_ipy():
from IPython.display import display
display(self)
self.do_print=False
else:
self.pprint()
self.do_print=False
# for Ipython
def _repr_html_(self):
if self.do_print:
return H2ODisplay._html_table(self.table,self.header)
def pprint(self):
r = self.__repr__()
print r
# for python REPL console
def __repr__(self):
if self.do_print or not H2ODisplay._in_ipy():
if self.header is None: return tabulate.tabulate(self.table,**self.kwargs)
else: return tabulate.tabulate(self.table,headers=self.header,**self.kwargs)
self.do_print=True
return ""
@staticmethod
def _in_ipy(): # are we in ipy? then pretty print tables with _repr_html
try:
__IPYTHON__
return True
except NameError:
return False
# some html table builder helper things
@staticmethod
def _html_table(rows, header=None):
table= "<div style=\"overflow:auto\"><table style=\"width:50%\">{}</table></div>" # keep table in a div for scroll-a-bility
table_rows=[]
if header is not None:
table_rows.append(H2ODisplay._html_row(header))
for row in rows:
table_rows.append(H2ODisplay._html_row(row))
return table.format("\n".join(table_rows))
@staticmethod
def _html_row(row):
res = "<tr>{}</tr>"
entry = "<td>{}</td>"
entries = "\n".join([entry.format(str(r)) for r in row])
return res.format(entries)
def can_use_pandas():
try:
imp.find_module('pandas')
return True
except ImportError:
return False
# ALL DEPRECATED METHODS BELOW #
def h2o_deprecated(newfun=None):
def o(fun):
if newfun is not None: m = "{} is deprecated. Use {}.".format(fun.__name__,newfun.__name__)
else: m = "{} is deprecated.".format(fun.__name__)
@functools.wraps(fun)
def i(*args, **kwargs):
print
print
warnings.warn(m, category=DeprecationWarning, stacklevel=2)
return fun(*args, **kwargs)
return i
return o
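# Example (illustrative sketch, not part of the original module): how the
# decorator above is meant to be applied; `my_old_loader` is hypothetical,
# while import_frame below is the module's real use of it.
#
#   @h2o_deprecated(import_file)
#   def my_old_loader(path=None):
#     return import_file(path)
#
# Calling my_old_loader(...) then emits a DeprecationWarning naming import_file.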
@h2o_deprecated(import_file)
def import_frame(path=None):
"""
Deprecated for import_file.
:param path: A path specifying the location of the data to import.
:return: A new H2OFrame
"""
warnings.warn("deprecated: Use import_file", DeprecationWarning)
return import_file(path)
|
PawarPawan/h2o-v3
|
h2o-py/h2o/h2o.py
|
Python
|
apache-2.0
| 75,865
|
[
"Gaussian"
] |
1b7ad9be1355b19055780eb33b00c62fa8757595cdab7f97194ffc9e851b9b5f
|
#!/usr/bin/env python
"""
refguide_check.py [OPTIONS] [-- ARGS]
Check for a Scipy submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script to be able to check if things are missing;
the output of this script does need to be checked manually. In some cases
objects are left out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
in docstrings. This is different from doctesting [we do not aim to have
scipy docstrings doctestable!], this is just to make sure that code in
docstrings is valid python::
$ python refguide_check.py --doctests optimize
"""
from __future__ import print_function
import sys
import os
import re
import copy
import inspect
import warnings
import doctest
import tempfile
import io
import docutils.core
from docutils.parsers.rst import directives
import shutil
import glob
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
from argparse import ArgumentParser
from pkg_resources import parse_version
import sphinx
import numpy as np
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))
from numpydoc.docscrape_sphinx import get_doc_object
if parse_version(sphinx.__version__) >= parse_version('1.5'):
# Enable specific Sphinx directives
from sphinx.directives import SeeAlso, Only
directives.register_directive('seealso', SeeAlso)
directives.register_directive('only', Only)
else:
# Remove sphinx directives that don't run without Sphinx environment.
# Sphinx < 1.5 installs all directives on import...
directives._directives.pop('versionadded', None)
directives._directives.pop('versionchanged', None)
directives._directives.pop('moduleauthor', None)
directives._directives.pop('sectionauthor', None)
directives._directives.pop('codeauthor', None)
directives._directives.pop('toctree', None)
BASE_MODULE = "scipy"
PUBLIC_SUBMODULES = [
'cluster',
'cluster.hierarchy',
'cluster.vq',
'constants',
'fftpack',
'fftpack.convolve',
'integrate',
'interpolate',
'io',
'io.arff',
'io.wavfile',
'linalg',
'linalg.blas',
'linalg.lapack',
'linalg.interpolative',
'misc',
'ndimage',
'odr',
'optimize',
'signal',
'signal.windows',
'sparse',
'sparse.csgraph',
'sparse.linalg',
'spatial',
'spatial.distance',
'spatial.transform',
'special',
'stats',
'stats.mstats',
]
# Docs for these modules are included in the parent module
OTHER_MODULE_DOCS = {
'fftpack.convolve': 'fftpack',
'io.wavfile': 'io',
'io.arff': 'io',
}
# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
DOCTEST_SKIPLIST = set([
'scipy.stats.kstwobign', # inaccurate cdf or ppf
'scipy.stats.levy_stable',
'scipy.special.sinc', # comes from numpy
'scipy.misc.who', # comes from numpy
'io.rst', # XXX: need to figure out how to deal w/ mat files
])
# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = [
r'scipy\.sparse\.csgraph',
r'scipy\.sparse\.linalg',
r'scipy\.spatial\.distance',
r'scipy\.linalg\.blas\.[sdczi].*',
r'scipy\.linalg\.lapack\.[sdczi].*',
]
# these names are not required to be in an autosummary:: listing
# despite being in ALL
REFGUIDE_AUTOSUMMARY_SKIPLIST = [
r'scipy\.special\..*_roots', # old aliases for scipy.special.*_roots
r'scipy\.special\.jn', # alias for jv
r'scipy\.linalg\.solve_lyapunov', # deprecated name
]
# deprecated windows in scipy.signal namespace
for name in ('barthann', 'bartlett', 'blackmanharris', 'blackman', 'bohman',
'boxcar', 'chebwin', 'cosine', 'exponential', 'flattop',
'gaussian', 'general_gaussian', 'hamming', 'hann', 'hanning',
'kaiser', 'nuttall', 'parzen', 'slepian', 'triang', 'tukey'):
REFGUIDE_AUTOSUMMARY_SKIPLIST.append(r'scipy\.signal\.' + name)
HAVE_MATPLOTLIB = False
def short_path(path, cwd=None):
"""
Return relative or absolute path name, whichever is shortest.
"""
if not isinstance(path, str):
return path
if cwd is None:
cwd = os.getcwd()
abspath = os.path.abspath(path)
relpath = os.path.relpath(path, cwd)
if len(abspath) <= len(relpath):
return abspath
return relpath
def find_names(module, names_dict):
# Refguide entries:
#
# - 3 spaces followed by function name, and maybe some spaces, some
# dashes, and an explanation; only function names listed in
# refguide are formatted like this (mostly, there may be some false
# positives)
#
# - special directives, such as data and function
#
# - (scipy.constants only): quoted list
#
patterns = [
r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$",
r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"
]
if module.__name__ == 'scipy.constants':
patterns += ["^``([a-z_0-9A-Z]+)``"]
patterns = [re.compile(pattern) for pattern in patterns]
module_name = module.__name__
for line in module.__doc__.splitlines():
res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$", line)
if res:
module_name = res.group(1)
continue
for pattern in patterns:
res = re.match(pattern, line)
if res is not None:
name = res.group(1)
entry = '.'.join([module_name, name])
names_dict.setdefault(module_name, set()).add(name)
break
def get_all_dict(module):
"""Return a copy of the __all__ dict with irrelevant items removed."""
if hasattr(module, "__all__"):
all_dict = copy.deepcopy(module.__all__)
else:
all_dict = copy.deepcopy(dir(module))
all_dict = [name for name in all_dict
if not name.startswith("_")]
for name in ['absolute_import', 'division', 'print_function']:
try:
all_dict.remove(name)
except ValueError:
pass
# Modules are almost always private; real submodules need a separate
# run of refguide_check.
all_dict = [name for name in all_dict
if not inspect.ismodule(getattr(module, name, None))]
deprecated = []
not_deprecated = []
for name in all_dict:
f = getattr(module, name, None)
if callable(f) and is_deprecated(f):
deprecated.append(name)
else:
not_deprecated.append(name)
others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))
return not_deprecated, deprecated, others
def compare(all_dict, others, names, module_name):
"""Return sets of objects only in __all__, refguide, or completely missing."""
only_all = set()
for name in all_dict:
if name not in names:
for pat in REFGUIDE_AUTOSUMMARY_SKIPLIST:
if re.match(pat, module_name + '.' + name):
break
else:
only_all.add(name)
only_ref = set()
missing = set()
for name in names:
if name not in all_dict:
for pat in REFGUIDE_ALL_SKIPLIST:
if re.match(pat, module_name + '.' + name):
if name not in others:
missing.add(name)
break
else:
only_ref.add(name)
return only_all, only_ref, missing
def is_deprecated(f):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
try:
f(**{"not a kwarg":None})
except DeprecationWarning:
return True
except Exception:
pass
return False
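# Illustrative sketch (not part of the original script): is_deprecated() probes
# a callable with a bogus keyword argument while DeprecationWarning is escalated
# to an error; `old` below is a hypothetical deprecated function.
#
#   import warnings
#   def old(*args, **kwargs):
#       warnings.warn("old() is deprecated", DeprecationWarning)
#   assert is_deprecated(old) is True   # the warning is raised as an error
#   assert is_deprecated(len) is False  # the TypeError is swallowed instead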
def check_items(all_dict, names, deprecated, others, module_name, dots=True):
num_all = len(all_dict)
num_ref = len(names)
output = ""
output += "Non-deprecated objects in __all__: %i\n" % num_all
output += "Objects in refguide: %i\n\n" % num_ref
only_all, only_ref, missing = compare(all_dict, others, names, module_name)
dep_in_ref = set(only_ref).intersection(deprecated)
only_ref = set(only_ref).difference(deprecated)
if len(dep_in_ref) > 0:
output += "Deprecated objects in refguide::\n\n"
for name in sorted(deprecated):
output += " " + name + "\n"
if len(only_all) == len(only_ref) == len(missing) == 0:
if dots:
output_dot('.')
return [(None, True, output)]
else:
if len(only_all) > 0:
output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name
for name in sorted(only_all):
output += " " + name + "\n"
output += "\nThis issue can be fixed by adding these objects to\n"
output += "the function listing in __init__.py for this module\n"
if len(only_ref) > 0:
output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name
for name in sorted(only_ref):
output += " " + name + "\n"
output += "\nThis issue should likely be fixed by removing these objects\n"
output += "from the function listing in __init__.py for this module\n"
output += "or adding them to __all__.\n"
if len(missing) > 0:
output += "ERROR: missing objects::\n\n"
for name in sorted(missing):
output += " " + name + "\n"
if dots:
output_dot('F')
return [(None, False, output)]
def validate_rst_syntax(text, name, dots=True):
if text is None:
if dots:
output_dot('E')
return False, "ERROR: %s: no documentation" % (name,)
ok_unknown_items = set([
'mod', 'currentmodule', 'autosummary', 'data',
'obj', 'versionadded', 'versionchanged', 'module', 'class',
'ref', 'func', 'toctree', 'moduleauthor',
'sectionauthor', 'codeauthor', 'eq', 'doi', 'DOI', 'arXiv', 'arxiv'
])
# Run through docutils
error_stream = io.StringIO()
def resolve(name, is_label=False):
return ("http://foo", name)
token = '<RST-VALIDATE-SYNTAX-CHECK>'
docutils.core.publish_doctree(
text, token,
settings_overrides = dict(halt_level=5,
traceback=True,
default_reference_context='title-reference',
default_role='emphasis',
link_base='',
resolve_name=resolve,
stylesheet_path='',
raw_enabled=0,
file_insertion_enabled=0,
warning_stream=error_stream))
# Print errors, disregarding unimportant ones
error_msg = error_stream.getvalue()
errors = error_msg.split(token)
success = True
output = ""
for error in errors:
lines = error.splitlines()
if not lines:
continue
m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
if m:
if m.group(1) in ok_unknown_items:
continue
m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
if m:
continue
output += name + lines[0] + "::\n " + "\n ".join(lines[1:]).rstrip() + "\n"
success = False
if not success:
output += " " + "-"*72 + "\n"
for lineno, line in enumerate(text.splitlines()):
output += " %-4d %s\n" % (lineno+1, line)
output += " " + "-"*72 + "\n\n"
if dots:
output_dot('.' if success else 'F')
return success, output
def output_dot(msg='.', stream=sys.stderr):
stream.write(msg)
stream.flush()
def check_rest(module, names, dots=True):
"""
Check reStructuredText formatting of docstrings
Returns: [(name, success_flag, output), ...]
"""
try:
skip_types = (dict, str, unicode, float, int)
except NameError:
# python 3
skip_types = (dict, str, float, int)
results = []
if module.__name__[6:] not in OTHER_MODULE_DOCS:
results += [(module.__name__,) +
validate_rst_syntax(inspect.getdoc(module),
module.__name__, dots=dots)]
for name in names:
full_name = module.__name__ + '.' + name
obj = getattr(module, name, None)
if obj is None:
results.append((full_name, False, "%s has no docstring" % (full_name,)))
continue
elif isinstance(obj, skip_types):
continue
if inspect.ismodule(obj):
text = inspect.getdoc(obj)
else:
try:
text = str(get_doc_object(obj))
except Exception:
import traceback
results.append((full_name, False,
"Error in docstring format!\n" +
traceback.format_exc()))
continue
m = re.search("([\x00-\x09\x0b-\x1f])", text)
if m:
msg = ("Docstring contains a non-printable character %r! "
"Maybe forgot r\"\"\"?" % (m.group(1),))
results.append((full_name, False, msg))
continue
try:
src_file = short_path(inspect.getsourcefile(obj))
except TypeError:
src_file = None
if src_file:
file_full_name = src_file + ':' + full_name
else:
file_full_name = full_name
results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots))
return results
### Doctest helpers ####
# the namespace to run examples in
DEFAULT_NAMESPACE = {'np': np}
# the namespace to do checks in
CHECK_NAMESPACE = {
'np': np,
'assert_allclose': np.testing.assert_allclose,
'assert_equal': np.testing.assert_equal,
# recognize numpy repr's
'array': np.array,
'matrix': np.matrix,
'int64': np.int64,
'uint64': np.uint64,
'int8': np.int8,
'int32': np.int32,
'float32': np.float32,
'float64': np.float64,
'dtype': np.dtype,
'nan': np.nan,
'NaN': np.nan,
'inf': np.inf,
'Inf': np.inf,}
class DTRunner(doctest.DocTestRunner):
DIVIDER = "\n"
def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
self._item_name = item_name
doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
optionflags=optionflags)
def _report_item_name(self, out, new_line=False):
if self._item_name is not None:
if new_line:
out("\n")
self._item_name = None
def report_start(self, out, test, example):
self._checker._source = example.source
return doctest.DocTestRunner.report_start(self, out, test, example)
def report_success(self, out, test, example, got):
if self._verbose:
self._report_item_name(out, new_line=True)
return doctest.DocTestRunner.report_success(self, out, test, example, got)
def report_unexpected_exception(self, out, test, example, exc_info):
self._report_item_name(out)
return doctest.DocTestRunner.report_unexpected_exception(
self, out, test, example, exc_info)
def report_failure(self, out, test, example, got):
self._report_item_name(out)
return doctest.DocTestRunner.report_failure(self, out, test,
example, got)
class Checker(doctest.OutputChecker):
obj_pattern = re.compile('at 0x[0-9a-fA-F]+>')
int_pattern = re.compile('^[0-9]+L?$')
vanilla = doctest.OutputChecker()
rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary"}
stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
'set_title', 'imshow', 'plt.show', '.axis(', '.plot(',
'.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim',
'# reformatted', '.set_xlabel(', '.set_ylabel(', '.set_zlabel(',
'.set(xlim=', '.set(ylim=', '.set(xlabel=', '.set(ylabel='}
def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):
self.parse_namedtuples = parse_namedtuples
self.atol, self.rtol = atol, rtol
if ns is None:
self.ns = dict(CHECK_NAMESPACE)
else:
self.ns = ns
def check_output(self, want, got, optionflags):
# cut it short if they are equal
if want == got:
return True
# skip stopwords in source
if any(word in self._source for word in self.stopwords):
return True
# skip random stuff
if any(word in want for word in self.rndm_markers):
return True
# skip function/object addresses
if self.obj_pattern.search(got):
return True
# ignore comments (e.g. signal.freqresp)
if want.lstrip().startswith("#"):
return True
# python 2 long integers are equal to python 3 integers
if self.int_pattern.match(want) and self.int_pattern.match(got):
if want.rstrip("L\r\n") == got.rstrip("L\r\n"):
return True
# try the standard doctest
try:
if self.vanilla.check_output(want, got, optionflags):
return True
except Exception:
pass
# OK then, convert strings to objects
try:
a_want = eval(want, dict(self.ns))
a_got = eval(got, dict(self.ns))
except Exception:
# Maybe we're printing a numpy array? This produces invalid python
# code: `print(np.arange(3))` produces "[0 1 2]" w/o commas between
# values. So, reinsert commas and retry.
            # TODO: handle (1) abbreviation (`print(np.arange(10000))`), and
# (2) n-dim arrays with n > 1
s_want = want.strip()
s_got = got.strip()
cond = (s_want.startswith("[") and s_want.endswith("]") and
s_got.startswith("[") and s_got.endswith("]"))
if cond:
s_want = ", ".join(s_want[1:-1].split())
s_got = ", ".join(s_got[1:-1].split())
return self.check_output(s_want, s_got, optionflags)
if not self.parse_namedtuples:
return False
        # suppose that "want" is a tuple, and "got" is something like
# MoodResult(statistic=10, pvalue=0.1).
# Then convert the latter to the tuple (10, 0.1),
# and then compare the tuples.
try:
num = len(a_want)
            regex = (r'[\w\d_]+\(' +
                     ', '.join([r'[\w\d_]+=(.+)'] * num) +
                     r'\)')
grp = re.findall(regex, got.replace('\n', ' '))
if len(grp) > 1: # no more than one for now
return False
# fold it back to a tuple
got_again = '(' + ', '.join(grp[0]) + ')'
return self.check_output(want, got_again, optionflags)
except Exception:
return False
# ... and defer to numpy
try:
return self._do_check(a_want, a_got)
except Exception:
            # heterogeneous tuple, e.g. (1, np.array([1., 2.]))
try:
return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
except (TypeError, ValueError):
return False
def _do_check(self, want, got):
# This should be done exactly as written to correctly handle all of
# numpy-comparable objects, strings, and heterogeneous tuples
try:
if want == got:
return True
except Exception:
pass
return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
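# A hedged illustration (added; not part of the original file): the
# comma-reinsertion fallback in Checker.check_output turns numpy's
# whitespace-separated repr back into evaluable Python.  For example,
#
#     s_want = "[0 1 2]"                 # as printed by print(np.arange(3))
#     ", ".join(s_want[1:-1].split())    # -> "0, 1, 2", which eval()s to
#                                        #    the tuple (0, 1, 2)
#
# after which the comparison is deferred to np.allclose in _do_check.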
def _run_doctests(tests, full_name, verbose, doctest_warnings):
"""Run modified doctests for the set of `tests`.
Returns: list of [(success_flag, output), ...]
"""
flags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL
runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
verbose=verbose)
output = []
success = True
def out(msg):
output.append(msg)
class MyStderr(object):
"""Redirect stderr to the current stdout"""
def write(self, msg):
if doctest_warnings:
sys.stdout.write(msg)
else:
out(msg)
# Run tests, trying to restore global state afterward
old_printoptions = np.get_printoptions()
old_errstate = np.seterr()
old_stderr = sys.stderr
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
sys.stderr = MyStderr()
try:
os.chdir(tmpdir)
# try to ensure random seed is NOT reproducible
np.random.seed(None)
for t in tests:
t.filename = short_path(t.filename, cwd)
fails, successes = runner.run(t, out=out)
if fails > 0:
success = False
finally:
sys.stderr = old_stderr
os.chdir(cwd)
shutil.rmtree(tmpdir)
np.set_printoptions(**old_printoptions)
np.seterr(**old_errstate)
return success, output
def check_doctests(module, verbose, ns=None,
dots=True, doctest_warnings=False):
"""Check code in docstrings of the module's public symbols.
Returns: list of [(item_name, success_flag, output), ...]
"""
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
# Loop over non-deprecated items
results = []
for name in get_all_dict(module)[0]:
full_name = module.__name__ + '.' + name
if full_name in DOCTEST_SKIPLIST:
continue
try:
obj = getattr(module, name)
except AttributeError:
import traceback
results.append((full_name, False,
"Missing item!\n" +
traceback.format_exc()))
continue
finder = doctest.DocTestFinder()
try:
tests = finder.find(obj, name, globs=dict(ns))
except Exception:
import traceback
results.append((full_name, False,
"Failed to get doctests!\n" +
traceback.format_exc()))
continue
success, output = _run_doctests(tests, full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, "".join(output)))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def check_doctests_testfile(fname, verbose, ns=None,
dots=True, doctest_warnings=False):
"""Check code in a text file.
Mimic `check_doctests` above, differing mostly in test discovery.
(which is borrowed from stdlib's doctest.testfile here,
https://github.com/python-git/python/blob/master/Lib/doctest.py)
Returns: list of [(item_name, success_flag, output), ...]
Notes
-----
We also try to weed out pseudocode:
    * We maintain a list of markers which signal pseudocode,
    * We split the text file into "blocks" of code separated by empty lines
      and/or intervening text.
    * If a block contains a marker, the whole block is assumed to be
      pseudocode and is not doctested.
The rationale is that typically, the text looks like this:
blah
<BLANKLINE>
>>> from numpy import some_module # pseudocode!
>>> func = some_module.some_function
>>> func(42) # still pseudocode
146
<BLANKLINE>
blah
<BLANKLINE>
>>> 2 + 3 # real code, doctest it
5
"""
results = []
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
_, short_name = os.path.split(fname)
if short_name in DOCTEST_SKIPLIST:
return results
full_name = fname
if sys.version_info.major <= 2:
with open(fname) as f:
text = f.read()
else:
with open(fname, encoding='utf-8') as f:
text = f.read()
PSEUDOCODE = set(['some_function', 'some_module', 'import example',
                      'ctypes.CDLL',           # likely needs compiling, skip it
                      'integrate.nquad(func,'  # ctypes integrate tutorial
])
# split the text into "blocks" and try to detect and omit pseudocode blocks.
parser = doctest.DocTestParser()
good_parts = []
for part in text.split('\n\n'):
tests = parser.get_doctest(part, ns, fname, fname, 0)
if any(word in ex.source for word in PSEUDOCODE
for ex in tests.examples):
# omit it
pass
else:
            # `part` looks like good code, let's doctest it
good_parts += [part]
# Reassemble the good bits and doctest them:
good_text = '\n\n'.join(good_parts)
tests = parser.get_doctest(good_text, ns, fname, fname, 0)
success, output = _run_doctests([tests], full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, "".join(output)))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def init_matplotlib():
global HAVE_MATPLOTLIB
try:
import matplotlib
matplotlib.use('Agg')
HAVE_MATPLOTLIB = True
except ImportError:
HAVE_MATPLOTLIB = False
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("module_names", metavar="SUBMODULES", default=[],
nargs='*', help="Submodules to check (default: all public)")
parser.add_argument("--doctests", action="store_true", help="Run also doctests")
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument("--doctest-warnings", action="store_true",
help="Enforce warning checking for doctests")
parser.add_argument("--skip-tutorial", action="store_true",
help="Skip running doctests in the tutorial.")
args = parser.parse_args(argv)
modules = []
names_dict = {}
if args.module_names:
args.skip_tutorial = True
else:
args.module_names = list(PUBLIC_SUBMODULES)
os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'
module_names = list(args.module_names)
for name in list(module_names):
if name in OTHER_MODULE_DOCS:
name = OTHER_MODULE_DOCS[name]
if name not in module_names:
module_names.append(name)
for submodule_name in module_names:
module_name = BASE_MODULE + '.' + submodule_name
__import__(module_name)
module = sys.modules[module_name]
if submodule_name not in OTHER_MODULE_DOCS:
find_names(module, names_dict)
if submodule_name in args.module_names:
modules.append(module)
dots = True
success = True
results = []
print("Running checks for %d modules:" % (len(modules),))
if args.doctests or not args.skip_tutorial:
init_matplotlib()
for module in modules:
if dots:
if module is not modules[0]:
sys.stderr.write(' ')
sys.stderr.write(module.__name__ + ' ')
sys.stderr.flush()
all_dict, deprecated, others = get_all_dict(module)
names = names_dict.get(module.__name__, set())
mod_results = []
mod_results += check_items(all_dict, names, deprecated, others, module.__name__)
mod_results += check_rest(module, set(names).difference(deprecated),
dots=dots)
if args.doctests:
mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
for v in mod_results:
assert isinstance(v, tuple), v
results.append((module, mod_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
if not args.skip_tutorial:
base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
tut_path = os.path.join(base_dir, 'doc', 'source', 'tutorial', '*.rst')
print('\nChecking tutorial files at %s:' % os.path.relpath(tut_path, os.getcwd()))
for filename in sorted(glob.glob(tut_path)):
if dots:
sys.stderr.write('\n')
sys.stderr.write(os.path.split(filename)[1] + ' ')
sys.stderr.flush()
tut_results = check_doctests_testfile(filename, (args.verbose >= 2),
dots=dots, doctest_warnings=args.doctest_warnings)
def scratch(): pass # stub out a "module", see below
scratch.__name__ = filename
results.append((scratch, tut_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
# Report results
all_success = True
for module, mod_results in results:
success = all(x[1] for x in mod_results)
all_success = all_success and success
if success and args.verbose == 0:
continue
print("")
print("=" * len(module.__name__))
print(module.__name__)
print("=" * len(module.__name__))
print("")
for name, success, output in mod_results:
if name is None:
if not success or args.verbose >= 1:
print(output.strip())
print("")
elif not success or (args.verbose >= 2 and output.strip()):
print(name)
print("-"*len(name))
print("")
print(output.strip())
print("")
if all_success:
print("\nOK: refguide and doctests checks passed!")
sys.exit(0)
else:
print("\nERROR: refguide or doctests have errors")
sys.exit(1)
if __name__ == '__main__':
main(argv=sys.argv[1:])
|
gfyoung/scipy
|
tools/refguide_check.py
|
Python
|
bsd-3-clause
| 31,064
|
[
"Gaussian"
] |
080f61ac906e78cc22ebdf4cc69701dede94b79175fdefedcb263de83e72fdc6
|
import sys
import os.path
#sys.path.insert(0, '/home/andy/theano/tool_examples/theano-lstm-0.0.15')
from theano_lstm import Embedding, LSTM, RNN, StackedCells, Layer, create_optimization_updates, masked_loss
from utilities import *
import dill
import argparse
#import cPickle
import pickle
import numpy as np  # `np` is used below (np.int32, np.random, np.empty, ...)
from collections import OrderedDict
import theano, theano.tensor as T
import turing_model
from theano_toolkit.parameters import Parameters
from theano.compile.nanguardmode import NanGuardMode
DESCRIPTION = """
Recurrent neural network based statistical language modelling toolkit
(based on LSTM algorithm)
Implemented by Daniel Soutner,
Department of Cybernetics, University of West Bohemia, Plzen, Czech rep.
dsoutner@kky.zcu.cz, 2013
"""
def parse_args(parser):
    parser.add_argument('--train', nargs=1, action="store", metavar="FILE",
                        help='training file')
    parser.add_argument('--valid', nargs=1, action="store", metavar="FILE",
                        help='validation file')
    parser.add_argument('--test', nargs=1, action="store", metavar="FILE",
                        help='test file for computing ppl')
parser.add_argument('--neuron-type', action="store", dest='celltype',
help='type of hidden neurons, RNN/LSTM, default: RNN', type=str, default='RNN')
parser.add_argument('--train-method', action="store", dest='train_method',
help='training method LSTM/TURING/ALL, default: ALL', type=str, default='ALL')
parser.add_argument('--projection-size', action="store", dest='n_projection',
help='Number of neurons in projection layer, default: 100', type=int, default=100)
parser.add_argument('--hidden-size', action="store", dest='n_hidden',
help='Number of neurons in hidden layer, default: 100', type=int, default=100)
parser.add_argument('--stack', action="store", dest='n_stack',
                        help='Number of stacked hidden layers, default: 1', type=int, default=1)
parser.add_argument('--learning-rate', action="store", dest='lr',
                        help='learning rate at the beginning, default: 0.01', type=float, default=0.01)
parser.add_argument('--improvement-rate', action="store", dest='improvement_rate',
                        help='relative improvement threshold for early stopping on ppl, default: 0.005', type=float, default=0.005)
parser.add_argument('--minibatch-size', action="store", dest='minibatch_size',
help='minibatch size for training, default: 100', type=int, default=100)
parser.add_argument('--max-epoch', action="store", dest='max_epoch',
help='maximum number of epoch if not early stopping, default: 1000', type=int, default=1000)
parser.add_argument('--early-stop', action="store", dest='early_stop',
help='1 for early-stopping, 0 for not', type=int, default=1)
parser.add_argument('--save-net', action="store", dest="save_net", default=None, metavar="FILE",
help="Save RNN to file")
parser.add_argument('--load-net', action="store", dest="load_net", default=None, metavar="FILE",
help="Load RNN from file")
return parser.parse_args()
def build_vocab(data_file_str):
lines = []
data_file = open(data_file_str)
for line in data_file:
tokens = line.replace('\n','.')
lines.append(tokens)
data_file.close()
vocab = Vocab()
for line in lines:
vocab.add_words(line.split(" "))
return vocab
def load_data(data_file_str, vocab, data_type):
lines = []
data_file = open(data_file_str)
for line in data_file:
tokens = line.replace('\n','.')
        # drop overly long sentences from the training set; they take too much time and hurt performance
tokens_for_count = line.replace('\n','').split(' ')
if len(tokens_for_count) > 50 and data_type == 'train':
continue
lines.append(tokens)
data_file.close()
# transform into big numerical matrix of sentences:
numerical_lines = []
for line in lines:
numerical_lines.append(vocab(line))
numerical_lines, numerical_lengths = pad_into_matrix(numerical_lines)
return numerical_lines, numerical_lengths
def softmax(x):
"""
Wrapper for softmax, helps with
pickling, and removing one extra
dimension that Theano adds during
its exponential normalization.
"""
return T.nnet.softmax(x.T)
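# A hedged usage sketch (added): T.nnet.softmax normalizes each row of a
# matrix, so transposing first makes this wrapper normalize over what was
# the first axis of `x`, e.g.
#
#     x = T.matrix()
#     f = theano.function([x], softmax(x))
#     f(np.ones((3, 2), dtype=theano.config.floatX)).shape   # -> (2, 3)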
def has_hidden(layer):
"""
Whether a layer has a trainable
initial hidden state.
"""
return hasattr(layer, 'initial_hidden_state')
def matrixify(vector, n):
return T.repeat(T.shape_padleft(vector), n, axis=0)
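# Hedged example (added): matrixify tiles a vector into n identical rows,
# e.g. a hidden state of shape (d,) becomes a batch-ready matrix (n, d):
#
#     v = T.vector()
#     f = theano.function([v], matrixify(v, 4))
#     f(np.arange(3, dtype=theano.config.floatX)).shape      # -> (4, 3)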
def initial_state(layer, dimensions = None):
"""
    Initializes the recurrence relation with an initial hidden state
if needed, else replaces with a "None" to tell Theano that
the network **will** return something, but it does not need
to send it to the next step of the recurrence
"""
if dimensions is None:
return layer.initial_hidden_state if has_hidden(layer) else None
else:
return matrixify(layer.initial_hidden_state, dimensions) if has_hidden(layer) else None
def initial_state_with_taps(layer, dimensions = None):
"""Optionally wrap tensor variable into a dict with taps=[-1]"""
state = initial_state(layer, dimensions)
if state is not None:
return dict(initial=state, taps=[-1])
else:
return None
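# Hedged note (added): theano.scan's outputs_info accepts None for outputs
# with no recurrent feedback, or a dict such as
#
#     dict(initial=layer.initial_hidden_state, taps=[-1])
#
# meaning "feed this output from one time step back into the next call";
# that is exactly what this wrapper builds for layers that carry state.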
class Model:
"""
Simple predictive model for forecasting words from
sequence using LSTMs. Choose how many LSTMs to stack
what size their memory should be, and how many
words can be predicted.
"""
def __init__(self, hidden_size, input_size, vocab_size, stack_size=1, celltype=LSTM):
# core layer in RNN/LSTM
self.model = StackedCells(input_size, celltype=celltype, layers =[hidden_size] * stack_size)
# add an embedding
self.model.layers.insert(0, Embedding(vocab_size, input_size))
# add a classifier:
self.model.layers.append(Layer(hidden_size, vocab_size, activation = softmax))
self.turing_params = Parameters()
#init turing machine model
self.turing_updates , self.turing_predict = turing_model.build(self.turing_params , hidden_size , input_size, vocab_size)
# inputs are matrices of indices,
# each row is a sentence, each column a timestep
self._stop_word = theano.shared(np.int32(999999999), name="stop word")
self.for_how_long = T.ivector()
self.input_mat = T.imatrix()
self.priming_word = T.iscalar()
self.srng = T.shared_randomstreams.RandomStreams(np.random.randint(0, 1024))
# create symbolic variables for prediction:
        # changed by darong  # issue: what does `greedy` mean here?
self.lstm_predictions = self.create_lstm_prediction()
self.final_predictions = self.create_final_prediction()
# create symbolic variable for greedy search:
self.greedy_predictions = self.create_lstm_prediction(greedy=True)
# create gradient training functions:
self.create_cost_fun()#create 2 cost func(lstm final)
self.lstm_lr = 0.01
self.turing_lr = 1e-7
self.all_lr = 1e-6
self.create_training_function()#create 3 functions(lstm turing all)
self.create_predict_function()#create 2 predictions(lstm final)
# create ppl
self.lstm_ppl = self.create_lstm_ppl()
self.final_ppl = self.create_final_ppl()
self.create_ppl_function()
def save(self, save_file, vocab):
pickle.dump(self.model, open(save_file, "wb")) # pickle is for lambda function, cPickle cannot
pickle.dump(vocab, open(save_file+'.vocab', "wb")) # pickle is for lambda function, cPickle cannot
def save_turing(self, save_file):
self.turing_params.save(save_file + '.turing')
def load(self, load_file, lr):
self.model = pickle.load(open(load_file, "rb"))
if os.path.isfile(load_file + '.turing') :
self.turing_params.load(load_file + '.turing')
else :
print "no turing model!!!! pretrain with lstm param"
self.turing_params['W_input_hidden'] = self.model.layers[-1].params[0].get_value().T #not sure
self.turing_params['b_hidden_0'] = self.model.layers[-1].params[1].get_value()
self.turing_params['memory_init'] = self.model.layers[0].params[0].get_value()[:1,:]
# need to compile again for calculating predictions after loading lstm
self.srng = T.shared_randomstreams.RandomStreams(np.random.randint(0, 1024))
self.lstm_predictions = self.create_lstm_prediction()
self.final_predictions = self.create_final_prediction()
self.greedy_predictions = self.create_lstm_prediction(greedy=True)#can change to final
self.create_cost_fun()#create 2 cost func(lstm final)
self.lstm_lr = lr
self.turing_lr = lr#change this
self.all_lr = lr
self.create_training_function()#create 3 functions(lstm turing all)
self.create_predict_function()#create 2 predictions(lstm final)
self.lstm_ppl = self.create_lstm_ppl()
self.final_ppl = self.create_final_ppl()
self.create_ppl_function()
print "done loading model"
# print "done compile"
def stop_on(self, idx):
self._stop_word.set_value(idx)
@property
def params(self):
return self.model.params
def create_lstm_prediction(self, greedy=False):
def step(idx, *states):
# new hiddens are the states we need to pass to LSTMs
# from past. Because the StackedCells also include
# the embeddings, and those have no state, we pass
# a "None" instead:
new_hiddens = [None] + list(states)
new_states = self.model.forward(idx, prev_hiddens = new_hiddens)
if greedy:
new_idxes = new_states[-1]
new_idx = new_idxes.argmax()
# provide a stopping condition for greedy search:
return ([new_idx.astype(self.priming_word.dtype)] + new_states[1:-1]), theano.scan_module.until(T.eq(new_idx,self._stop_word))
else:
return new_states[1:]
        # in the sequence-forecasting scenario we take everything up to the
        # second-to-last step and predict the subsequent steps, i.e.
        # 0 ... n - 1, hence:
inputs = self.input_mat[:, 0:-1]
num_examples = inputs.shape[0]
# pass this to Theano's recurrence relation function:
# choose what gets outputted at each timestep:
if greedy:
outputs_info = [dict(initial=self.priming_word, taps=[-1])] + [initial_state_with_taps(layer) for layer in self.model.layers[1:-1]]
result, _ = theano.scan(fn=step,
n_steps=200,
outputs_info=outputs_info)
else:
outputs_info = [initial_state_with_taps(layer, num_examples) for layer in self.model.layers[1:]]
result, _ = theano.scan(fn=step,
sequences=[inputs.T],
outputs_info=outputs_info)
if greedy:
return result[0]
# softmaxes are the last layer of our network,
# and are at the end of our results list:
return result[-1].transpose((2,0,1))
# we reorder the predictions to be:
# 1. what row / example
# 2. what timestep
# 3. softmax dimension
def create_final_prediction(self, greedy=False):
def step(idx, *states):
# new hiddens are the states we need to pass to LSTMs
# from past. Because the StackedCells also include
# the embeddings, and those have no state, we pass
# a "None" instead:
new_hiddens = [None] + list(states)
new_states = self.model.forward(idx, prev_hiddens = new_hiddens)
if greedy:
new_idxes = new_states[-1]
new_idx = new_idxes.argmax()
# provide a stopping condition for greedy search:
return ([new_idx.astype(self.priming_word.dtype)] + new_states[1:-1]), theano.scan_module.until(T.eq(new_idx,self._stop_word))
else:
return new_states
        # in the sequence-forecasting scenario we take everything up to the
        # second-to-last step and predict the subsequent steps, i.e.
        # 0 ... n - 1, hence:
inputs = self.input_mat[:, 0:-1]
num_examples = inputs.shape[0]
# pass this to Theano's recurrence relation function:
# choose what gets outputted at each timestep:
if greedy:
outputs_info = [dict(initial=self.priming_word, taps=[-1])] + [initial_state_with_taps(layer) for layer in self.model.layers[1:-1]]
result, _ = theano.scan(fn=step,
n_steps=200,
outputs_info=outputs_info)
else:
outputs_info_temp = [initial_state_with_taps(layer, num_examples) for layer in self.model.layers[1:]]
outputs_info = [None] + outputs_info_temp
result, _ = theano.scan(fn=step,
sequences=[inputs.T],
outputs_info=outputs_info)
if greedy:
return result[0]
# softmaxes are the last layer of our network,
# and are at the end of our results list:
hidden_size = result[-2].shape[2]/2
turing_result = self.turing_predict(result[-2][:,:,hidden_size:],result[-3])
        # the last layer transposes before computing
return turing_result.transpose((1,0,2))
# we reorder the predictions to be:
# 1. what row / example
# 2. what timestep
# 3. softmax dimension
def create_cost_fun (self):
# create a cost function that
# takes each prediction at every timestep
# and guesses next timestep's value:
what_to_predict = self.input_mat[:, 1:]
# because some sentences are shorter, we
# place masks where the sentences end:
        # (`for_how_long` is zero-indexed, e.g. an example spanning `[2, 3)`
        # has this value set to 0, hence we subtract 1):
for_how_long = self.for_how_long - 1
# all sentences start at T=0:
starting_when = T.zeros_like(self.for_how_long)
self.lstm_cost = masked_loss(self.lstm_predictions,
what_to_predict,
for_how_long,
starting_when).sum()
self.final_cost = masked_loss(self.final_predictions,
what_to_predict,
for_how_long,
starting_when).sum()
def create_predict_function(self):
self.lstm_pred_fun = theano.function(
inputs=[self.input_mat],
outputs=self.lstm_predictions,
allow_input_downcast=True
)
self.final_pred_fun = theano.function(
inputs=[self.input_mat],
outputs=self.final_predictions,
allow_input_downcast=True
)
self.greedy_fun = theano.function(
inputs=[self.priming_word],
outputs=T.concatenate([T.shape_padleft(self.priming_word), self.greedy_predictions]),
allow_input_downcast=True
)
def create_training_function(self):
updates, _, _, _, _ = create_optimization_updates(self.lstm_cost, self.params, method="SGD", lr=self.lstm_lr)
# updates, _, _, _, _ = create_optimization_updates(self.cost, self.params, method="adadelta", lr=self.lr)
self.lstm_update_fun = theano.function(
inputs=[self.input_mat, self.for_how_long],
outputs=self.lstm_cost,
updates=updates,
allow_input_downcast=True)
updates_turing = self.turing_updates(self.final_cost , lr=self.turing_lr)
# updates, _, _, _, _ = create_optimization_updates(self.cost, self.params, method="adadelta", lr=self.lr)
self.turing_update_fun = theano.function(
inputs=[self.input_mat, self.for_how_long],
outputs=self.final_cost,
updates=updates_turing,
mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True),
allow_input_downcast=True)
all_updates_lstm, _, _, _, _ = create_optimization_updates(self.final_cost, self.params, method="SGD", lr=self.all_lr,part=True)
all_updates_turing_temp = self.turing_updates(self.final_cost , lr=self.all_lr)
updates_all = all_updates_lstm
for pair in all_updates_turing_temp :
updates_all[pair[0]] = pair[1]
self.all_update_fun = theano.function(
inputs=[self.input_mat, self.for_how_long],
outputs=self.final_cost,
updates=updates_all,
allow_input_downcast=True)
def create_lstm_ppl(self):
def timestep(predictions, label, len_example, total_len_example):
label_binary = T.gt(label[0:len_example-1], 0)
oov_count = T.shape(label_binary)[0] - T.sum(label_binary)
a = total_len_example
return T.sum(T.log( 1./ predictions[T.arange(len_example-1), label[0:len_example-1]]) * label_binary ), oov_count
result, _ = theano.scan(fn=timestep,
sequences=[ self.lstm_predictions, self.input_mat[:, 1:], self.for_how_long ],
non_sequences=T.sum(self.for_how_long))
oov_count_total = T.sum(result[1])
return T.exp(T.sum(result[0]).astype(theano.config.floatX)/(T.sum(self.for_how_long) - oov_count_total).astype(theano.config.floatX)).astype(theano.config.floatX)
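    # Hedged note (added): the perplexity above is the exponential of the
    # average negative log-likelihood over non-OOV tokens,
    #
    #     ppl = exp( sum_t log(1 / p_t) / (N_tokens - N_oov) ),
    #
    # where label index 0 is treated as OOV via the label_binary mask.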
def create_final_ppl(self):
def timestep(predictions, label, len_example, total_len_example):
label_binary = T.gt(label[0:len_example-1], 0)
oov_count = T.shape(label_binary)[0] - T.sum(label_binary)
a = total_len_example
return T.sum(T.log( 1./ predictions[T.arange(len_example-1), label[0:len_example-1]]) * label_binary ), oov_count
result, _ = theano.scan(fn=timestep,
sequences=[ self.final_predictions, self.input_mat[:, 1:], self.for_how_long ],
non_sequences=T.sum(self.for_how_long))
oov_count_total = T.sum(result[1])
return T.exp(T.sum(result[0]).astype(theano.config.floatX)/(T.sum(self.for_how_long) - oov_count_total).astype(theano.config.floatX)).astype(theano.config.floatX)
def create_ppl_function(self):
self.lstm_ppl_fun = theano.function(
inputs=[self.input_mat, self.for_how_long],
outputs=self.lstm_ppl,
allow_input_downcast=True)
self.final_ppl_fun = theano.function(
inputs=[self.input_mat, self.for_how_long],
outputs=self.final_ppl,
allow_input_downcast=True)
    def __call__(self, x):
        # the original called a nonexistent self.pred_fun ("any problem??");
        # the final (turing) prediction function is the intended default here
        return self.final_pred_fun(x)
def get_minibatch(full_data, full_lengths, minibatch_size, minibatch_idx):
lengths = []
for j in range(minibatch_size):
lengths.append(full_lengths[minibatch_size * minibatch_idx + j])
    # the rows are already padded to a common width, so slicing suffices;
    # the original pre-allocated an np.empty buffer that was immediately
    # overwritten, which is dropped here
    minibatch_data = full_data[minibatch_size * minibatch_idx: minibatch_size * (minibatch_idx+1), :]
return minibatch_data, lengths
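# Hedged usage sketch (added): pulls the `minibatch_idx`-th slice of the
# padded sentence matrix together with the true (unpadded) lengths, e.g.
#
#     data, lengths = get_minibatch(train_data, train_lengths, 100, 0)
#     # data.shape == (100, padded_width); len(lengths) == 100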
def training(args, vocab, train_data, train_lengths, valid_data, valid_lengths):
# training information
print 'training information'
print '-------------------------------------------------------'
print 'method: %s' % args.train_method
print 'vocab size: %d' % len(vocab)
print 'sentences in training file: %d' % len(train_lengths)
print 'max length in training file: %d' % max(train_lengths)
print 'train file: %s' % args.train[0]
print 'valid file: %s' % args.valid[0]
print 'type: %s' % args.celltype
print 'project: %d' % args.n_projection
print 'hidden: %d' % args.n_hidden
print 'stack: %d' % args.n_stack
print 'learning rate: %f' % args.lr
print 'minibatch size: %d' % args.minibatch_size
print 'max epoch: %d' % args.max_epoch
print 'improvement rate: %f' % args.improvement_rate
print 'save file: %s' % args.save_net
print 'load_model: %s' % args.load_net
print 'early-stop: %r' % args.early_stop
print '-------------------------------------------------------'
if args.celltype == 'LSTM':
celltype = LSTM
elif args.celltype == 'RNN':
celltype = RNN
print 'start initializing model'
# construct model & theano functions:
model = Model(
input_size=args.n_projection,
hidden_size=args.n_hidden,
vocab_size=len(vocab),
stack_size=args.n_stack, # make this bigger, but makes compilation slow
celltype=celltype # use RNN or LSTM
)
if args.lr :
model.lstm_lr = args.lr
model.turing_lr = args.lr
model.all_lr = args.lr
model.stop_on(vocab.word2index["."])
if args.load_net :
if args.lr :
model.load(args.load_net, args.lr)# 0 is useless
else :
model.load(args.load_net, 0)
# train:
    # select the correct train and prediction method according to train_method (LSTM/TURING/ALL)
if args.train_method == 'LSTM' :
update_fun = model.lstm_update_fun
ppl_fun = model.lstm_ppl_fun
lr = model.lstm_lr
print 'update lstm learning rate : %f' % model.lstm_lr
elif args.train_method == 'TURING' :
update_fun = model.turing_update_fun
ppl_fun = model.final_ppl_fun
lr = model.turing_lr
print 'update turing learning rate : %f' % model.turing_lr
else :
update_fun = model.all_update_fun
ppl_fun = model.final_ppl_fun
lr = model.all_lr
print 'update all learning rate : %f' % model.all_lr
stop_count = 0 # for stop training
change_count = 0 # for change learning rate
print 'start training'
min_valid_ppl = float('inf')
for epoch in range(args.max_epoch):
print "\nepoch %d" % epoch
# minibatch part
minibatch_size = args.minibatch_size # how many examples in a minibatch
n_train_batches = len(train_lengths)/minibatch_size
train_ppl = 0
for minibatch_idx in range(n_train_batches):
minibatch_train_data, lengths = get_minibatch(train_data, train_lengths, minibatch_size, minibatch_idx)
error = update_fun(minibatch_train_data , list(lengths) )
minibatch_train_ppl = ppl_fun(minibatch_train_data, list(lengths))
train_ppl = train_ppl + minibatch_train_ppl * sum(lengths)
sys.stdout.write( '\n%d minibatch idx / %d total minibatch, ppl: %f '% (minibatch_idx+1, n_train_batches, minibatch_train_ppl) )
sys.stdout.flush() # important
        # handle the remaining minibatch, if any
if (minibatch_idx + 1) * minibatch_size != len(train_lengths):
minibatch_idx = minibatch_idx + 1
n_rest_example = len(train_lengths) - minibatch_size * minibatch_idx
minibatch_train_data, lengths = get_minibatch(train_data, train_lengths, n_rest_example, minibatch_idx)
error = update_fun(minibatch_train_data , list(lengths) )
minibatch_train_ppl = ppl_fun(minibatch_train_data, list(lengths))
train_ppl = train_ppl + minibatch_train_ppl * sum(lengths)
train_ppl = train_ppl / sum(train_lengths)
# print 'done training'
# valid ppl
minibatch_size = min(20, len(valid_lengths))
valid_ppl = 0
n_valid_batches = len(valid_lengths)/minibatch_size
for minibatch_idx in range(n_valid_batches):
minibatch_valid_data, lengths = get_minibatch(valid_data, valid_lengths, minibatch_size, minibatch_idx)
minibatch_valid_ppl = ppl_fun(minibatch_valid_data, list(lengths))
valid_ppl = valid_ppl + minibatch_valid_ppl * sum(lengths)
# last minibatch
minibatch_idx = minibatch_idx + 1
n_rest_example = len(valid_lengths) - minibatch_size * minibatch_idx
minibatch_valid_data, lengths = get_minibatch(valid_data, valid_lengths, n_rest_example, minibatch_idx)
        minibatch_valid_ppl = ppl_fun(minibatch_valid_data, list(lengths))  # fixed: use the rest minibatch, not the full set
valid_ppl = valid_ppl + minibatch_valid_ppl * sum(lengths)
valid_ppl = valid_ppl / sum(valid_lengths)
print "\ntrain ppl: %f, valid ppl: %f" % (train_ppl, valid_ppl)
if valid_ppl < min_valid_ppl:
min_valid_ppl = valid_ppl
model.save(args.save_net, vocab)
if args.train_method != 'LSTM' :
model.save_turing(args.save_net)
stop_count = 0
change_count = 0
print "save best model"
continue
if args.early_stop:
if (valid_ppl - min_valid_ppl) / min_valid_ppl > args.improvement_rate:
if stop_count > 2 or lr < 1e-6:
print 'stop training'
break
stop_count = stop_count + 1
elif (valid_ppl - min_valid_ppl) / min_valid_ppl > args.improvement_rate * 0.5:
# if change_count > 2:
print 'change learning rate from %f to %f' % (lr, lr/2)
model.lstm_lr = model.lstm_lr / 2.
model.turing_lr = model.turing_lr / 2.
model.all_lr = model.all_lr / 2.
if args.train_method == 'LSTM' :
lr = model.lstm_lr
elif args.train_method == 'TURING' :
lr = model.turing_lr
else :
lr = model.all_lr
# change_count = change_count + 1
def testing(args, test_data, test_lengths):
print 'start loading'
model_load = Model(
input_size=1,
hidden_size=1,
vocab_size=1,
stack_size=1, # make this bigger, but makes compilation slow
celltype=RNN # use RNN or LSTM
)
model_load.stop_on(vocab.word2index["."])
if args.train_method != 'LSTM' :
if not os.path.isfile(args.load_net + '.turing') :
print "there is no trained turing file so we can't test by turing model!!"
sys.exit()
model_load.load(args.load_net, 0)
# test ppl
    # select the correct train and prediction method according to train_method (LSTM/TURING/ALL)
if args.train_method == 'LSTM' :
ppl_fun = model_load.lstm_ppl_fun
else :
ppl_fun = model_load.final_ppl_fun
minibatch_size = min(20, len(test_lengths))
test_ppl = 0
n_test_batches = len(test_lengths)/minibatch_size
for minibatch_idx in range(n_test_batches):
minibatch_test_data, lengths = get_minibatch(test_data, test_lengths, minibatch_size, minibatch_idx)
minibatch_test_ppl = ppl_fun(minibatch_test_data, list(lengths))
test_ppl = test_ppl + minibatch_test_ppl * sum(lengths)
# last minibatch
minibatch_idx = minibatch_idx + 1
n_rest_example = len(test_lengths) - minibatch_size * minibatch_idx
minibatch_test_data, lengths = get_minibatch(test_data, test_lengths, n_rest_example, minibatch_idx)
    minibatch_test_ppl = ppl_fun(minibatch_test_data, list(lengths))  # fixed: use the rest minibatch, not the full set
test_ppl = test_ppl + minibatch_test_ppl * sum(lengths)
test_ppl = test_ppl / sum(test_lengths)
print "test ppl: %f" %test_ppl
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=DESCRIPTION)
args = parse_args(parser)
# if no args are passed
if len(sys.argv) == 1:
parser.print_help()
sys.exit()
if args.train:
vocab = build_vocab(args.train[0])
train_data, train_lengths = load_data(args.train[0], vocab, 'train')
valid_data, valid_lengths = load_data(args.valid[0], vocab, 'valid')
training(args, vocab, train_data, train_lengths, valid_data, valid_lengths)
elif args.test:
vocab = pickle.load(open(args.load_net+'.vocab', "rb"))
test_data, test_lengths = load_data(args.test[0], vocab, 'test')
testing(args, test_data, test_lengths)
|
darongliu/Lstm_Turing_LM
|
lstm-neural-turing-machines-lm/v2-no-beta-gamma/lm_v4.py
|
Python
|
mit
| 25,515
|
[
"NEURON"
] |
e156548a2d21398fc6965d7cbdee10425a636b7d1f1d15d2ff117d59b51ee7a5
|
#!/usr/bin/env python
'''
Convert the k-sampled MO/integrals to corresponding Gamma-point supercell
MO/integrals.
Zhihao Cui <zcui@caltech.edu>
See also the original implementation at
https://github.com/zhcui/local-orbital-and-cdft/blob/master/k2gamma.py
'''
import numpy as np
from pyscf.pbc import gto, dft
from pyscf.pbc import tools
from pyscf.pbc.tools import k2gamma
cell = gto.Cell()
cell.atom = '''
H 0. 0. 0.
H 0.5 0.3 0.4
'''
cell.basis = 'gth-dzvp'
cell.pseudo = 'gth-pade'
cell.a = np.eye(3) * 4.
cell.unit='B'
cell.build()
kmesh = [2, 2, 1]
kpts = cell.make_kpts(kmesh)
print("Transform k-point integrals to supercell integral")
scell, phase = k2gamma.get_phase(cell, kpts)
NR, Nk = phase.shape
nao = cell.nao
s_k = cell.pbc_intor('int1e_ovlp', kpts=kpts)
s = scell.pbc_intor('int1e_ovlp')
s1 = np.einsum('Rk,kij,Sk->RiSj', phase, s_k, phase.conj())
print(abs(s-s1.reshape(s.shape)).max())
s = scell.pbc_intor('int1e_ovlp').reshape(NR,nao,NR,nao)
s1 = np.einsum('Rk,RiSj,Sk->kij', phase.conj(), s, phase)
print(abs(s1-s_k).max())
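# A hedged extra check (added; not in the original example): with NR == Nk
# the phase matrix from k2gamma.get_phase should be unitary, which is what
# makes the two transforms above exact inverses of each other.
print(abs(np.einsum('Rk,Rq->kq', phase.conj(), phase) - np.eye(Nk)).max())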
kmf = dft.KRKS(cell, kpts)
ekpt = kmf.run()
mf = k2gamma.k2gamma(kmf, kmesh)
c_g_ao = mf.mo_coeff
# The following checks whether the MO coefficients were correctly converted:
print("Supercell gamma MO in AO basis from conversion:")
scell = tools.super_cell(cell, kmesh)
mf_sc = dft.RKS(scell)
s = mf_sc.get_ovlp()
mf_sc.run()
sc_mo = mf_sc.mo_coeff
nocc = scell.nelectron // 2
print("Supercell gamma MO from direct calculation:")
print(np.linalg.det(c_g_ao[:,:nocc].T.conj().dot(s).dot(sc_mo[:,:nocc])))
print(np.linalg.svd(c_g_ao[:,:nocc].T.conj().dot(s).dot(sc_mo[:,:nocc]))[1])
|
gkc1000/pyscf
|
examples/pbc/42-k2gamma.py
|
Python
|
apache-2.0
| 1,630
|
[
"PySCF"
] |
02b11babd100a379433375a51df1aee895db78af63b102a580a3fe5402ce8e57
|
import numpy as np
from ase import Atoms
from gpaw import GPAW
from gpaw.wavefunctions.pw import PW
from gpaw.test import equal
bulk = Atoms('Li', pbc=True)
k = 4
calc = GPAW(mode=PW(200), kpts=(k, k, k))
bulk.set_calculator(calc)
e = []
niter = []
A = [2.6, 2.65, 2.7, 2.75, 2.8]
for a in A:
bulk.set_cell((a, a, a))
e.append(bulk.get_potential_energy())
a = np.roots(np.polyder(np.polyfit(A, e, 2), 1))[0]
print 'a =', a
equal(a, 2.65380064, 0.001)
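# Hedged note (added): np.polyfit fits E(a) with a parabola, np.polyder
# differentiates it, and np.roots solves dE/da = 0, so `a` above is the
# equilibrium lattice constant of the quadratic fit.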
|
ajylee/gpaw-rtxs
|
gpaw/test/pw/bulk.py
|
Python
|
gpl-3.0
| 461
|
[
"ASE",
"GPAW"
] |
f46b94591d15d418f152332e00798091e0938c40a1ad9a6daddced237c4a6a68
|
""" This is a test of the chain
SiteStatus -> ResourceStatusClient -> ResourceStatusDB
It supposes that the DB is present, and that the service is running
"""
# pylint: disable=invalid-name,wrong-import-position
from datetime import datetime
import unittest
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from DIRAC.ResourceStatusSystem.Client.SiteStatus import SiteStatus
from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
Datetime = datetime.now()
testSite = 'test1234.test.test'
class TestClientSiteStatusTestCase(unittest.TestCase):
def setUp(self):
self.rsClient = ResourceStatusClient()
self.stClient = SiteStatus()
self.stClient.rssFlag = True
def tearDown(self):
pass
class ClientChain(TestClientSiteStatusTestCase):
def test_addAndRemove(self):
        # make sure that the test sites are not present in the db
self.rsClient.deleteStatusElement('Site', 'Status', testSite)
self.rsClient.deleteStatusElement('Site', 'Status', 'testActive1.test.test')
self.rsClient.deleteStatusElement('Site', 'Status', 'testActive.test.test')
self.rsClient.deleteStatusElement('Site', 'Status', 'testBanned.test.test')
# add test site
res = self.rsClient.insertStatusElement('Site', 'Status', testSite, 'all',
'Active', 'Site', 'Synchronized', Datetime,
Datetime, 'tokenOwner', Datetime)
self.assertTrue(res['OK'])
self.stClient.rssCache.refreshCache()
# TEST getSites
# ...............................................................................
result = self.stClient.getSites()
self.assertTrue(result['OK'])
self.assertTrue(testSite in result['Value'])
# TEST getSiteStatuses
# ...............................................................................
result = self.stClient.getSiteStatuses([testSite])
self.assertTrue(result['OK'])
self.assertEqual(result['Value'][testSite], "Active")
# TEST getUsableSites
# ...............................................................................
result = self.stClient.getUsableSites([testSite])
self.assertTrue(result['OK'])
self.assertEqual(result['Value'][0], testSite)
# finally delete the test site
res = self.rsClient.deleteStatusElement('Site', 'Status', testSite)
self.assertTrue(res['OK'])
# ...............................................................................
# adding some more test sites and more complex tests
# ...............................................................................
res = self.rsClient.insertStatusElement('Site', 'Status', 'testActive.test.test', 'all',
'Active', 'Site', 'Synchronized', Datetime,
Datetime, 'tokenOwner', Datetime)
self.assertTrue(res['OK'])
res = self.rsClient.insertStatusElement('Site', 'Status', 'testActive1.test.test', 'all',
'Active', 'Site', 'Synchronized', Datetime,
Datetime, 'tokenOwner', Datetime)
self.assertTrue(res['OK'])
res = self.rsClient.insertStatusElement('Site', 'Status', 'testBanned.test.test', 'all',
'Banned', 'Site', 'Synchronized', Datetime,
Datetime, 'tokenOwner', Datetime)
self.assertTrue(res['OK'])
self.stClient.rssCache.refreshCache()
# TEST getSites
# ...............................................................................
result = self.stClient.getSites()
self.assertTrue(result['OK'])
self.assertTrue('testActive1.test.test' in result['Value'])
self.assertTrue('testActive.test.test' in result['Value'])
self.assertFalse('testBanned.test.test' in result['Value'])
# TEST getSites
# ...............................................................................
result = self.stClient.getSites('All')
self.assertTrue(result['OK'])
self.assertTrue('testActive1.test.test' in result['Value'])
self.assertTrue('testActive.test.test' in result['Value'])
self.assertTrue('testBanned.test.test' in result['Value'])
# TEST getUsableSites
# ...............................................................................
result = self.stClient.getUsableSites()
self.assertTrue(result['OK'])
self.assertTrue('testActive1.test.test' in result['Value'])
self.assertTrue('testActive.test.test' in result['Value'])
# setting a status
result = self.stClient.setSiteStatus('testBanned.test.test', 'Probing')
self.assertTrue(result['OK'])
self.stClient.rssCache.refreshCache()
result = self.stClient.getSites('Probing')
self.assertTrue(result['OK'])
self.assertTrue('testBanned.test.test' in result['Value'])
self.assertFalse('testActive.test.test' in result['Value'])
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestClientSiteStatusTestCase)
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(ClientChain))
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
|
arrabito/DIRAC
|
tests/Integration/ResourceStatusSystem/Test_SiteStatus.py
|
Python
|
gpl-3.0
| 5,316
|
[
"DIRAC"
] |
d85b6caae08b657842972f4a041e10336fbf0e952c1f2460b6a6b1c6999faa51
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import math
import numpy as np
import coords
import go
import mcts
from tests import test_utils
from absl import flags
FLAGS = flags.FLAGS
ALMOST_DONE_BOARD = test_utils.load_board('''
.XO.XO.OO
X.XXOOOO.
XXXXXOOOO
XXXXXOOOO
.XXXXOOO.
XXXXXOOOO
.XXXXOOO.
XXXXXOOOO
XXXXOOOOO
''')
TEST_POSITION = go.Position(
board=ALMOST_DONE_BOARD,
n=105,
komi=2.5,
caps=(1, 4),
ko=None,
recent=(go.PlayerMove(go.BLACK, (0, 1)),
go.PlayerMove(go.WHITE, (0, 8))),
to_play=go.BLACK
)
SEND_TWO_RETURN_ONE = go.Position(
board=ALMOST_DONE_BOARD,
n=75,
komi=0.5,
caps=(0, 0),
ko=None,
recent=(go.PlayerMove(go.BLACK, (0, 1)),
go.PlayerMove(go.WHITE, (0, 8)),
go.PlayerMove(go.BLACK, (1, 0))),
to_play=go.WHITE
)
class TestMctsNodes(test_utils.MinigoUnitTest):
def test_upper_bound_confidence(self):
probs = np.array([.02] * (go.N * go.N + 1))
root = mcts.MCTSNode(go.Position())
leaf = root.select_leaf()
self.assertEqual(root, leaf)
leaf.incorporate_results(probs, 0.5, root)
# 0.02 are normalized to 1/82
self.assertAlmostEqual(root.child_prior[0], 1/82)
self.assertAlmostEqual(root.child_prior[1], 1/82)
puct_policy = lambda n: 2.0 * (math.log((1.0 + n + FLAGS.c_puct_base)
/ FLAGS.c_puct_base) + FLAGS.c_puct_init) * 1/82
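        # Hedged note (added): this mirrors the AlphaZero-style PUCT
        # exploration term, U(a) = C(N) * P(a) * sqrt(N_parent) / (1 + N_a),
        # with C(N) = log((1 + N + c_base) / c_base) + c_init; the uniform
        # prior P(a) = 1/82 and a factor of 2.0 are folded into puct_policy.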
self.assertEqual(root.N, 1)
self.assertAlmostEqual(
root.child_U[0], puct_policy(root.N) * math.sqrt(1) / (1 + 0))
leaf = root.select_leaf()
self.assertNotEqual(root, leaf)
# With the first child expanded.
self.assertEqual(root.N, 1)
self.assertAlmostEqual(
root.child_U[0], puct_policy(root.N) * math.sqrt(1) / (1 + 0))
self.assertAlmostEqual(
root.child_U[1], puct_policy(root.N) * math.sqrt(1) / (1 + 0))
leaf.add_virtual_loss(up_to=root)
leaf2 = root.select_leaf()
self.assertNotIn(leaf2, (root, leaf))
leaf.revert_virtual_loss(up_to=root)
leaf.incorporate_results(probs, 0.3, root)
leaf2.incorporate_results(probs, 0.3, root)
# With the 2nd child expanded.
self.assertEqual(root.N, 3)
self.assertAlmostEqual(
root.child_U[0], puct_policy(root.N) * math.sqrt(2) / (1 + 1))
self.assertAlmostEqual(
root.child_U[1], puct_policy(root.N) * math.sqrt(2) / (1 + 1))
self.assertAlmostEqual(
root.child_U[2], puct_policy(root.N) * math.sqrt(2) / (1 + 0))
def test_action_flipping(self):
np.random.seed(1)
probs = np.array([.02] * (go.N * go.N + 1))
probs = probs + np.random.random([go.N * go.N + 1]) * 0.001
black_root = mcts.MCTSNode(go.Position())
white_root = mcts.MCTSNode(go.Position(to_play=go.WHITE))
black_root.select_leaf().incorporate_results(probs, 0, black_root)
white_root.select_leaf().incorporate_results(probs, 0, white_root)
# No matter who is to play, when we know nothing else, the priors
# should be respected, and the same move should be picked
black_leaf = black_root.select_leaf()
white_leaf = white_root.select_leaf()
self.assertEqual(black_leaf.fmove, white_leaf.fmove)
self.assertEqualNPArray(
black_root.child_action_score, white_root.child_action_score)
def test_select_leaf(self):
flattened = coords.to_flat(coords.from_gtp('D9'))
probs = np.array([.02] * (go.N * go.N + 1))
probs[flattened] = 0.4
root = mcts.MCTSNode(SEND_TWO_RETURN_ONE)
root.select_leaf().incorporate_results(probs, 0, root)
self.assertEqual(root.position.to_play, go.WHITE)
self.assertEqual(root.select_leaf(), root.children[flattened])
def test_backup_incorporate_results(self):
probs = np.array([.02] * (go.N * go.N + 1))
root = mcts.MCTSNode(SEND_TWO_RETURN_ONE)
root.select_leaf().incorporate_results(probs, 0, root)
leaf = root.select_leaf()
leaf.incorporate_results(probs, -1, root) # white wins!
# Root was visited twice: first at the root, then at this child.
self.assertEqual(root.N, 2)
# Root has 0 as a prior and two visits with value 0, -1
self.assertAlmostEqual(-1 / 3, root.Q) # average of 0, 0, -1
# Leaf should have one visit
self.assertEqual(1, root.child_N[leaf.fmove])
self.assertEqual(1, leaf.N)
# And that leaf's value had its parent's Q (0) as a prior, so the Q
# should now be the average of 0, -1
self.assertAlmostEqual(-0.5, root.child_Q[leaf.fmove])
self.assertAlmostEqual(-0.5, leaf.Q)
# We're assuming that select_leaf() returns a leaf like:
# root
# \
# leaf
# \
# leaf2
# which happens in this test because root is W to play and leaf was a W win.
self.assertEqual(go.WHITE, root.position.to_play)
leaf2 = root.select_leaf()
leaf2.incorporate_results(probs, -0.2, root) # another white semi-win
self.assertEqual(3, root.N)
# average of 0, 0, -1, -0.2
self.assertAlmostEqual(-0.3, root.Q)
self.assertEqual(2, leaf.N)
self.assertEqual(1, leaf2.N)
# average of 0, -1, -0.2
self.assertAlmostEqual(root.child_Q[leaf.fmove], leaf.Q)
self.assertAlmostEqual(-0.4, leaf.Q)
# average of -1, -0.2
self.assertAlmostEqual(-0.6, leaf.child_Q[leaf2.fmove])
self.assertAlmostEqual(-0.6, leaf2.Q)
def test_do_not_explore_past_finish(self):
probs = np.array([0.02] * (go.N * go.N + 1), dtype=np.float32)
root = mcts.MCTSNode(go.Position())
root.select_leaf().incorporate_results(probs, 0, root)
first_pass = root.maybe_add_child(coords.to_flat(None))
first_pass.incorporate_results(probs, 0, root)
second_pass = first_pass.maybe_add_child(coords.to_flat(None))
with self.assertRaises(AssertionError):
second_pass.incorporate_results(probs, 0, root)
node_to_explore = second_pass.select_leaf()
# should just stop exploring at the end position.
self.assertEqual(second_pass, node_to_explore)
def test_add_child(self):
root = mcts.MCTSNode(go.Position())
child = root.maybe_add_child(17)
self.assertIn(17, root.children)
self.assertEqual(root, child.parent)
self.assertEqual(17, child.fmove)
def test_add_child_idempotency(self):
root = mcts.MCTSNode(go.Position())
child = root.maybe_add_child(17)
current_children = copy.copy(root.children)
child2 = root.maybe_add_child(17)
self.assertEqual(child, child2)
self.assertEqual(current_children, root.children)
def test_never_select_illegal_moves(self):
probs = np.array([0.02] * (go.N * go.N + 1))
# let's say the NN were to accidentally put a high weight on an illegal move
probs[1] = 0.99
root = mcts.MCTSNode(SEND_TWO_RETURN_ONE)
root.incorporate_results(probs, 0, root)
# and let's say the root were visited a lot of times, which pumps up the
# action score for unvisited moves...
root.N = 100000
root.child_N[root.position.all_legal_moves()] = 10000
# this should not throw an error...
leaf = root.select_leaf()
# the returned leaf should not be the illegal move
self.assertNotEqual(1, leaf.fmove)
# and even after injecting noise, we should still not select an illegal move
for i in range(10):
root.inject_noise()
leaf = root.select_leaf()
self.assertNotEqual(1, leaf.fmove)
def test_dont_pick_unexpanded_child(self):
probs = np.array([0.001] * (go.N * go.N + 1))
# make one move really likely so that tree search goes down that path twice
# even with a virtual loss
probs[17] = 0.999
root = mcts.MCTSNode(go.Position())
root.incorporate_results(probs, 0, root)
root.N = 5
leaf1 = root.select_leaf()
self.assertEqual(17, leaf1.fmove)
leaf1.add_virtual_loss(up_to=root)
# the second select_leaf pick should return the same thing, since the child
# hasn't yet been sent to neural net for eval + result incorporation
leaf2 = root.select_leaf()
self.assertIs(leaf1, leaf2)
def test_normalize_policy(self):
# sum of probs > 1.0
probs = np.array([2.0] * (go.N * go.N + 1))
root = mcts.MCTSNode(TEST_POSITION)
root.incorporate_results(probs, 0, root)
root.N = 0
# Policy sums to 1.0, only legal moves have non-zero values.
self.assertAlmostEqual(1.0, sum(root.child_prior))
self.assertEqual(6, np.count_nonzero(root.child_prior))
self.assertEqual(0, sum(root.child_prior * root.illegal_moves))
def test_inject_noise_only_legal_moves(self):
probs = np.array([0.02] * (go.N * go.N + 1))
root = mcts.MCTSNode(TEST_POSITION)
root.incorporate_results(probs, 0, root)
root.N = 0
uniform_policy = 1 / sum(root.illegal_moves == 0)
expected_policy = uniform_policy * (1 - root.illegal_moves)
self.assertTrue((root.child_prior == expected_policy).all())
root.inject_noise()
# 0.75/0.25 derived from default dirichlet_noise_weight.
self.assertTrue((0.75 * expected_policy <= root.child_prior).all())
self.assertTrue(
(0.75 * expected_policy + 0.25 >= root.child_prior).all())
# Policy sums to 1.0, only legal moves have non-zero values.
self.assertAlmostEqual(1.0, sum(root.child_prior))
self.assertEqual(0, sum(root.child_prior * root.illegal_moves))
|
lablup/sorna-repl
|
vendor/benchmark/minigo/tests/test_mcts.py
|
Python
|
lgpl-3.0
| 10,518
|
[
"VisIt"
] |
d7bc1d6cc70125eed453186c3c13880d8d1cfab3ccd2b1e545b27fddd89386b6
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return
# pylint: disable=unidiomatic-typecheck
"""
This file contains the set of passes for Relay, which exposes an interface for
configuring the passes and scripting them in Python.
"""
from ...ir import IRModule
from ...relay import transform, build_module
from ...runtime.ndarray import cpu
from . import _ffi_api
from .feature import Feature
def post_order_visit(expr, fvisit):
"""Recursively visit the ir in post DFS order node,
apply fvisit. Each node is guaranteed to be visited
only once.
Parameters
----------
expr : tvm.relay.Expr
The input expression.
fvisit : function
The visitor function to be applied.
"""
return _ffi_api.post_order_visit(expr, fvisit)
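# A hedged usage sketch (added; not part of the original source):
#
#     nodes = []
#     post_order_visit(expr, lambda node: nodes.append(node))
#     # `nodes` now lists every IR node exactly once, children before parents.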
def well_formed(expr):
"""Check that each Var is only bound once (well formed).
Parameters
----------
expr : tvm.relay.Expr
The input expression
Returns
-------
well_form : bool
Whether the input expression is well formed
"""
return _ffi_api.well_formed(expr)
def check_kind(t, mod=None):
"""Check that the type is well kinded and return the kind.
    For example, this means a type cannot contain a tensor of tensors, and a
    tuple of two shapes is kinded as Shape rather than as an ordinary Type.
Parameters
----------
t : tvm.relay.Type
The type to check
mod : Optional[tvm.IRModule]
The global module.
Returns
-------
kind : Kind
the kind of t
Examples
--------
.. code:: python
assert check_kind(relay.TupleType([relay.TypeParam('tp1', relay.Kind.Shape)])) == Shape
assert check_kind(relay.TupleType([relay.TypeParam('tp1', relay.Kind.Type)])) == Type
"""
if mod is not None:
return _ffi_api.check_kind(t, mod)
else:
return _ffi_api.check_kind(t)
def check_constant(expr):
"""Check whether an expression is constant
Parameters
----------
expr : tvm.relay.Expr
The input expression
Returns
-------
result : bool
Whether the expression is constant.
"""
return _ffi_api.check_constant(expr)
def check_basic_block_normal_form(expr):
"""Check whether an expression is in the basic block form
Parameters
----------
expr : tvm.relay.Expr
The input expression
Returns
-------
result : bool
Whether the expression is in the basic block form.
"""
return _ffi_api.check_basic_block_normal_form(expr)
def free_vars(expr):
"""Get free Vars from expression expr in Post DFS order.
Parameters
----------
expr : tvm.relay.Expr
The input expression
Returns
-------
free : List[tvm.relay.Var]
The list of free variables in post DFS order.
Note
----
    The fact that Vars are post-DFS ordered is useful in
    neural networks: usually this means the weights of earlier
    layers are ordered first.
"""
return _ffi_api.free_vars(expr)
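# Hedged example (added): for f = relay.Function([x], x + y), the parameter
# x is bound while y is free, so free_vars(f) is expected to return [y].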
def bound_vars(expr):
"""Get bound vars from expression expr in post-DFS order.
Parameters
----------
expr : tvm.relay.Expr
The input expression
Returns
-------
free : List[tvm.relay.Var]
The list of bound variables in post-DFS order.
"""
return _ffi_api.bound_vars(expr)
def all_vars(expr):
"""Get all vars from expression expr in post-DFS order.
Parameters
----------
expr : tvm.relay.Expr
The input expression
Returns
-------
    ret : List[tvm.relay.Var]
The list of all variables in post-DFS order.
"""
return _ffi_api.all_vars(expr)
def free_type_vars(expr, mod=None):
"""Get free type variables from expression/type e
Parameters
----------
expr : Union[tvm.relay.Expr,tvm.relay.Type]
The input expression/type
mod : Optional[tvm.IRModule]
The global module
Returns
-------
free : List[tvm.relay.TypeVar]
The list of free type variables in post-DFS order
"""
use_mod = mod if mod is not None else IRModule()
return _ffi_api.free_type_vars(expr, use_mod)
def bound_type_vars(expr, mod=None):
"""Get bound type variables from expression/type e
Parameters
----------
expr : Union[tvm.relay.Expr,tvm.relay.Type]
The input expression/type
mod : Optional[tvm.IRModule]
The global module
Returns
-------
    bound : List[tvm.relay.TypeVar]
The list of bound type variables in post-DFS order
"""
use_mod = mod if mod is not None else IRModule()
return _ffi_api.bound_type_vars(expr, use_mod)
def all_type_vars(expr, mod=None):
"""Get all type variables from expression/type e
Parameters
----------
expr : Union[tvm.relay.Expr,tvm.relay.Type]
The input expression/type
mod : Optional[tvm.IRModule]
The global module
Returns
-------
    ret : List[tvm.relay.TypeVar]
The list of all type variables in post-DFS order
"""
use_mod = mod if mod is not None else IRModule()
return _ffi_api.all_type_vars(expr, use_mod)
def all_dtypes(expr):
"""Collect set of all data types used in `expr`.
Parameters
----------
expr : tvm.relay.Expr
The input expression
Returns
-------
ret : Set[String]
Set of data types used in the expression (e.g., `{'int8', 'int32'}`)
"""
return set(_ffi_api.all_dtypes(expr))
def get_total_mac_number(expr):
"""
Count the number of MACs (multiply-accumulate) of a model
Parameters
----------
expr : tvm.relay.Expr
The input expression.
Returns
-------
result : int64
The number of MACs (multiply-accumulate) of a model
"""
return _ffi_api.GetTotalMacNumber(expr)
def unmatched_cases(match, mod=None):
"""
Finds cases that the match expression does not catch, if any.
Parameters
----------
match : tvm.relay.Match
The match expression
mod : Optional[tvm.IRModule]
The module (defaults to an empty module)
Returns
-------
missing_patterns : [tvm.relay.Pattern]
Patterns that the match expression does not catch.
"""
return _ffi_api.unmatched_cases(match, mod)
def detect_feature(a, b=None):
"""
Detect the feature used in a relay program.
Parameters
----------
a : Union[tvm.relay.Expr, tvm.IRModule]
The input expression or module.
b : Optional[Union[tvm.relay.Expr, tvm.IRModule]]
The input expression or module.
        The two arguments cannot both be expressions or both be modules.
Returns
-------
features : Set[Feature]
Features used in the program.
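    Examples
    --------
    A minimal sketch (added for illustration), assuming ``tvm.relay`` is
    imported as ``relay``:

    .. code:: python

        x = relay.var("x")
        feats = detect_feature(relay.Function([x], x))
        assert Feature.fVar in feats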
"""
if isinstance(a, IRModule):
a, b = b, a
return {Feature(int(x)) for x in _ffi_api.detect_feature(a, b)}
def extract_fused_functions(mod):
"""Pass to extract IRModule of only fused primitive functions.
    The ExtractFusedFunctions pass invokes SimplifyInference, FuseOps(3),
    and ExtractFusedFunctions in that order.
Parameters
----------
mod : tvm.IRModule
Returns
-------
ret : Dict[int, tvm.relay.function.Function]
        A mapping from function hash to fused primitive function
"""
ret_mod = _ffi_api.ExtractFusedFunctions()(mod)
ret = {}
for hash_, func in ret_mod.functions.items():
ret[hash_] = func
return ret
def list_op_freqs(mod):
"""Pass to extract unique operator names and how frequently they appear
in an IRModule. Fused functions are traversed to count the operators
that compose them.
Parameters
----------
mod : tvm.IRModule
Returns
-------
ret : Dict[str, int]
Dict of unique operator names to frequency
"""
return _ffi_api.ExtractOperators(mod)
def search_fc_transpose(expr):
"""Search fc weight name in the patten: y = nn.dense(x, transpose(w, [1, 0]))
This function is used in the data_dep_optimization.simplify_fc_transpose method
Parameters
----------
expr : tvm.relay.Expr
Returns
-------
ret : Array[String]
        Array of weight variable names in the pattern y = nn.dense(x, transpose(w, [1, 0]))
"""
ret = _ffi_api.search_fc_transpose(expr)
return ret
def get_calibration_data(mod, data):
"""Get the calibration data of a given relay graph
This pass uses the graph executor to get the calibration data of a module, which
includes the input and output values of each function. The returned data uses
the GlobalVar of each function as a key. Users can further access the inputs and
outputs by using `inputs` or `outputs` as the key.
Following are some limitations:
1. The input module (graph) cannot have control flows.
2. The input arguments of each function cannot be tuples (outputs can be tuples).
3. We only handle top-level functions (i.e., nested function is not handled).
4. We only handle functions with `Compiler` attribute being set.
Parameters
----------
mod : tvm.IRModule
The input module for collecting the calibration data
data : Dict[str, NDArray]
The input data for running the module
Returns
-------
data : Dict[tvm.relay.GlobalVar, Dict[str, NDArray]]
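    Examples
    --------
    A usage sketch (added for illustration, not from the original docstring):
    it assumes ``mod`` has already been partitioned so the offloaded
    functions carry a ``Compiler`` attribute, that the module takes a single
    input named "x", and that numpy is imported as ``np``.

    .. code:: python

        data = {"x": np.random.rand(1, 3, 224, 224).astype("float32")}
        calib = get_calibration_data(mod, data)
        for gvar, values in calib.items():
            print(gvar.name_hint, len(values["inputs"]), len(values["outputs"]))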
"""
output_map = _ffi_api.get_calibrate_output_map(mod)
mod = _ffi_api.get_calibrate_module(mod)
mod = transform.Inline()(mod)
ref_res = build_module.create_executor("graph", mod=mod, device=cpu(0)).evaluate()(**data)
calib_data = {}
for gvar, indices in output_map.items():
offset = int(indices[0])
in_len = int(indices[1])
out_len = int(indices[2])
value = {
"inputs": ref_res[offset : offset + in_len],
"outputs": ref_res[offset + in_len : offset + in_len + out_len],
}
calib_data[gvar] = value
return calib_data
|
Laurawly/tvm-1
|
python/tvm/relay/analysis/analysis.py
|
Python
|
apache-2.0
| 10,670
|
[
"VisIt"
] |
ec88e6f9cdeb3e2f719e978dd3218f60361e310328fcce2841e728cc118569ca
|
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
import os, sys
import logging
import warnings
import locale
from zeroinstall import localedir
if localedir:
# Tell GTK where to find the translations, if they're not in
# the default system location.
if hasattr(locale, 'bindtextdomain'):
locale.bindtextdomain('zero-install', localedir)
from optparse import OptionParser
from zeroinstall import _, SafeException
from zeroinstall.injector.config import load_config
from zeroinstall.support import tasks
_recalculate = tasks.Blocker('recalculate')
def recalculate():
"""Ask the mainloop to recalculate. If we're already recalculating, wait for that to finish
and then do it again."""
global _recalculate
_recalculate.trigger()
_recalculate = tasks.Blocker('recalculate')
def check_gui():
"""Returns True if the GUI works, or returns an exception if not."""
if sys.version_info[0] < 3:
try:
import pygtk; pygtk.require('2.0')
except ImportError as ex:
logging.info("No GUI available", exc_info = ex)
return ex
try:
if sys.version_info[0] > 2:
from zeroinstall.gtkui import pygtkcompat
pygtkcompat.enable()
pygtkcompat.enable_gtk(version = '3.0')
import gtk
except (ImportError, ValueError, RuntimeError) as ex:
logging.info("No GUI available", exc_info = ex)
return ex
if gtk.gdk.get_display() is None:
return SafeException("Failed to connect to display.")
return True
_gui_available = None
def gui_is_available(force_gui):
"""True if we have a usable GUI. False to fallback on console mode.
If force_gui is True, raise an exception if the GUI is missing."""
global _gui_available
if _gui_available is None:
with warnings.catch_warnings():
if not force_gui:
warnings.filterwarnings("ignore")
_gui_available = check_gui()
if _gui_available is True:
return True
if force_gui:
raise _gui_available
return False
class OCamlDriver:
def __init__(self, config):
self.config = config
self.watchers = []
def set_selections(self, ready, tree, sels):
self.ready = ready
self.tree = tree
self.sels = sels
for w in self.watchers: w()
def open_gui(args):
parser = OptionParser(usage=_("usage: %prog [options] interface"))
parser.add_option("", "--before", help=_("choose a version before this"), metavar='VERSION')
parser.add_option("", "--cpu", help=_("target CPU type"), metavar='CPU')
parser.add_option("", "--command", help=_("command to select"), metavar='COMMAND')
parser.add_option("-d", "--download-only", help=_("fetch but don't run"), action='store_true')
parser.add_option("-g", "--force-gui", help=_("display an error if there's no GUI"), action='store_true')
parser.add_option("", "--message", help=_("message to display when interacting with user"))
parser.add_option("", "--not-before", help=_("minimum version to choose"), metavar='VERSION')
parser.add_option("", "--os", help=_("target operation system type"), metavar='OS')
parser.add_option("-r", "--refresh", help=_("check for updates of all interfaces"), action='store_true')
parser.add_option("", "--select-only", help=_("only download the feeds"), action='store_true')
parser.add_option("-s", "--source", help=_("select source code"), action='store_true')
parser.add_option("", "--systray", help=_("download in the background"), action='store_true')
parser.add_option("-v", "--verbose", help=_("more verbose output"), action='count')
parser.add_option("-V", "--version", help=_("display version information"), action='store_true')
parser.add_option("", "--version-for", help=_("set version constraints for a specific interface"),
nargs=2, metavar='URI RANGE', action='append')
parser.add_option("", "--with-store", help=_("add an implementation cache"), action='append', metavar='DIR')
parser.disable_interspersed_args()
(options, args) = parser.parse_args(args)
if options.verbose:
logger = logging.getLogger()
if options.verbose == 1:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.DEBUG)
if options.version:
from zeroinstall.gui import gui
print("0launch-gui (zero-install) " + gui.version)
print("Copyright (C) 2010 Thomas Leonard")
print(_("This program comes with ABSOLUTELY NO WARRANTY,"
"\nto the extent permitted by law."
"\nYou may redistribute copies of this program"
"\nunder the terms of the GNU Lesser General Public License."
"\nFor more information about these matters, see the file named COPYING."))
sys.exit(0)
if not gui_is_available(options.force_gui):
sys.exit(100)
from zeroinstall.gui import gui
handler = gui.GUIHandler()
config = load_config(handler)
if options.with_store: # TODO: inherit from the main config
from zeroinstall import zerostore
for x in options.with_store:
config.stores.stores.append(zerostore.Store(os.path.abspath(x)))
assert len(args) > 0
interface_uri = args[0]
if len(args) > 1:
parser.print_help()
sys.exit(1)
from zeroinstall.gui import mainwindow, dialog
widgets = dialog.Template('main')
root_iface = config.iface_cache.get_interface(interface_uri)
finished = tasks.Blocker("GUI finished")
def resolve(result):
finished.gui_result = result
finished.trigger()
driver = OCamlDriver(config)
window = mainwindow.MainWindow(driver, widgets, download_only = bool(options.download_only), resolve = resolve, select_only = bool(options.select_only))
handler.mainwindow = window
if options.message:
window.set_message(options.message)
root = config.iface_cache.get_interface(interface_uri)
window.browser.set_root(root)
window.window.connect('destroy', lambda w: handler.abort_all_downloads())
if options.systray:
window.use_systray_icon(root_iface)
logger = logging.getLogger()
def prepare_for_recalc(force_refresh):
window.refresh_button.set_sensitive(False)
window.browser.set_update_icons(force_refresh)
if not window.systray_icon:
window.show()
force_refresh = bool(options.refresh)
prepare_for_recalc(force_refresh)
# Called each time a complete solve_with_downloads is done.
@tasks.async
def run_gui(reply_holder):
window.refresh_button.set_sensitive(True)
window.browser.highlight_problems()
if window.systray_icon and window.systray_icon.get_visible() and \
window.systray_icon.is_embedded():
if driver.ready:
window.systray_icon.set_tooltip(_('Downloading updates for %s') % root_iface.get_name())
window.run_button.set_active(True)
else:
# Should already be reporting an error, but
# blink it again just in case
window.systray_icon.set_blinking(True)
refresh_clicked = dialog.ButtonClickedBlocker(window.refresh_button)
yield refresh_clicked, _recalculate, finished
if finished.happened:
reply_holder.append([finished.gui_result])
else:
reply_holder.append(["recalculate", refresh_clicked.happened])
prepare_for_recalc(refresh_clicked.happened)
return (run_gui, driver)
|
linuxmidhun/0install
|
zeroinstall/gui/main.py
|
Python
|
lgpl-2.1
| 6,992
|
[
"VisIt"
] |
9c600c9b9cdc458f5a1d0b250c0a579f167c47dee15aac94609618e4beba9c6d
|
''' DIRAC.ResourceStatusSystem.Service package
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = '$Id$'
|
yujikato/DIRAC
|
src/DIRAC/ResourceStatusSystem/Service/__init__.py
|
Python
|
gpl-3.0
| 180
|
[
"DIRAC"
] |
2f7fc20c23a5c8b36db764ffa9ef61718863e2229857e2bf472a9a946c24234d
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
from pathlib import Path
from monty.serialization import loadfn, dumpfn
import os
from pymatgen.core.periodic_table import Element
from pymatgen.entries.entry_tools import group_entries_by_structure, EntrySet
test_dir = Path(__file__).absolute().parent / ".." / ".." / ".." / 'test_files'
class FuncTest(unittest.TestCase):
def test_group_entries_by_structure(self):
entries = loadfn(str(test_dir / "TiO2_entries.json"))
groups = group_entries_by_structure(entries)
self.assertEqual(sorted([len(g) for g in groups]),
[1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 4])
self.assertLess(len(groups), len(entries))
# Make sure no entries are left behind
self.assertEqual(sum([len(g) for g in groups]), len(entries))
class EntrySetTest(unittest.TestCase):
def setUp(self):
entries = loadfn(str(test_dir / "Li-Fe-P-O_entries.json"))
self.entry_set = EntrySet(entries)
def test_chemsys(self):
self.assertEqual(self.entry_set.chemsys, {'Fe', 'Li', 'O', 'P'})
def test_get_subset(self):
entries = self.entry_set.get_subset_in_chemsys(["Li", "O"])
for e in entries:
self.assertTrue(set([Element.Li, Element.O]).issuperset(e.composition.keys()))
self.assertRaises(ValueError, self.entry_set.get_subset_in_chemsys, ["Fe", "F"])
def test_remove_non_ground_states(self):
l = len(self.entry_set)
self.entry_set.remove_non_ground_states()
self.assertLess(len(self.entry_set), l)
def test_as_dict(self):
dumpfn(self.entry_set, "temp_entry_set.json")
entry_set = loadfn("temp_entry_set.json")
self.assertEqual(len(entry_set), len(self.entry_set))
os.remove("temp_entry_set.json")
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
mbkumar/pymatgen
|
pymatgen/entries/tests/test_entry_tools.py
|
Python
|
mit
| 1,996
|
[
"pymatgen"
] |
e51b9938c1747e0336805fc3d2e97d2f7f10aed1eeaa42ac59a59f4dad91c8f7
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module is used to estimate the cost of various compounds. Costs are taken
from a CostDB instance, for example a CSV file via CostDBCSV.
For compounds with no cost listed, a Phase Diagram style convex hull
optimization is performed to determine a set of compositions that can be mixed
to give the desired compound with lowest total cost.
"""
from __future__ import division, unicode_literals
import abc
from collections import defaultdict
import csv
import os
import itertools
from monty.design_patterns import singleton
from monty.string import unicode2str
import six
import scipy.constants as const
from pymatgen import Composition, Element
from pymatgen.util.provenance import is_valid_bibtex
from pymatgen.analysis.phase_diagram import PDEntry, PhaseDiagram
from io import open
__author__ = 'Anubhav Jain'
__copyright__ = 'Copyright 2013, The Materials Project'
__version__ = '0.1'
__maintainer__ = 'Anubhav Jain'
__email__ = 'ajain@lbl.gov'
__date__ = 'Aug 27, 2013'
module_dir = os.path.dirname(os.path.abspath(__file__))
class CostEntry(PDEntry):
"""
    Extends PDEntry to include a BibTeX reference and language about
cost
"""
def __init__(self, composition, cost, name, reference):
"""
Args:
composition:
Composition as a pymatgen.core.structure.Composition
cost:
Cost (per mol, NOT per kg) of the full Composition
name:
Optional parameter to name the entry. Defaults to the reduced
chemical formula as in PDEntry.
reference:
Reference data as BiBTeX string
"""
super(CostEntry, self).__init__(composition, cost, name)
if reference and not is_valid_bibtex(reference):
raise ValueError(
"Invalid format for cost reference! Should be BibTeX string.")
self.reference = reference
def __repr__(self):
return "CostEntry : {} with cost = {:.4f}".format(self.composition,
self.energy)
class CostDB(six.with_metaclass(abc.ABCMeta)):
"""
Abstract class for representing a Cost database.
Can be extended, e.g. for file-based or REST-based databases
"""
@abc.abstractmethod
def get_entries(self, chemsys):
"""
For a given chemical system, return an array of CostEntries
Args:
chemsys:
array of Elements defining the chemical system.
Returns:
array of CostEntries
"""
return
class CostDBCSV(CostDB):
"""
Read a CSV file to get costs
Format is formula,cost_per_kg,name,BibTeX
"""
def __init__(self, filename):
# read in data from file
self._chemsys_entries = defaultdict(list)
filename = os.path.join(os.path.dirname(__file__), filename)
with open(filename, "rt") as f:
reader = csv.reader(f, quotechar=unicode2str("|"))
for row in reader:
comp = Composition(row[0])
cost_per_mol = float(row[1]) * comp.weight.to("kg") * const.N_A
pde = CostEntry(comp.formula, cost_per_mol, row[2], row[3])
chemsys = "-".join(sorted([el.symbol
for el in pde.composition.elements]))
self._chemsys_entries[chemsys].append(pde)
def get_entries(self, chemsys):
chemsys = "-".join(sorted([el.symbol for el in chemsys]))
return self._chemsys_entries[chemsys]
@singleton
class CostDBElements(CostDBCSV):
"""
Singleton object that provides the cost data for elements
"""
def __init__(self):
CostDBCSV.__init__(
self, os.path.join(module_dir, "costdb_elements.csv"))
class CostAnalyzer(object):
"""
Given a CostDB, figures out the minimum cost solutions via convex hull
"""
def __init__(self, costdb):
self.costdb = costdb
def get_lowest_decomposition(self, composition):
"""
Get the decomposition leading to lowest cost
Args:
composition:
Composition as a pymatgen.core.structure.Composition
Returns:
Decomposition as a dict of {Entry: amount}
"""
entries_list = []
elements = [e.symbol for e in composition.elements]
for i in range(len(elements)):
for combi in itertools.combinations(elements, i + 1):
chemsys = [Element(e) for e in combi]
x = self.costdb.get_entries(chemsys)
entries_list.extend(x)
try:
pd = PhaseDiagram(entries_list)
return pd.get_decomposition(composition)
except IndexError:
raise ValueError("Error during PD building; most likely, "
"cost data does not exist!")
def get_cost_per_mol(self, comp):
"""
Get best estimate of minimum cost/mol based on known data
Args:
comp:
Composition as a pymatgen.core.structure.Composition
Returns:
float of cost/mol
"""
comp = comp if isinstance(comp, Composition) else Composition(comp)
decomp = self.get_lowest_decomposition(comp)
return sum(k.energy_per_atom * v * comp.num_atoms for k, v in
decomp.items())
def get_cost_per_kg(self, comp):
"""
Get best estimate of minimum cost/kg based on known data
Args:
comp:
Composition as a pymatgen.core.structure.Composition
Returns:
float of cost/kg
"""
comp = comp if isinstance(comp, Composition) else Composition(comp)
return self.get_cost_per_mol(comp) / (
comp.weight.to("kg") * const.N_A)
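

if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original module: estimate
    # the elemental raw-material cost of a composition. "Fe2O3" is an
    # arbitrary formula chosen for the demo.
    analyzer = CostAnalyzer(CostDBElements())
    print("cost/kg of Fe2O3: {:.2f}".format(analyzer.get_cost_per_kg("Fe2O3")))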
|
czhengsci/pymatgen
|
pymatgen/analysis/cost/cost.py
|
Python
|
mit
| 6,002
|
[
"pymatgen"
] |
fb06a549f7e9b376f7329cae5d3a52586a63670c923d45b1963f42f41c5e6516
|
#!/usr/bin/python
"""
These functions can be used to calculate the average end-to-end distance of a backbone from a LAMMPS output.
Usage:
# dump_dataframe must be in pythonpath or working directory
from endtoend_distance import rf
rf,rf_std = rf(first_frame=-1000, last_frame=-1, trajectory_step=10,atoms_per_polymer=184, number_of_chains=100)
Requirement:
numpy
pandas
dump_dataframe.py
scipy
Limitations:
Coordinates must be unwrapped (ex:xu,yu,zu)
Each dump must be a file
TODO:
Function to read a trajectory from a single file
"""
from dump_dataframe import read_dump
import numpy as np
import pandas as pd
from scipy.spatial.distance import pdist
from glob import glob
def endtoend(filename, atoms_per_polymer, number_of_chains):
"""
    Function to calculate the end-to-end distance of each polymer chain from a dump.
Args:
----
filename(string): Filename of the dump
atoms_per_polymer(int): The number of particles/atoms in a single chains
number_of_chains(int): Number of chains in the system
Returns:
----
    endtoend_dists(array): Numpy array with the end-to-end distance for each chain
"""
# Read the dump, coordinates must be unwrapped
dump = read_dump(filename, wrap=False)
# Select only the useful columns
wanted_columns = ["xu", "yu", "zu"]
rf_df = dump["atom_df"][wanted_columns]
# Create an empty array which will contains the distances
endtoend_dists = np.zeros(number_of_chains)
i = 0
while i < number_of_chains:
        # Calculate the distance between the first and the last atoms in the
        # backbone
endtoend_dists[i] = pdist(
rf_df.loc[[1 + atoms_per_polymer * i, atoms_per_polymer + atoms_per_polymer * i]])
i += 1
return endtoend_dists
def rf(first_frame=-1000, last_frame=-1, trajectory_step=10,atoms_per_polymer=184, number_of_chains=100):
"""
    Function to calculate the Rf of a LAMMPS trajectory.
Args:
----
first_frame(int): The first frame desired in the trajectory
last_frame(int): The frame to stop
trajectory_step(int): calculate only for each # of files
atoms_per_polymer(int): The number of atoms in the polymer chain
number_of_chains(int): The number of chains in the system
Returns:
----
    Rfmean(float): The average end-to-end distance in the trajectory
    Rfstd(float): The standard deviation of the Rf
"""
# List of all the dump in the trajectory
complete_trajectory = glob("*dump*")
# sort the list according to the number in the filename
    complete_trajectory.sort(key=lambda f: int("".join(filter(str.isdigit, f))))  # join for Python 3 compatibility
# consider only the desired frames
desired_trajectory = complete_trajectory[first_frame:last_frame:trajectory_step]
    #create an empty numpy array to contain the end-to-end distances for each chain (columns)
    #for each step (time)
rf = np.zeros((len(desired_trajectory),number_of_chains))
i=0
# for each file in the trajectory
for f in desired_trajectory:
#calculate the end to end distances for each chain
rf[i] = endtoend(f, atoms_per_polymer, number_of_chains)
i+=1
    #return the mean end-to-end distance with its standard deviation
return rf.mean(),rf.std()
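

if __name__ == "__main__":
    # Illustrative invocation (added for the example; mirrors the module
    # docstring): assumes the working directory contains LAMMPS dump files
    # matching "*dump*".
    mean_rf, std_rf = rf(first_frame=-1000, last_frame=-1, trajectory_step=10,
                         atoms_per_polymer=184, number_of_chains=100)
    print("Rf = {:.3f} +/- {:.3f}".format(mean_rf, std_rf))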
|
EtiCui/Msc-UdeS
|
dataAnalysis/endtoend_distance.py
|
Python
|
mit
| 3,264
|
[
"LAMMPS"
] |
a72e7dedf538a2c2f5045fc0048b892527d74fc720d3968c8670f34f013bc99d
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Module implementing an XYZ file object class.
"""
import re
from io import StringIO
import pandas as pd
from monty.io import zopen
from pymatgen.core.structure import Molecule
class XYZ:
"""
Basic class for importing and exporting Molecules or Structures in XYZ
format.
.. note::
Exporting periodic structures in the XYZ format will lose information
about the periodicity. Essentially, only cartesian coordinates are
written in this format and no information is retained about the
lattice.
"""
def __init__(self, mol: Molecule, coord_precision: int = 6):
"""
Args:
mol: Input molecule or list of molecules
coord_precision: Precision to be used for coordinates.
"""
if isinstance(mol, Molecule) or not isinstance(mol, list):
self._mols = [mol]
else:
self._mols = mol
self.precision = coord_precision
@property
def molecule(self) -> Molecule:
"""
Returns molecule associated with this XYZ. In case multiple frame
XYZ, returns the last frame.
"""
return self._mols[-1]
@property
def all_molecules(self):
"""
Returns all the frames of molecule associated with this XYZ.
"""
return self._mols
@staticmethod
def _from_frame_string(contents):
"""
Convert a single frame XYZ string to a molecule
"""
lines = contents.split("\n")
num_sites = int(lines[0])
coords = []
sp = []
coord_patt = re.compile(r"(\w+)\s+([0-9\-\+\.*^eEdD]+)\s+([0-9\-\+\.*^eEdD]+)\s+" r"([0-9\-\+\.*^eEdD]+)")
for i in range(2, 2 + num_sites):
m = coord_patt.search(lines[i])
if m:
sp.append(m.group(1)) # this is 1-indexed
# this is 0-indexed
# in case of 0.0D+00 or 0.00d+01 old double precision writing
# replace d or D by e for ten power exponent,
# and some files use *^ convention in place of e
xyz = [val.lower().replace("d", "e").replace("*^", "e") for val in m.groups()[1:4]]
coords.append([float(val) for val in xyz])
return Molecule(sp, coords)
@staticmethod
def from_string(contents):
"""
Creates XYZ object from a string.
Args:
contents: String representing an XYZ file.
Returns:
XYZ object
"""
if contents[-1] != "\n":
contents += "\n"
white_space = r"[ \t\r\f\v]"
natoms_line = white_space + r"*\d+" + white_space + r"*\n"
comment_line = r"[^\n]*\n"
coord_lines = r"(\s*\w+\s+[0-9\-\+\.*^eEdD]+\s+[0-9\-\+\.*^eEdD]+" r"\s+[0-9\-\+\.*^eEdD]+.*\n)+"
frame_pattern_text = natoms_line + comment_line + coord_lines
pat = re.compile(frame_pattern_text, re.MULTILINE)
mols = []
for xyz_match in pat.finditer(contents):
xyz_text = xyz_match.group(0)
mols.append(XYZ._from_frame_string(xyz_text))
return XYZ(mols)
@staticmethod
def from_file(filename):
"""
Creates XYZ object from a file.
Args:
filename: XYZ filename
Returns:
XYZ object
"""
with zopen(filename, "rt") as f:
return XYZ.from_string(f.read())
def as_dataframe(self):
"""
Generates a coordinates data frame with columns: atom, x, y, and z
In case of multiple frame XYZ, returns the last frame.
Returns:
pandas.DataFrame
"""
lines = str(self)
sio = StringIO(lines)
df = pd.read_csv(
sio,
header=None,
skiprows=[0, 1],
comment="#",
delim_whitespace=True,
names=["atom", "x", "y", "z"],
)
df.index += 1
return df
def _frame_str(self, frame_mol):
output = [str(len(frame_mol)), frame_mol.composition.formula]
fmtstr = f"{{}} {{:.{self.precision}f}} {{:.{self.precision}f}} {{:.{self.precision}f}}"
for site in frame_mol:
output.append(fmtstr.format(site.specie, site.x, site.y, site.z))
return "\n".join(output)
def __str__(self):
return "\n".join([self._frame_str(mol) for mol in self._mols])
def write_file(self, filename):
"""
Writes XYZ to file.
Args:
filename: File name of output file.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
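

if __name__ == "__main__":
    # Minimal round-trip sketch (added for illustration; not part of the
    # original module): build a water molecule with illustrative coordinates,
    # serialize it to the XYZ format and parse it back.
    mol = Molecule(["O", "H", "H"],
                   [[0.0, 0.0, 0.0], [0.757, 0.586, 0.0], [-0.757, 0.586, 0.0]])
    xyz = XYZ(mol)
    assert XYZ.from_string(str(xyz)).molecule.composition == mol.composition
    print(xyz)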
|
materialsproject/pymatgen
|
pymatgen/io/xyz.py
|
Python
|
mit
| 4,754
|
[
"pymatgen"
] |
005d9249a02079c36cc6332f93edfdcca206bbe1f632d1a6d2e6b64f2c885d60
|
#!/usr/bin/env jython
# copyright 2002 the Brothers Wilcox
# <mailto:zooko@zooko.com>
#
# This file is part of OvP.
#
# OvP is open source software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# OvP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OvP; if not, write to zooko.com:
# <a mailto:zooko@zooko.com>
#
# See the file COPYING or visit http://www.gnu.org/ for details.
# CVS:
__cvsid = '$Id: Game.py,v 1.2 2002/02/09 22:46:13 zooko Exp $'
import operator
from java.lang import *
from javax.swing import *
from java.awt import *
true = 1
false = 0
class TurnMan:
def __init__(self, game, creatures, color):
self.game = game
self.creatures = creatures
self.color = color
self.turnnumber = 0
self.rbotevs = []
self.reotevs = []
self.waitingforconfirm = false # this is `true' if we are waiting for the user to press the `t' key before beginning the next turn
def __repr__(self):
if self.color is Color.red:
return "red %s <%x>" % (self.__class__.__name__, id(self),)
elif self.color is Color.blue:
return "blue %s <%x>" % (self.__class__.__name__, id(self),)
else:
return "%s %s <%x>" % (self.color, self.__class__.__name__, id(self),)
def register_regular_bot_event(self, rbotev, args=(), kwargs={}, priority="whatever"):
if priority == "first":
self.rbotevs.insert(0, (rbotev, args, kwargs,))
else:
self.rbotevs.append((rbotev, args, kwargs,))
def register_regular_eot_event(self, reotev, args=(), kwargs={}):
self.reotevs.append((reotev, args, kwargs,))
def begin_turn(self):
self.turnnumber += 1
# print "%s.begin_turn(): %s" % (self, self.turnnumber,),
for (rbotev, args, kwargs,) in self.rbotevs:
apply(rbotev, args, kwargs)
def end_turn(self):
for (reotev, args, kwargs,) in self.reotevs:
apply(reotev, args, kwargs)
# print "end_turn(): (waiting for `t' key)"
# self.waitingforconfirm = true
def _really_end_turn(self):
self.waitingforconfirm = false
for (reotev, args, kwargs,) in self.reotevs:
apply(reotev, args, kwargs)
def select_next_creature_or_end_turn(self):
"""
Selects the next creature with action points. If there are no more creatures, it does end-of-turn.
"""
if self.game.selectedcreature is not None:
i = self.creatures.index(self.game.selectedcreature)
self.game.selectedcreature.unselect()
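			# rotate the list so the search starts just after the previously
			# selected creature and wraps all the way around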
cs = self.creatures[i+1:] + self.creatures[:i+1]
else:
cs = self.creatures
for c in cs:
if c.actpleft > 0:
c.select()
return
# No more creatures have action points. Next turn!
self.end_turn()
|
zooko/ogresvpixies
|
Game.py
|
Python
|
gpl-2.0
| 2,984
|
[
"VisIt"
] |
61674bf09cb4f3917eff4eab779d2e88f578b5a5a8d556944e2a1a06267e6a9b
|
#### Convenience Functions to be moved to kerneltools ####
from statsmodels.compat.python import range
import numpy as np
def forrt(X,m=None):
"""
    RFFT with order like Munro (1976) FORRT routine.
"""
if m is None:
m = len(X)
y = np.fft.rfft(X,m)/m
return np.r_[y.real,y[1:-1].imag]
def revrt(X,m=None):
"""
Inverse of forrt. Equivalent to Munro (1976) REVRT routine.
"""
if m is None:
m = len(X)
    y = X[:m//2+1] + np.r_[0,X[m//2+1:],0]*1j  # integer division for Python 3
return np.fft.irfft(y)*m
def silverman_transform(bw, M, RANGE):
"""
    FFT of Gaussian kernel following Silverman AS 176.
Notes
-----
Underflow is intentional as a dampener.
"""
    J = np.arange(M//2+1)
FAC1 = 2*(np.pi*bw/RANGE)**2
JFAC = J**2*FAC1
BC = 1 - 1./3 * (J*1./M*np.pi)**2
FAC = np.exp(-JFAC)/BC
kern_est = np.r_[FAC,FAC[1:-1]]
return kern_est
def counts(x,v):
"""
Counts the number of elements of x that fall within the grid points v
Notes
-----
Using np.digitize and np.bincount
"""
idx = np.digitize(x,v)
    try: # numpy >= 1.6 supports the minlength argument
        return np.bincount(idx, minlength=len(v))
    except TypeError: # older numpy without minlength
        bc = np.bincount(idx)
return np.r_[bc,np.zeros(len(v)-len(bc))]
def kdesum(x,axis=0):
return np.asarray([np.sum(x[i] - x, axis) for i in range(len(x))])
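

if __name__ == "__main__":
    # Round-trip sketch (added for illustration): revrt should invert forrt
    # up to floating-point error for an even-length signal.
    x = np.random.randn(64)
    assert np.allclose(revrt(forrt(x)), x)
    print("forrt/revrt round trip OK")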
|
detrout/debian-statsmodels
|
statsmodels/nonparametric/kdetools.py
|
Python
|
bsd-3-clause
| 1,357
|
[
"Gaussian"
] |
ca2680802f0e98620c6fbca20eef5aa1ce0421887b454e0c34fd9ce031df122c
|
#
# Copyright 2021 Lars Pastewka (U. Freiburg)
# 2020 Thomas Reichenbach (Fraunhofer IWM)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from ase.io import Trajectory, read
from ase.units import GPa, kB, fs, m, s
import numpy as np
from ase.md.langevin import Langevin
from matscipy import pressurecoupling as pc
from io import open
# Parameters
dt = 1.0 * fs # MD time step
C11 = 500.0 * GPa # material constant
p_c = 0.10 # experience value
Pdir = 2 # index of cell axis along normal pressure is applied
P = 5.0 * GPa # target normal pressure
v = 100.0 * m / s # constant sliding speed
vdir = 0 # index of cell axis along sliding happens
T = 300.0 # target temperature for thermostat
# thermostat is applied in the third direction which
# is neither pressure nor sliding direction and only
# in the middle region between top and bottom.
# This makes sense for small systems which cannot have
# a dedicated thermostat region.
t_langevin = 75.0 * fs # time constant for Langevin thermostat
gamma_langevin = 1. / t_langevin # derived Langevin parameter
t_integrate = 1000.0 * fs # simulation time
steps_integrate = int(t_integrate / dt) # number of simulation steps
# get atoms from trajectory to also initialize correct velocities
atoms = read('equilibrate_pressure.traj')
bottom_mask = np.loadtxt("bottom_mask.txt").astype(bool)
top_mask = np.loadtxt("top_mask.txt").astype(bool)
velocities = atoms.get_velocities()
velocities[top_mask, Pdir] = 0.0 # large mass will run away with v from equilibration
atoms.set_velocities(velocities)
damp = pc.AutoDamping(C11, p_c)
slider = pc.SlideWithNormalPressureCuboidCell(top_mask, bottom_mask, Pdir, P, vdir, v, damp)
atoms.set_constraint(slider)
calc = ASE_CALCULATOR_OBJECT # put a specific calculator here
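# For a quick illustrative run one could use, e.g., ASE's EMT calculator
# ("from ase.calculators.emt import EMT; calc = EMT()"); the original example
# deliberately leaves the calculator choice open.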
atoms.set_calculator(calc)
temps = np.zeros((len(atoms), 3))
temps[slider.middle_mask, slider.Tdir] = kB * T
gammas = np.zeros((len(atoms), 3))
gammas[slider.middle_mask, slider.Tdir] = gamma_langevin
integrator = Langevin(atoms, dt, temps, gammas, fixcm=False)
trajectory = Trajectory('slide.traj', 'w', atoms)
log_handle = open('log_slide.txt', 'w', 1, encoding='utf-8') # line buffered
logger = pc.SlideLogger(log_handle, atoms, slider, integrator)
# log can be read using pc.SlideLog (see docstring there)
logger.write_header()
logger() # step 0
trajectory.write() # step 0
integrator.attach(logger)
integrator.attach(trajectory)
integrator.run(steps_integrate)
log_handle.close()
trajectory.close()
|
libAtoms/matscipy
|
examples/pressure_coupling/slide.py
|
Python
|
lgpl-2.1
| 3,242
|
[
"ASE",
"Matscipy"
] |
298c21d22021bb3aa92e30594fdba680f1fd40e4c332de6559755272ee2f9deb
|
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# Standard Library
import datetime
import logging
# Django
from django.contrib.auth.models import User
from django.core.cache import cache
from django.urls import (
reverse,
reverse_lazy
)
# wger
from wger.core.tests import api_base_test
from wger.core.tests.base_testcase import (
WgerDeleteTestCase,
WgerTestCase
)
from wger.exercises.models import Exercise
from wger.manager.models import (
Workout,
WorkoutLog,
WorkoutSession
)
from wger.utils.cache import cache_mapper
from wger.utils.constants import WORKOUT_TAB
logger = logging.getLogger(__name__)
class WorkoutLogShareButtonTestCase(WgerTestCase):
"""
Test that the share button is correctly displayed and hidden
"""
def test_share_button(self):
url = reverse('manager:log:log', kwargs={'pk': 1})
response = self.client.get(url)
self.assertFalse(response.context['show_shariff'])
self.user_login('admin')
response = self.client.get(url)
self.assertTrue(response.context['show_shariff'])
self.user_login('test')
response = self.client.get(url)
self.assertFalse(response.context['show_shariff'])
class WeightLogAccessTestCase(WgerTestCase):
"""
Test accessing the weight log page
"""
def test_access_shared(self):
"""
Test accessing the URL of a shared weight log
"""
url = reverse('manager:log:log', kwargs={'pk': 1})
self.user_login('admin')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.user_login('test')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.user_logout()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_access_not_shared(self):
"""
Test accessing the URL of a private weight log
"""
url = reverse('manager:log:log', kwargs={'pk': 3})
self.user_login('admin')
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
self.user_login('test')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.user_logout()
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
class CalendarShareButtonTestCase(WgerTestCase):
"""
Test that the share button is correctly displayed and hidden
"""
def test_share_button(self):
url = reverse('manager:workout:calendar', kwargs={'username': 'admin'})
response = self.client.get(url)
self.assertFalse(response.context['show_shariff'])
self.user_login('admin')
response = self.client.get(url)
self.assertTrue(response.context['show_shariff'])
self.user_login('test')
response = self.client.get(url)
self.assertFalse(response.context['show_shariff'])
class CalendarAccessTestCase(WgerTestCase):
"""
Test accessing the calendar page
"""
def test_access_shared(self):
"""
Test accessing the URL of a shared calendar page
"""
url = reverse('manager:workout:calendar', kwargs={'username': 'admin'})
self.user_login('admin')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.user_login('test')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.user_logout()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_access_not_shared(self):
"""
        Test accessing the URL of an unshared calendar page
"""
url = reverse('manager:workout:calendar', kwargs={'username': 'test'})
self.user_login('admin')
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
self.user_login('test')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.user_logout()
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
class WeightLogOverviewAddTestCase(WgerTestCase):
"""
Tests the weight log functionality
"""
def add_weight_log(self, fail=True):
"""
Helper function to test adding weight log entries
"""
# Fetch the overview page
response = self.client.get(reverse('manager:log:log', kwargs={'pk': 1}))
# All access OK, since user 1 has ro_access = True
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['active_tab'], WORKOUT_TAB)
self.assertEqual(response.context['workout'].id, 1)
# Open the log entry page
response = self.client.get(reverse('manager:day:log', kwargs={'pk': 1}))
if fail:
self.assertIn(response.status_code, (302, 403))
else:
self.assertEqual(response.status_code, 200)
# Add new log entries
count_before = WorkoutLog.objects.count()
form_data = {'date': '2012-01-01',
'notes': 'My cool impression',
'impression': '3',
'time_start': datetime.time(10, 0),
'time_end': datetime.time(12, 0),
'form-0-reps': 10,
'form-0-repetition_unit': 1,
'form-0-weight': 10,
'form-0-weight_unit': 1,
'form-0-rir': '1',
'form-1-reps': 10,
'form-1-repetition_unit': 1,
'form-1-weight': 10,
'form-1-weight_unit': 1,
'form-1-rir': '2',
'form-TOTAL_FORMS': 3,
'form-INITIAL_FORMS': 0,
                     'form-MAX_NUM_FORMS': 3}
response = self.client.post(reverse('manager:day:log', kwargs={'pk': 1}), form_data)
count_after = WorkoutLog.objects.count()
# Logged out users get a 302 redirect to login page
# Users not owning the workout, a 403, forbidden
if fail:
self.assertIn(response.status_code, (302, 403))
self.assertEqual(count_before, count_after)
else:
self.assertEqual(response.status_code, 302)
self.assertGreater(count_after, count_before)
#
# Post log without RiR
#
form_data['form-0-rir'] = ''
form_data['form-1-rir'] = ''
count_before = WorkoutLog.objects.count()
response = self.client.post(reverse('manager:day:log', kwargs={'pk': 1}), form_data)
count_after = WorkoutLog.objects.count()
if fail:
self.assertIn(response.status_code, (302, 403))
self.assertEqual(count_before, count_after)
else:
self.assertEqual(response.status_code, 302)
self.assertGreater(count_after, count_before)
def test_add_weight_log_anonymous(self):
"""
Tests adding weight log entries as an anonymous user
"""
self.add_weight_log(fail=True)
def test_add_weight_log_owner(self):
"""
Tests adding weight log entries as the owner user
"""
self.user_login('admin')
self.add_weight_log(fail=False)
def test_add_weight_log_other(self):
"""
        Tests adding weight log entries as a logged-in user not owning the data
"""
self.user_login('test')
self.add_weight_log(fail=True)
class WeightlogTestCase(WgerTestCase):
"""
Tests other model methods
"""
def test_get_workout_session(self):
"""
        Test the get_workout_session method
"""
user1 = User.objects.get(pk=1)
user2 = User.objects.get(pk=2)
workout1 = Workout.objects.get(pk=2)
workout2 = Workout.objects.get(pk=2)
WorkoutLog.objects.all().delete()
log = WorkoutLog()
log.user = user1
log.date = datetime.date(2014, 1, 5)
log.exercise = Exercise.objects.get(pk=1)
log.workout = workout1
log.weight = 10
log.reps = 10
log.save()
session1 = WorkoutSession()
session1.user = user1
session1.workout = workout1
session1.notes = 'Something here'
session1.impression = '3'
session1.date = datetime.date(2014, 1, 5)
session1.save()
session2 = WorkoutSession()
session2.user = user1
session2.workout = workout1
session2.notes = 'Something else here'
session2.impression = '1'
session2.date = datetime.date(2014, 1, 1)
session2.save()
session3 = WorkoutSession()
session3.user = user2
session3.workout = workout2
session3.notes = 'The notes here'
session3.impression = '2'
session3.date = datetime.date(2014, 1, 5)
session3.save()
self.assertEqual(log.get_workout_session(), session1)
class WeightLogDeleteTestCase(WgerDeleteTestCase):
"""
Tests deleting a WorkoutLog
"""
object_class = WorkoutLog
url = reverse_lazy('manager:log:delete', kwargs={'pk': 1})
pk = 1
class WeightLogEntryEditTestCase(WgerTestCase):
"""
Tests editing individual weight log entries
"""
def edit_log_entry(self, fail=True):
"""
Helper function to test edit log entries
"""
response = self.client.get(reverse('manager:log:edit', kwargs={'pk': 1}))
if fail:
self.assertTrue(response.status_code in (302, 403))
else:
self.assertEqual(response.status_code, 200)
date_before = WorkoutLog.objects.get(pk=1).date
response = self.client.post(reverse('manager:log:edit', kwargs={'pk': 1}),
{'date': '2012-01-01',
'reps': 10,
'repetition_unit': 2,
'weight_unit': 3,
'weight': 10,
'exercise': 1,
'rir': 2
})
date_after = WorkoutLog.objects.get(pk=1).date
if fail:
# Logged out users get a 302 redirect to login page
# Users not owning the workout, a 403, forbidden
self.assertTrue(response.status_code in (302, 403))
self.assertEqual(date_before, date_after)
else:
self.assertEqual(response.status_code, 302)
self.assertEqual(date_after, datetime.date(2012, 1, 1))
def test_edit_log_entry_anonymous(self):
"""
        Tests editing weight log entries as an anonymous user
"""
self.edit_log_entry(fail=True)
def test_edit_log_entry_owner(self):
"""
        Tests editing weight log entries as the owner user
"""
self.user_login('admin')
self.edit_log_entry(fail=False)
def test_edit_log_entry_other(self):
"""
        Tests editing weight log entries as a logged-in user not owning the data
"""
self.user_login('test')
self.edit_log_entry(fail=True)
class WorkoutLogCacheTestCase(WgerTestCase):
"""
Workout log cache test case
"""
def test_calendar(self):
"""
Test the log cache is correctly generated on visit
"""
log_hash = hash((1, 2012, 10))
self.user_login('admin')
self.assertFalse(cache.get(cache_mapper.get_workout_log_list(log_hash)))
self.client.get(reverse('manager:workout:calendar', kwargs={'year': 2012, 'month': 10}))
self.assertTrue(cache.get(cache_mapper.get_workout_log_list(log_hash)))
def test_calendar_day(self):
"""
Test the log cache on the calendar day view is correctly generated on visit
"""
log_hash = hash((1, 2012, 10, 1))
self.user_login('admin')
self.assertFalse(cache.get(cache_mapper.get_workout_log_list(log_hash)))
self.client.get(reverse('manager:workout:calendar-day', kwargs={'username': 'admin',
'year': 2012,
'month': 10,
'day': 1}))
self.assertTrue(cache.get(cache_mapper.get_workout_log_list(log_hash)))
def test_calendar_anonymous(self):
"""
Test the log cache is correctly generated on visit by anonymous users
"""
log_hash = hash((1, 2012, 10))
self.user_logout()
self.assertFalse(cache.get(cache_mapper.get_workout_log_list(log_hash)))
self.client.get(reverse('manager:workout:calendar', kwargs={'username': 'admin',
'year': 2012,
'month': 10}))
self.assertTrue(cache.get(cache_mapper.get_workout_log_list(log_hash)))
def test_calendar_day_anonymous(self):
"""
Test the log cache is correctly generated on visit by anonymous users
"""
log_hash = hash((1, 2012, 10, 1))
self.user_logout()
self.assertFalse(cache.get(cache_mapper.get_workout_log_list(log_hash)))
self.client.get(reverse('manager:workout:calendar-day', kwargs={'username': 'admin',
'year': 2012,
'month': 10,
'day': 1}))
self.assertTrue(cache.get(cache_mapper.get_workout_log_list(log_hash)))
def test_cache_update_log(self):
"""
Test that the caches are cleared when saving a log
"""
log_hash = hash((1, 2012, 10))
log_hash_day = hash((1, 2012, 10, 1))
self.user_login('admin')
self.client.get(reverse('manager:workout:calendar', kwargs={'year': 2012, 'month': 10}))
self.client.get(reverse('manager:workout:calendar-day', kwargs={'username': 'admin',
'year': 2012,
'month': 10,
'day': 1}))
log = WorkoutLog.objects.get(pk=1)
log.weight = 35
log.save()
self.assertFalse(cache.get(cache_mapper.get_workout_log_list(log_hash)))
self.assertFalse(cache.get(cache_mapper.get_workout_log_list(log_hash_day)))
def test_cache_update_log_2(self):
"""
        Test that the caches are only cleared for the log's month
"""
log_hash = hash((1, 2012, 10))
log_hash_day = hash((1, 2012, 10, 1))
self.user_login('admin')
self.client.get(reverse('manager:workout:calendar', kwargs={'year': 2012, 'month': 10}))
self.client.get(reverse('manager:workout:calendar-day', kwargs={'username': 'admin',
'year': 2012,
'month': 10,
'day': 1}))
log = WorkoutLog.objects.get(pk=3)
log.weight = 35
log.save()
self.assertTrue(cache.get(cache_mapper.get_workout_log_list(log_hash)))
self.assertTrue(cache.get(cache_mapper.get_workout_log_list(log_hash_day)))
def test_cache_delete_log(self):
"""
Test that the caches are cleared when deleting a log
"""
log_hash = hash((1, 2012, 10))
log_hash_day = hash((1, 2012, 10, 1))
self.user_login('admin')
self.client.get(reverse('manager:workout:calendar', kwargs={'year': 2012, 'month': 10}))
self.client.get(reverse('manager:workout:calendar-day', kwargs={'username': 'admin',
'year': 2012,
'month': 10,
'day': 1}))
log = WorkoutLog.objects.get(pk=1)
log.delete()
self.assertFalse(cache.get(cache_mapper.get_workout_log_list(log_hash)))
self.assertFalse(cache.get(cache_mapper.get_workout_log_list(log_hash_day)))
def test_cache_delete_log_2(self):
"""
        Test that the caches are only cleared for the log's month
"""
log_hash = hash((1, 2012, 10))
log_hash_day = hash((1, 2012, 10, 1))
self.user_login('admin')
self.client.get(reverse('manager:workout:calendar', kwargs={'year': 2012, 'month': 10}))
self.client.get(reverse('manager:workout:calendar-day', kwargs={'username': 'admin',
'year': 2012,
'month': 10,
'day': 1}))
log = WorkoutLog.objects.get(pk=3)
log.delete()
self.assertTrue(cache.get(cache_mapper.get_workout_log_list(log_hash)))
self.assertTrue(cache.get(cache_mapper.get_workout_log_list(log_hash_day)))
class WorkoutLogApiTestCase(api_base_test.ApiBaseResourceTestCase):
"""
Tests the workout log overview resource
"""
pk = 5
resource = WorkoutLog
private_resource = True
data = {"exercise": 1,
"workout": 3,
"reps": 3,
"repetition_unit": 1,
"weight_unit": 2,
"weight": 2,
"date": datetime.date.today()}
|
rolandgeider/wger
|
wger/manager/tests/test_weight_log.py
|
Python
|
agpl-3.0
| 18,794
|
[
"VisIt"
] |
9d58a0a59ec768f3327d7f69dbf53471a4658c88cdfb752e10750623f6048376
|
import numpy as np
import math
from pylab import *
from palettable.wesanderson import Zissou_5 as wsZ
import matplotlib.ticker as mtick
from scipy.interpolate import interp1d
from scipy.interpolate import griddata
import scipy.ndimage as ndimage
#read JP and TH files
#def read_JP_files(fname):
# da = np.genfromtxt(fname, delimiter=" ", comments='#')
# return da[:,0], da[:,1], da[:,2], da[:,3],da[:,4],da[:,5]
#Read JN files
def read_JN_files(fname):
da = np.genfromtxt(fname, delimiter=",")
return da[:,0],da[:,1],da[:,2],da[:,3],da[:,4],da[:,5],da[:,6],da[:,7],da[:,8]
## Plot
fig = figure(figsize=(8,3), dpi=80)
rc('font', family='serif')
rc('xtick', labelsize='xx-small')
rc('ytick', labelsize='xx-small')
gs = GridSpec(1, 100)
gs.update(wspace = 0.34)
#gs.update(hspace = 0.4)
lsize = 7.0
#phase limits
xmin = 0.0
xmax = 90.0
ymin = 0.0
ymax = 90.0
#figure shape parameters
panelh = 70
skiph = 30
mfiglim = 0
#path to files
path_files = "../../out_skymaps/"
#labels size
tsize = 8.0
#general parameters
nu = 'f600'
bprof = 'pbb'
rad = 'r15'
mass = 'm1.6'
rho = 'x10'
incls = ['i5','i10','i20','i30','i40','i50','i60','i70','i80','i90']
incls_g = [5,10,20,30,40,50,60,70,80,90]
colat_g = [10,30,50,70,90]
#pre-read one file to get initial values
colat = 'd10'
incl = incls[0]
fname = path_files + nu+bprof+rad+mass+colat+incl+rho
phase_g, N2kev, N6kev, N12kev, Nbol, Fbol, F2kev, F6kev, F12kev = read_JN_files(fname+'.csv')
Nt = len(phase_g)
phase_t = np.linspace(0.0, 1.0, 200)
incls_t = np.linspace(0.0, 90.0, 100)
maxflux = 0.0
fig.text(0.3, 0.92, 'One spot', ha='center', va='center', size=10)
fig.text(0.7, 0.92, 'Two antipodal spots', ha='center', va='center', size=10)
#empty matrix
pfracs = np.zeros((len(colat_g)+1, len(incls_g)+1))
for k in range(2):
#frame for the main pulse profile fig
#ax1 = subplot(gs[mfiglim:mfiglim+panelh, k])
if k == 0:
#ax1 = subplot(gs[mfiglim:mfiglim+panelh, 0:46])
ax1 = subplot(gs[0, 0:46])
else:
#ax1 = subplot(gs[mfiglim:mfiglim+panelh, 49:95])
ax1 = subplot(gs[0, 49:95])
ax1.minorticks_on()
ax1.set_xlim(xmin, xmax)
ax1.set_ylim(ymin, ymax)
ax1.set_xlabel('Inclination $i$', size=lsize)
if k == 0:
ax1.set_ylabel('Spot colatitude $\\theta_s$', size=lsize)
elif k == 1:
ax1.set_yticklabels([])
for j in range(5):
if j == 0:
colat = '10'
elif j == 1:
colat = '30'
elif j == 2:
colat = '50'
elif j == 3:
colat = '70'
elif j == 4:
colat = '90'
#skymap = np.zeros((Nt, len(incls)))
#skymap = np.zeros((len(incls), Nt))
for q in range(len(incls)):
incl = incls[q]
#incl = incls[0]
fname = path_files + nu+bprof+rad+mass+'d'+colat+incl+rho
phase, N2kev, N6kev, N12kev, Nbol, Fbol, F2kev, F6kev, F12kev = read_JN_files(fname+'.csv')
#add second spot
if k == 1:
phase2, N2kev2, N6kev2, N12kev2, Nbol2, Fbol2, F2kev2, F6kev2, F12kev2 = read_JN_files(fname+'_2nd.csv')
N2kev += N2kev2
N6kev += N6kev2
N12kev += N12kev2
Nbol += Nbol2
Fbol += Fbol2
F2kev += F2kev2
F6kev += F6kev2
F12kev += F12kev2
#build flux matrix
flux = Fbol
#flux = Fbol / flux.max()
#skymap[:,q] = flux
#skymap[q,:] = flux
pfracs[j+1,q+1] = (flux.max() - flux.min()) / (flux.max() + flux.min())
#print skymap.max()
#print shape(skymap)
#print skymap
#skymap_interp = griddata((phase_g, incls_g), skymap, (phase_t, incls_t), method='cubic')
#skymap_interp = griddata((phase_g, incls_g), skymap, np.meshgrid(phase_t, incls_t), method='cubic')
#print skymap_interp
hdata = pfracs
#xr0 = incls_g[0]
xr0 = 0.0
xr1 = incls_g[-1]
#yr0 = colat_g[0]
yr0 = 0.0
yr1 = colat_g[-1]
#print xr0, xr1, yr0, yr1
extent = [xr0, xr1, yr0, yr1]
hdata_smooth = ndimage.gaussian_filter(hdata, sigma=1.0, order=0)
#hdata_masked = np.ma.masked_where(hdata <= 0.001, hdata)
#im = ax1.imshow(hdata_masked.T,
im = ax1.imshow(hdata.T,
#interpolation='nearest',
interpolation='gaussian',
origin='lower',
extent=extent,
#cmap='Reds',
#cmap='jet',
#cmap='YlGnBu',
cmap='plasma_r',
vmin=0.0,
#vmax=0.4,
vmax=1.0,
aspect='auto')
#levels = [0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
levels = [0.1, 0.2, 0.4, 0.6]
#levels = [0.05, 0.1, 0.2, 0.25, 0.3, 0.35]
if (k==0):
manual_locs = [(70, 12),
(70, 18,),
(70, 26),
(70, 33)]
else:
manual_locs = [(70, 12),
(70, 22,),
(70, 33),
(70, 50)]
#cs1 = ax1.contour(hdata_smooth.T,
cs1 = ax1.contour(hdata.T,
levels,
colors = 'k',
origin='lower',
extent=extent)
clabel(cs1, inline=1, fontsize=8, fmt='%1.1f',manual=manual_locs)
#zc = cs1.collections[0]
#setp(zc, linewidth=1)
    print(hdata)
if k == 1:
#mfiglim:mfiglim+panelh, 0:40])
#cbaxes = fig.add_axes([0.90, (mfiglim+panelh)/500, 0.05, panelh/500.0])
cbaxes = subplot(gs[0, 95:97])
cb = colorbar(im,
#label='Probability density',
cax=cbaxes)
cb.set_label('Pulse fraction',size=lsize)
#fig.text(0.5, 0.91-j*0.16, '$\\theta_{\mathrm{s}}$ = '+colat, ha='center', va='center', size=tsize)
mfiglim += panelh+skiph
savefig('fig8.pdf', bbox_inches='tight')
|
natj/bender
|
paper/figs/fig8.py
|
Python
|
mit
| 6,241
|
[
"Gaussian"
] |
fbd1495095295515b12f5a85e4a281cedfff20828cda9396e9234e1a57571982
|
# (c) 2012-2016, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
from django.core.management.base import BaseCommand
# elasticsearch
from elasticsearch_dsl import Index
# local
from galaxy.main.models import Platform, CloudPlatform, Tag, Role
from galaxy.main.search_models import (
TagDoc, CloudPlatformDoc, PlatformDoc, UserDoc)
class Command(BaseCommand):
    help = 'Rebuild custom elasticsearch indexes: galaxy_platforms, galaxy_cloud_platforms, galaxy_tags, galaxy_users'
def handle(self, *args, **options):
self.rebuild_tags()
self.rebuild_platforms()
self.rebuild_cloud_platforms()
self.rebuild_users()
def rebuild_users(self):
galaxy_users = Index('galaxy_users')
galaxy_users.doc_type(UserDoc)
galaxy_users.delete(ignore=404)
galaxy_users.create()
for role in Role.objects.filter(active=True, is_valid=True).order_by('namespace').distinct('namespace').all():
doc = UserDoc(username=role.namespace)
doc.save()
def rebuild_tags(self):
galaxy_tags = Index('galaxy_tags')
galaxy_tags.doc_type(TagDoc)
galaxy_tags.delete(ignore=404)
galaxy_tags.create()
for tag in Tag.objects.filter(active=True).all():
doc = TagDoc(tag=tag.name, roles=tag.get_num_roles())
doc.meta.id = tag.id
doc.save()
def rebuild_platforms(self):
galaxy_platforms = Index('galaxy_platforms')
galaxy_platforms.doc_type(PlatformDoc)
galaxy_platforms.delete(ignore=404)
galaxy_platforms.create()
for platform in Platform.objects.filter(active=True).distinct('name').all():
alias_list = [alias for alias in self.get_platform_search_terms(platform.name)]
alias_list = '' if len(alias_list) == 0 else alias_list
release_list = [p.release for p in Platform.objects.filter(active=True, name=platform.name)
.order_by('release').distinct('release').all()]
search_name = 'Enterprise_Linux' if platform.name == 'EL' else platform.name
doc = PlatformDoc(
name=search_name,
releases=release_list,
roles=Role.objects.filter(active=True, is_valid=True, platforms__name=platform.name)
.order_by('namespace', 'name')
.distinct('namespace', 'name').count(),
alias=alias_list,
autocomplete="%s %s %s" % (search_name, ' '.join(release_list), ' '.join(alias_list))
)
doc.save()
def rebuild_cloud_platforms(self):
index = Index('galaxy_cloud_platforms')
index.doc_type(CloudPlatformDoc)
index.delete(ignore=404)
index.create()
for platform in CloudPlatform.objects.filter(active=True).all():
doc = CloudPlatformDoc(
name=platform.name,
roles=(
Role.objects
.filter(
active=True, is_valid=True,
cloud_platforms__name=platform.name)
.order_by('namespace', 'name')
.distinct('namespace', 'name').count()),
autocomplete=platform.name,
)
doc.save()
def get_platform_search_terms(self, name):
'''
Fetch the unique set of aliases for a given platform
'''
terms = []
for platform in Platform.objects.filter(active=True, name=name).all():
if platform.alias:
terms += platform.alias.split(' ')
return set(terms)
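# Hedged usage sketch (not part of the original source): Django management
# commands like this one are normally run through manage.py. Assuming a
# standard Galaxy checkout, rebuilding the indexes would look like:
#
#     python manage.py rebuild_galaxy_indexes
#
# which drops and recreates the galaxy_users, galaxy_tags, galaxy_platforms
# and galaxy_cloud_platforms indexes in turn.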
|
chouseknecht/galaxy
|
galaxy/main/management/commands/rebuild_galaxy_indexes.py
|
Python
|
apache-2.0
| 4,297
|
[
"Galaxy"
] |
89d5af80e7494098414b0ac8845680a9127547cebc78fd87b7c860f368227367
|
import base64
import binascii
import hashlib
import hmac
import rsa
from bs4 import BeautifulSoup
from kik_unofficial.datatypes.xmpp.base_elements import XMPPElement
from kik_unofficial.device_configuration import device_id, kik_version_info, android_id
from kik_unofficial.utilities.cryptographic_utilities import CryptographicUtils
captcha_element = '<challenge><response>{}</response></challenge>'
kik_version = kik_version_info["kik_version"]
private_key_pem = "-----BEGIN RSA PRIVATE KEY-----\nMIIBPAIBAAJBANEWUEINqV1KNG7Yie9GSM8t75ZvdTeqT7kOF40kvDHIp" \
"/C3tX2bcNgLTnGFs8yA2m2p7hKoFLoxh64vZx5fZykCAwEAAQJAT" \
"/hC1iC3iHDbQRIdH6E4M9WT72vN326Kc3MKWveT603sUAWFlaEa5T80GBiP/qXt9PaDoJWcdKHr7RqDq" \
"+8noQIhAPh5haTSGu0MFs0YiLRLqirJWXa4QPm4W5nz5VGKXaKtAiEA12tpUlkyxJBuuKCykIQbiUXHEwzFYbMHK5E" \
"/uGkFoe0CIQC6uYgHPqVhcm5IHqHM6/erQ7jpkLmzcCnWXgT87ABF2QIhAIzrfyKXp1ZfBY9R0H4pbboHI4uatySKc" \
"Q5XHlAMo9qhAiEA43zuIMknJSGwa2zLt/3FmVnuCInD6Oun5dbcYnqraJo=\n-----END RSA PRIVATE KEY----- "
private_key = rsa.PrivateKey.load_pkcs1(private_key_pem, format='PEM')
class LoginRequest(XMPPElement):
"""
Represents a Kik Login request.
"""
def __init__(self, username, password, captcha_result=None, device_id_override=None, android_id_override=None):
super().__init__()
self.username = username
self.password = password
self.captcha_result = captcha_result
self.device_id_override = device_id_override
self.android_id_override = android_id_override
def serialize(self) -> bytes:
password_key = CryptographicUtils.key_from_password(self.username, self.password)
captcha = captcha_element.format(self.captcha_result) if self.captcha_result else ''
if '@' in self.username:
tag = ('<email>{}</email>'
'<passkey-e>{}</passkey-e>')
else:
tag = ('<username>{}</username>'
'<passkey-u>{}</passkey-u>')
data = ('<iq type="set" id="{}">'
'<query xmlns="jabber:iq:register">'
'{}'
'<device-id>{}</device-id>'
'<install-referrer>utm_source=google-play&utm_medium=organic</install-referrer>'
'<operator>unknown</operator>'
'<install-date>unknown</install-date>'
'<device-type>android</device-type>'
'<brand>generic</brand>'
'<logins-since-install>1</logins-since-install>'
'<version>{}</version>'
'<lang>en_US</lang>'
'<android-sdk>19</android-sdk>'
'<registrations-since-install>0</registrations-since-install>'
'<prefix>CAN</prefix>'
'<android-id>{}</android-id>'
'<model>Samsung Galaxy S5 - 4.4.4 - API 19 - 1080x1920</model>'
'{}'
'</query>'
'</iq>').format(self.message_id, tag.format(self.username, password_key),
self.device_id_override if self.device_id_override else device_id,
kik_version, self.android_id_override if self.android_id_override else android_id, captcha)
return data.encode()
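# Hedged usage sketch (not from the original source; the username and
# password values are placeholders): building the raw login stanza that
# would be written to the Kik socket.
#
#     req = LoginRequest("some_username", "some_password")
#     stanza = req.serialize()  # bytes containing the <iq ...> XML above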
class LoginResponse:
"""
Represents a Kik Login response that is received after a log-in attempt.
"""
def __init__(self, data: BeautifulSoup):
self.kik_node = data.query.node.text
self.email = data.query.email.text
self.is_email_confirmed = data.query.email['confirmed'] == "true"
self.username = data.query.username.text
self.first_name = data.query.first.text
self.last_name = data.query.last.text
class MakeAnonymousStreamInitTag(XMPPElement):
def __init__(self, device_id_override=None, n=1):
super().__init__()
self.device_id_override = device_id_override
self.n = n
def serialize(self):
can = 'CAN' # XmppSocketV2.java line 180,
device = self.device_id_override if self.device_id_override else device_id
timestamp = str(CryptographicUtils.make_kik_timestamp())
sid = CryptographicUtils.make_kik_uuid()
signature = rsa.sign("{}:{}:{}:{}".format(can + device, kik_version, timestamp, sid).encode(), private_key, 'SHA-256')
signature = base64.b64encode(signature, '-_'.encode()).decode().rstrip('=')
hmac_data = timestamp + ":" + can + device
hmac_secret_key = CryptographicUtils.build_hmac_key()
cv = binascii.hexlify(hmac.new(hmac_secret_key, hmac_data.encode(), hashlib.sha1).digest()).decode()
the_map = {
'signed': signature,
'lang': 'en_US',
'sid': sid,
'anon': '1',
'ts': timestamp,
'v': kik_version,
'cv': cv,
'conn': 'WIFI',
'dev': can+device,
}
# Test data to confirm the sort_kik_map function returns the correct result.
# the_map = {
# 'signed': 'signature',
# 'lang': 'en_US',
# 'sid': 'sid',
# 'anon': '1',
# 'ts': 'timestamp',
# 'v': 'kik_version',
# 'cv': 'cv',
# 'conn': 'WIFI',
# 'dev': 'can+device',
# }
if self.n > 0:
the_map['n'] = self.n
packet = CryptographicUtils.make_connection_payload(*CryptographicUtils.sort_kik_map(the_map))
return packet.encode()
class EstablishAuthenticatedSessionRequest(XMPPElement):
"""
    A request sent at the beginning of the connection to establish
    an authenticated session, that is, on behalf of a specific Kik user, with their credentials.
"""
def __init__(self, node, username, password, device_id_override=None):
super().__init__()
self.node = node
self.username = username
self.password = password
self.device_id_override = device_id_override
def serialize(self):
jid = self.node + "@talk.kik.com"
jid_with_resource = jid + "/CAN" + (self.device_id_override if self.device_id_override else device_id)
timestamp = str(CryptographicUtils.make_kik_timestamp())
sid = CryptographicUtils.make_kik_uuid()
# some super secret cryptographic stuff
signature = rsa.sign("{}:{}:{}:{}".format(jid, kik_version, timestamp, sid).encode(), private_key, 'SHA-256')
signature = base64.b64encode(signature, '-_'.encode()).decode().rstrip('=')
hmac_data = timestamp + ":" + jid
hmac_secret_key = CryptographicUtils.build_hmac_key()
cv = binascii.hexlify(hmac.new(hmac_secret_key, hmac_data.encode(), hashlib.sha1).digest()).decode()
password_key = CryptographicUtils.key_from_password(self.username, self.password)
the_map = {'from': jid_with_resource, 'to': 'talk.kik.com', 'p': password_key, 'cv': cv, 'v': kik_version,
'sid': sid, 'n': '1', 'conn': 'WIFI', 'ts': timestamp, 'lang': 'en_US', 'signed': signature}
packet = CryptographicUtils.make_connection_payload(*CryptographicUtils.sort_kik_map(the_map))
return packet.encode()
class ConnectionFailedResponse:
def __init__(self, data: BeautifulSoup):
self.message = data.find('msg').text
class CaptchaElement:
"""
    The 'stc' element is received when Kik requires a captcha to be filled in; it is followed by a 'hold' element,
    after which the connection is paused.
"""
def __init__(self, data: BeautifulSoup):
self.type = data.stp['type']
self.captcha_url = data.stp.text + "&callback_url=https://kik.com/captcha-url"
self.stc_id = data['id']
class CaptchaSolveRequest(XMPPElement):
"""
Response to the 'stc' element. Given the result of the captcha, the connection will resume.
"""
def __init__(self, stc_id: str, captcha_result: str):
super().__init__()
self.captcha_result = captcha_result
self.stc_id = stc_id
def serialize(self) -> bytes:
data = (
'<stc id="{}">'
'<sts>{}</sts>'
'</stc>'
).format(self.stc_id, self.captcha_result)
return data.encode()
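# Hedged usage sketch (not from the original source; the stc_id and captcha
# result values are placeholders): answering a captcha challenge after
# receiving a CaptchaElement with id 'abc123'.
#
#     solve = CaptchaSolveRequest(stc_id="abc123", captcha_result="resp-token")
#     payload = solve.serialize()
#     # b'<stc id="abc123"><sts>resp-token</sts></stc>'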
|
tomer8007/kik-bot-api-unofficial
|
kik_unofficial/datatypes/xmpp/login.py
|
Python
|
mit
| 8,300
|
[
"Galaxy"
] |
738f03d29d508efdabc399ffec3d58d661200b0c37a3fc5acceb0df2904342fc
|
import os.path
from lwr.lwr_client import PathMapper
from lwr.lwr_client.action_mapper import path_type
from .test_utils import TempDirectoryTestCase
from galaxy.util.bunch import Bunch
class PathMapperTestCase(TempDirectoryTestCase):
def test_input(self):
local_path = os.path.join(os.path.dirname(self.temp_directory), "dataset1.dat")
path_mapper = self._path_mapper(local_path, path_type.INPUT)
new_path = path_mapper.remote_input_path_rewrite(local_path)
assert new_path == "/lwr/staging/1/inputs/dataset1.dat"
def test_output(self):
local_path = os.path.join(os.path.dirname(self.temp_directory), "dataset1.dat")
path_mapper = self._path_mapper(local_path, path_type.OUTPUT)
new_path = path_mapper.remote_output_path_rewrite(local_path)
assert new_path == "/lwr/staging/1/outputs/dataset1.dat"
def test_output_workdir(self):
local_path = os.path.join(self.temp_directory, "dataset1.dat")
path_mapper = self._path_mapper(local_path, path_type.OUTPUT_WORKDIR)
new_path = path_mapper.remote_output_path_rewrite(local_path)
assert new_path == "/lwr/staging/1/working/dataset1.dat"
def test_input_with_no_staging(self):
local_path = os.path.join(os.path.dirname(self.temp_directory), "dataset1.dat")
path_mapper = self._path_mapper(local_path, path_type.INPUT, staging_needed=False)
new_path = path_mapper.remote_input_path_rewrite(local_path)
assert new_path is None
def test_output_with_no_staging(self):
local_path = os.path.join(os.path.dirname(self.temp_directory), "dataset1.dat")
path_mapper = self._path_mapper(local_path, path_type.OUTPUT, staging_needed=False)
new_path = path_mapper.remote_output_path_rewrite(local_path)
assert new_path is None
def test_version_path(self):
local_path = os.path.join(os.path.dirname(self.temp_directory), "GALAXY_VERSION_234")
path_mapper = self._path_mapper(local_path, path_type.OUTPUT)
new_path = path_mapper.remote_version_path_rewrite(local_path)
assert new_path == "/lwr/staging/1/outputs/COMMAND_VERSION"
def _path_mapper(self, expected_path, expected_type, staging_needed=True):
action_mapper = TestActionMapper(expected_path, expected_type, staging_needed)
path_mapper = PathMapper(
client=None,
remote_job_config=self.__test_remote_config(),
local_working_directory=self.temp_directory,
action_mapper=action_mapper,
)
return path_mapper
def __test_remote_config(self):
return dict(
inputs_directory="/lwr/staging/1/inputs",
outputs_directory="/lwr/staging/1/outputs",
configs_directory="/lwr/staging/1/configs",
working_directory="/lwr/staging/1/working",
unstructured_files_directory="/lwr/staging/1/unstructured",
system_properties=dict(separator="/"),
)
class TestActionMapper(object):
def __init__(self, expected_path, expected_type, staging_needed):
self.expected_path = expected_path
self.expected_type = expected_type
self._action = Bunch(staging_needed=staging_needed)
if not staging_needed:
self._action.path_rewrite = lambda path: None
def action(self, path, type):
assert self.expected_path == path
assert self.expected_type == type
return self._action
|
jmchilton/lwr
|
test/path_mapper_test.py
|
Python
|
apache-2.0
| 3,503
|
[
"Galaxy"
] |
22e05b97e70e8949b6b43e89f851bc2c9367afb2413e665b4a0304dcd746f390
|
#!/usr/bin/env python3
# encoding: utf-8
#
# datamodel.py
#
# Created by José Sánchez-Gallego on 18 Sep 2016.
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from marvin import config
__all__ = ('MapsProperty', 'MapsPropertyList', 'dap_datamodel',
'get_dap_datamodel', 'get_dap_maplist', 'get_default_mapset',
'get_default_plot_params')
class MapsPropertyList(list):
def __init__(self, items, version=None):
list.__init__(self, items)
self.version = version
def get(self, value):
"""Returns the MapsProperty and channel matching a value."""
for item in self:
channels = [None] if not item.channels else item.channels
for channel in channels:
if value.lower() == item.fullname(channel):
return item, channel
return None
def list_names(self):
"""Returns a list of property names."""
return [prop.name for prop in self]
def __eq__(self, name):
for item in self:
if name.lower() == item.name:
return item
def __contains__(self, name):
for item in self:
if name.lower() == item.name:
return True
return False
def __getitem__(self, name):
if isinstance(name, str):
return self == name
else:
return list.__getitem__(self, name)
class MapsProperty(object):
"""A class to represent a Maps property."""
def __init__(self, name, ivar=False, mask=False, channels=None, unit=None, description=''):
self.name = name.lower()
self.ivar = ivar
self.mask = mask
self.channels = channels
self.unit = unit
self.description = description
if self.channels:
for ii in range(len(self.channels)):
self.channels[ii] = self.channels[ii].lower()
def __repr__(self):
return ('<MapsProperty name={0.name}, ivar={0.ivar}, mask={0.mask}, n_channels={1}>'
.format(self, len(self.channels) if self.channels else None))
def fullname(self, channel=None, ext=None):
if self.channels is None:
if ext:
return self.name + '_' + ext
else:
return self.name
if channel is None:
            raise ValueError('this MapsProperty has multiple channels. Please specify one.')
if channel.lower() not in self.channels:
raise ValueError('invalid channel.')
if ext is not None:
assert ext in ['ivar', 'mask'], 'ext must be one of ivar or mask.'
return '{0}_{1}_{2}'.format(self.name, ext, channel.lower())
return '{0}_{1}'.format(self.name, channel.lower())
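# Hedged example (not from the original source) of how fullname() composes
# property, extension and channel names; 'emline_gflux' and 'ha_6564' are
# taken from the datamodel defined below:
#
#     prop = MapsProperty('emline_gflux', ivar=True, mask=True,
#                         channels=['ha_6564'])
#     prop.fullname('ha_6564')              # 'emline_gflux_ha_6564'
#     prop.fullname('ha_6564', ext='ivar')  # 'emline_gflux_ivar_ha_6564'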
MPL4_emline_channels = ['oiid_3728', 'hb_4862', 'oiii_4960', 'oiii_5008', 'oi_6302',
'oi_6365', 'nii_6549', 'ha_6564', 'nii_6585', 'sii_6718', 'sii_6732']
MPL4_specindex_channels = ['d4000', 'caii0p39', 'hdeltaa', 'cn1', 'cn2', 'ca4227', 'hgammaa',
'fe4668', 'hb', 'mgb', 'fe5270', 'fe5335', 'fe5406', 'nad', 'tio1',
'tio2', 'nai0p82', 'caii0p86a', 'caii0p86b', 'caii0p86c', 'mgi0p88',
'tio0p89', 'feh0p99']
MPL4_specindex_units = ['Angstrom', 'Angstrom', 'Angstrom', 'mag', 'mag', 'Angstrom', 'Angstrom',
'Angstrom', 'Angstrom', 'Angstrom', 'Angstrom', 'Angstrom', 'Angstrom',
'Angstrom', 'mag', 'mag', 'Angstrom', 'Angstrom', 'Angstrom', 'Angstrom',
'Angstrom', 'Angstrom', 'Angstrom']
MPL5_extra_channels = ['oii_3727', 'oii_3729', 'heps_3971', 'hdel_4102', 'hgam_4341', 'heii_4687',
'hei_5877', 'siii_8831', 'siii_9071', 'siii_9533']
default_version = '2.0.2'
dap_datamodel = {
'1.1.1': MapsPropertyList([
MapsProperty('emline_gflux', ivar=True, mask=True, channels=MPL4_emline_channels,
unit='1E-17 erg/s/cm^2/spaxel',
description='Fluxes of emission lines based on a single Gaussian fit.'),
MapsProperty('emline_gvel', ivar=True, mask=True, channels=MPL4_emline_channels,
unit='km/s',
description='Doppler velocity shifts for emission lines relative to '
'the NSA redshift based on a single Gaussian fit.'),
MapsProperty('emline_gsigma', ivar=True, mask=True, channels=MPL4_emline_channels,
unit='km/s',
description='Velocity dispersions of emission lines based on a '
'single Gaussian fit.'),
MapsProperty('emline_instsigma', ivar=False, mask=False,
channels=MPL4_emline_channels,
unit='km/s',
description='Instrumental velocity dispersion at the line centroids '
'for emission lines (based on a single Gaussian fit.'),
MapsProperty('emline_ew', ivar=True, mask=True, channels=MPL4_emline_channels,
unit='Angstrom',
description='Equivalent widths for emission lines based on a '
'single Gaussian fit.'),
MapsProperty('emline_sflux', ivar=True, mask=True, channels=MPL4_emline_channels,
unit='1E-17 erg/s/cm^2/spaxel',
description='Fluxes for emission lines based on integrating the '
'flux over a set of passbands.'),
MapsProperty('stellar_vel', ivar=True, mask=True, channels=None,
unit='km/s',
description='Stellar velocity measurements.'),
MapsProperty('stellar_sigma', ivar=True, mask=True, channels=None,
unit='km/s',
description='Stellar velocity dispersion measurements.'),
MapsProperty('specindex', ivar=True, mask=True,
channels=MPL4_specindex_channels,
unit=None,
description='Measurements of spectral indices.'),
MapsProperty('binid', ivar=False, mask=False, channels=None,
unit=None,
description='ID number for the bin for which the pixel value was '
'calculated; bins are sorted by S/N.')],
version='1.1.1'),
'2.0.2': MapsPropertyList([
MapsProperty('spx_skycoo', ivar=False, mask=False, channels=['on_sky_x', 'on_sky_y'],
unit='arcsec',
description='Offsets of each spaxel from the galaxy center.'),
MapsProperty('spx_ellcoo', ivar=False, mask=False,
channels=['elliptical_radius', 'elliptical_azimuth'],
unit=['arcsec', 'degrees'],
description='Elliptical polar coordinates of each spaxel from '
'the galaxy center.'),
MapsProperty('spx_mflux', ivar=True, mask=False, channels=None,
unit='1E-17 erg/s/cm^2/ang/spaxel',
description='Mean flux in r-band (5600.1-6750.0 ang).'),
MapsProperty('spx_snr', ivar=False, mask=False, channels=None,
unit=None,
description='r-band signal-to-noise ratio per pixel.'),
MapsProperty('binid', ivar=False, mask=False, channels=None,
unit=None,
description='Numerical ID for spatial bins.'),
MapsProperty('bin_lwskycoo', ivar=False, mask=False,
channels=['lum_weighted_on_sky_x', 'lum_weighted_on_sky_y'],
unit='arcsec',
description='Light-weighted offset of each bin from the galaxy center.'),
MapsProperty('bin_lwellcoo', ivar=False, mask=False,
channels=['lum_weighted_elliptical_radius',
'lum_weighted_elliptical_azimuth'],
unit=['arcsec', 'degrees'],
description='light-weighted elliptical polar coordinates of each bin '
'from the galaxy center.'),
MapsProperty('bin_area', ivar=False, mask=False, channels=None,
unit='arcsec^2',
description='Area of each bin.'),
MapsProperty('bin_farea', ivar=False, mask=False, channels=None,
unit=None,
description='Fractional area that the bin covers for the expected bin '
'shape (only relevant for radial binning).'),
MapsProperty('bin_mflux', ivar=True, mask=True, channels=None,
unit='1E-17 erg/s/cm^2/ang/spaxel',
description='Mean flux in the r-band for the binned spectra.'),
MapsProperty('bin_snr', ivar=False, mask=False, channels=None,
unit=None,
description='r-band signal-to-noise ratio per pixel in the binned spectra.'),
MapsProperty('stellar_vel', ivar=True, mask=True, channels=None,
unit='km/s',
description='Stellar velocity relative to NSA redshift.'),
MapsProperty('stellar_sigma', ivar=True, mask=True, channels=None,
unit='km/s',
description='Stellar velocity dispersion (must be corrected using '
'STELLAR_SIGMACORR)'),
MapsProperty('stellar_sigmacorr', ivar=False, mask=False, channels=None,
unit='km/s',
                     description='Quadrature correction for STELLAR_SIGMA to obtain the '
                                 'astrophysical velocity dispersion.'),
MapsProperty('stellar_cont_fresid', ivar=False, mask=False,
channels=['68th_percentile', '99th_percentile'],
unit=None,
                     description='68% and 99% growth of the fractional residuals between '
                                 'the model and data.'),
MapsProperty('stellar_cont_rchi2', ivar=False, mask=False, channels=None,
unit=None,
description='Reduced chi-square of the stellar continuum fit.'),
MapsProperty('emline_sflux', ivar=True, mask=True,
channels=MPL4_emline_channels + MPL5_extra_channels,
unit='1E-17 erg/s/cm^2/spaxel',
description='Non-parametric summed flux for emission lines.'),
MapsProperty('emline_sew', ivar=True, mask=True,
channels=MPL4_emline_channels + MPL5_extra_channels,
unit='Angstrom',
description='Emission line non-parametric equivalent widths measurements.'),
MapsProperty('emline_gflux', ivar=True, mask=True,
channels=MPL4_emline_channels + MPL5_extra_channels,
unit='1E-17 erg/s/cm^2/spaxel',
description='Gaussian profile integrated flux for emission lines.'),
MapsProperty('emline_gvel', ivar=True, mask=True,
channels=MPL4_emline_channels + MPL5_extra_channels,
unit='km/s',
description='Gaussian profile velocity for emission lines.'),
MapsProperty('emline_gsigma', ivar=True, mask=True,
channels=MPL4_emline_channels + MPL5_extra_channels,
unit='km/s',
description='Gaussian profile velocity dispersion for emission lines; '
'must be corrected using EMLINE_INSTSIGMA'),
MapsProperty('emline_instsigma', ivar=False, mask=False,
channels=MPL4_emline_channels + MPL5_extra_channels,
unit='km/s',
description='Instrumental dispersion at the fitted line center.'),
MapsProperty('specindex', ivar=True, mask=True,
channels=['d4000', 'dn4000'],
unit=None,
description='Measurements of spectral indices.'),
MapsProperty('specindex_corr', ivar=False, mask=False,
channels=['d4000', 'dn4000'],
unit=None,
description='Velocity dispersion corrections for the '
'spectral index measurements '
'(can be ignored for D4000, Dn4000).')],
version='2.0.2')}
def get_dap_datamodel(dapver=None):
"""Returns the correct DAP datamodel for dapver."""
if not dapver:
__, dapver = config.lookUpVersions(config.release)
if dapver not in dap_datamodel:
return dap_datamodel[default_version]
else:
return dap_datamodel[dapver]
def get_dap_maplist(dapver=None, web=None):
''' Returns a list of all possible maps for dapver '''
dapdm = get_dap_datamodel(dapver)
daplist = []
for p in dapdm:
if p.channels:
if web:
daplist.extend(['{0}:{1}'.format(p.name, c) for c in p.channels])
else:
daplist.extend(['{0}_{1}'.format(p.name, c) for c in p.channels])
else:
daplist.append(p.name)
return daplist
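# Hedged example (not from the original source): the web flag only changes
# the separator between the property name and its channel.
#
#     get_dap_maplist('2.0.2')            # [..., 'emline_gflux_ha_6564', ...]
#     get_dap_maplist('2.0.2', web=True)  # [..., 'emline_gflux:ha_6564', ...]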
def get_default_mapset(dapver=None):
''' Returns a list of six default maps for display '''
dapdefaults = {
# 6 defaults
# '1.1.1': ['emline_gflux:oiid_3728', 'emline_gflux:hb_4862', 'emline_gflux:oiii_5008',
# 'emline_gflux:ha_6564', 'emline_gflux:nii_6585', 'emline_gflux:sii_6718'],
# '2.0.2': ['emline_gflux:oiid_3728', 'emline_gflux:hb_4862', 'emline_gflux:oiii_5008',
# 'emline_gflux:ha_6564', 'emline_gflux:nii_6585', 'emline_gflux:sii_6718']
# 3 defaults
'1.1.1': ['stellar_vel', 'emline_gflux:ha_6564', 'specindex:d4000'],
'2.0.2': ['stellar_vel', 'emline_gflux:ha_6564', 'specindex:d4000']
}
return dapdefaults[dapver] if dapver in dapdefaults else []
def get_default_plot_params(dapver=None):
"""Returns default map plotting parameters."""
bitmasks = {'1.1.1': {'badData': {'doNotUse': 0}},
'2.0.2': {'nocov': 0,
'badData': {'unreliable': 5,
'doNotUse': 30}
}
}
plot_defaults = {
'1.1.1': {'default': {'bitmasks': bitmasks['1.1.1'],
'cmap': 'linearlab',
'percentile_clip': [5, 95],
'symmetric': False,
'snr_min': 1},
'vel': {'bitmasks': bitmasks['1.1.1'],
'cmap': 'RdBu_r',
'percentile_clip': [10, 90],
'symmetric': True,
'snr_min': None},
'sigma': {'bitmasks': bitmasks['1.1.1'],
'cmap': 'inferno',
'percentile_clip': [10, 90],
'symmetric': False,
'snr_min': 1}},
'2.0.2': {'default': {'bitmasks': bitmasks['2.0.2'],
'cmap': 'linearlab',
'percentile_clip': [5, 95],
'symmetric': False,
'snr_min': 1},
'vel': {'bitmasks': bitmasks['2.0.2'],
'cmap': 'RdBu_r',
'percentile_clip': [10, 90],
'symmetric': True,
'snr_min': None},
'sigma': {'bitmasks': bitmasks['2.0.2'],
'cmap': 'inferno',
'percentile_clip': [10, 90],
'symmetric': False,
'snr_min': 1}}
}
return plot_defaults[dapver] if dapver in plot_defaults else {}
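# Hedged example (not from the original source): pulling the velocity-map
# plotting defaults for DAP version 2.0.2.
#
#     params = get_default_plot_params('2.0.2')
#     params['vel']['cmap']       # 'RdBu_r'
#     params['vel']['symmetric']  # True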
|
bretthandrews/marvin
|
python/marvin/utils/dap/datamodel.py
|
Python
|
bsd-3-clause
| 16,189
|
[
"Galaxy",
"Gaussian"
] |
47c9e6cbe24a590efdf5a4faa0cafc7551ac5a96af16032226438ba6b543e673
|
import vtk
class Cut:
def __init__(self, volume, plane, z):
self.volume = volume
self.color = volume.color
self.cutEdges = vtk.vtkCutter()
self.cutEdges.SetInputConnection(self.volume.normal.GetOutputPort())
self.cutEdges.SetCutFunction(plane)
self.cutEdges.GenerateCutScalarsOn()
self.cutEdges.SetValue(0, z)
self.cutStrips = vtk.vtkStripper()
self.cutStrips.SetInputConnection(self.cutEdges.GetOutputPort())
self.cutStrips.Update()
self.cutPoly = vtk.vtkPolyData()
self.cutPoly.SetPoints(self.cutStrips.GetOutput().GetPoints())
self.cutPoly.SetPolys(self.cutStrips.GetOutput().GetLines())
self.cutTriangles = vtk.vtkTriangleFilter()
self.cutTriangles.SetInput(self.cutPoly)
self.mapper = vtk.vtkPolyDataMapper()
        # The mapper consumes the triangulated cut; a direct SetInput of the
        # raw cut polydata would be superseded by this connection anyway.
        self.mapper.SetInputConnection(self.cutTriangles.GetOutputPort())
def move(self, v):
self.cutEdges.SetValue(0, v)
self.cutStrips.Update()
self.cutPoly.SetPoints(self.cutStrips.GetOutput().GetPoints())
self.cutPoly.SetPolys(self.cutStrips.GetOutput().GetLines())
self.mapper.Update()
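# Hedged usage sketch (not from the original source; `volume` stands in for
# any object exposing a `normal` vtkPolyDataNormals filter and a `color`
# attribute, as Cut.__init__ assumes; uses the same legacy VTK 5 API as the
# class above):
#
#     plane = vtk.vtkPlane()
#     plane.SetNormal(0, 0, 1)
#     cut = Cut(volume, plane, z=0.0)
#     cut.move(1.0)  # re-slice the volume at z = 1.0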
|
FedericoV/FractalMammaryLobule
|
modeler/view/elements/Cut.py
|
Python
|
gpl-3.0
| 1,229
|
[
"VTK"
] |
bfb92936d7279681de1dc11733842709bb5f7727eb7ef4918ac150b8e81735a6
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import json
import frappe.utils
from frappe.utils import cstr, flt, getdate, comma_and
from frappe import _
from frappe.model.mapper import get_mapped_doc
from erpnext.controllers.selling_controller import SellingController
form_grid_templates = {
"items": "templates/form_grid/item_grid.html"
}
class SalesOrder(SellingController):
def validate_mandatory(self):
# validate transaction date v/s delivery date
if self.delivery_date:
if getdate(self.transaction_date) > getdate(self.delivery_date):
frappe.throw(_("Expected Delivery Date cannot be before Sales Order Date"))
def validate_po(self):
# validate p.o date v/s delivery date
if self.po_date and self.delivery_date and getdate(self.po_date) > getdate(self.delivery_date):
frappe.throw(_("Expected Delivery Date cannot be before Purchase Order Date"))
if self.po_no and self.customer:
so = frappe.db.sql("select name from `tabSales Order` \
where ifnull(po_no, '') = %s and name != %s and docstatus < 2\
and customer = %s", (self.po_no, self.name, self.customer))
if so and so[0][0]:
frappe.msgprint(_("Warning: Sales Order {0} already exists against same Purchase Order number").format(so[0][0]))
def validate_for_items(self):
check_list = []
for d in self.get('items'):
check_list.append(cstr(d.item_code))
if frappe.db.get_value("Item", d.item_code, "is_stock_item") == 'Yes':
if not d.warehouse:
frappe.throw(_("Reserved warehouse required for stock item {0}").format(d.item_code))
# used for production plan
d.transaction_date = self.transaction_date
tot_avail_qty = frappe.db.sql("select projected_qty from `tabBin` \
where item_code = %s and warehouse = %s", (d.item_code,d.warehouse))
d.projected_qty = tot_avail_qty and flt(tot_avail_qty[0][0]) or 0
unique_chk_list = set(check_list)
if len(unique_chk_list) != len(check_list):
frappe.msgprint(_("Warning: Same item has been entered multiple times."))
def validate_sales_mntc_quotation(self):
for d in self.get('items'):
if d.prevdoc_docname:
res = frappe.db.sql("select name from `tabQuotation` where name=%s and order_type = %s", (d.prevdoc_docname, self.order_type))
if not res:
frappe.msgprint(_("Quotation {0} not of type {1}").format(d.prevdoc_docname, self.order_type))
def validate_order_type(self):
super(SalesOrder, self).validate_order_type()
def validate_delivery_date(self):
if self.order_type == 'Sales' and not self.delivery_date:
frappe.throw(_("Please enter 'Expected Delivery Date'"))
self.validate_sales_mntc_quotation()
def validate_proj_cust(self):
if self.project_name and self.customer_name:
res = frappe.db.sql("""select name from `tabProject` where name = %s
and (customer = %s or ifnull(customer,'')='')""",
(self.project_name, self.customer))
if not res:
frappe.throw(_("Customer {0} does not belong to project {1}").format(self.customer, self.project_name))
def validate(self):
super(SalesOrder, self).validate()
self.validate_order_type()
self.validate_delivery_date()
self.validate_mandatory()
self.validate_proj_cust()
self.validate_po()
self.validate_uom_is_integer("stock_uom", "qty")
self.validate_for_items()
self.validate_warehouse()
from erpnext.stock.doctype.packed_item.packed_item import make_packing_list
make_packing_list(self,'items')
self.validate_with_previous_doc()
if not self.status:
self.status = "Draft"
from erpnext.controllers.status_updater import validate_status
validate_status(self.status, ["Draft", "Submitted", "Stopped",
"Cancelled"])
if not self.billing_status: self.billing_status = 'Not Billed'
if not self.delivery_status: self.delivery_status = 'Not Delivered'
def validate_warehouse(self):
from erpnext.stock.utils import validate_warehouse_company
warehouses = list(set([d.warehouse for d in
self.get("items") if d.warehouse]))
for w in warehouses:
validate_warehouse_company(w, self.company)
def validate_with_previous_doc(self):
super(SalesOrder, self).validate_with_previous_doc({
"Quotation": {
"ref_dn_field": "prevdoc_docname",
"compare_fields": [["company", "="], ["currency", "="]]
}
})
def update_enquiry_status(self, prevdoc, flag):
enq = frappe.db.sql("select t2.prevdoc_docname from `tabQuotation` t1, `tabQuotation Item` t2 where t2.parent = t1.name and t1.name=%s", prevdoc)
if enq:
frappe.db.sql("update `tabOpportunity` set status = %s where name=%s",(flag,enq[0][0]))
def update_prevdoc_status(self, flag):
for quotation in list(set([d.prevdoc_docname for d in self.get("items")])):
if quotation:
doc = frappe.get_doc("Quotation", quotation)
if doc.docstatus==2:
frappe.throw(_("Quotation {0} is cancelled").format(quotation))
doc.set_status(update=True)
doc.update_opportunity()
def on_submit(self):
super(SalesOrder, self).on_submit()
self.check_credit_limit()
self.update_stock_ledger(update_stock = 1)
frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype, self.base_grand_total, self)
self.update_prevdoc_status('submit')
frappe.db.set(self, 'status', 'Submitted')
#------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------
		# Mark each sold vehicle as unavailable (vehicle_status=1)
		for item in self.items:
			frappe.db.sql("""update `tabVehicle Info` set vehicle_status=1 where name=%s""", (item.select_engine_no,))
#------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------
def on_cancel(self):
# Cannot cancel stopped SO
if self.status == 'Stopped':
frappe.throw(_("Stopped order cannot be cancelled. Unstop to cancel."))
self.check_nextdoc_docstatus()
self.update_stock_ledger(update_stock = -1)
self.update_prevdoc_status('cancel')
frappe.db.set(self, 'status', 'Cancelled')
def check_nextdoc_docstatus(self):
# Checks Delivery Note
submit_dn = frappe.db.sql_list("""select t1.name from `tabDelivery Note` t1,`tabDelivery Note Item` t2
where t1.name = t2.parent and t2.against_sales_order = %s and t1.docstatus = 1""", self.name)
if submit_dn:
frappe.throw(_("Delivery Notes {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_dn)))
# Checks Sales Invoice
submit_rv = frappe.db.sql_list("""select t1.name
from `tabSales Invoice` t1,`tabSales Invoice Item` t2
where t1.name = t2.parent and t2.sales_order = %s and t1.docstatus = 1""",
self.name)
if submit_rv:
frappe.throw(_("Sales Invoice {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_rv)))
#check maintenance schedule
submit_ms = frappe.db.sql_list("""select t1.name from `tabMaintenance Schedule` t1,
`tabMaintenance Schedule Item` t2
where t2.parent=t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1""", self.name)
if submit_ms:
frappe.throw(_("Maintenance Schedule {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_ms)))
# check maintenance visit
submit_mv = frappe.db.sql_list("""select t1.name from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
where t2.parent=t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1""",self.name)
if submit_mv:
frappe.throw(_("Maintenance Visit {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_mv)))
# check production order
pro_order = frappe.db.sql_list("""select name from `tabProduction Order`
where sales_order = %s and docstatus = 1""", self.name)
if pro_order:
frappe.throw(_("Production Order {0} must be cancelled before cancelling this Sales Order").format(comma_and(pro_order)))
def check_modified_date(self):
mod_db = frappe.db.get_value("Sales Order", self.name, "modified")
		date_diff = frappe.db.sql("select TIMEDIFF(%s, %s)",
			(mod_db, cstr(self.modified)))
if date_diff and date_diff[0][0]:
frappe.throw(_("{0} {1} has been modified. Please refresh.").format(self.doctype, self.name))
def stop_sales_order(self):
self.check_modified_date()
self.update_stock_ledger(-1)
frappe.db.set(self, 'status', 'Stopped')
frappe.msgprint(_("{0} {1} status is Stopped").format(self.doctype, self.name))
def unstop_sales_order(self):
self.check_modified_date()
self.update_stock_ledger(1)
frappe.db.set(self, 'status', 'Submitted')
frappe.msgprint(_("{0} {1} status is Unstopped").format(self.doctype, self.name))
def update_stock_ledger(self, update_stock):
from erpnext.stock.utils import update_bin
for d in self.get_item_list():
if frappe.db.get_value("Item", d['item_code'], "is_stock_item") == "Yes":
args = {
"item_code": d['item_code'],
"warehouse": d['reserved_warehouse'],
"reserved_qty": flt(update_stock) * flt(d['reserved_qty']),
"posting_date": self.transaction_date,
"voucher_type": self.doctype,
"voucher_no": self.name,
"is_amended": self.amended_from and 'Yes' or 'No'
}
update_bin(args)
def on_update(self):
pass
def get_list_context(context=None):
from erpnext.controllers.website_list_for_contact import get_list_context
list_context = get_list_context(context)
list_context["title"] = _("My Orders")
return list_context
@frappe.whitelist()
def stop_or_unstop_sales_orders(names, status):
if not frappe.has_permission("Sales Order", "write"):
frappe.throw(_("Not permitted"), frappe.PermissionError)
names = json.loads(names)
for name in names:
so = frappe.get_doc("Sales Order", name)
if so.docstatus == 1:
if status=="Stop":
if so.status not in ("Stopped", "Cancelled") and (so.per_delivered < 100 or so.per_billed < 100):
so.stop_sales_order()
else:
if so.status == "Stopped":
so.unstop_sales_order()
frappe.local.message_log = []
def before_recurring(self):
super(SalesOrder, self).before_recurring()
for field in ("delivery_status", "per_delivered", "billing_status", "per_billed"):
self.set(field, None)
for d in self.get("items"):
for field in ("delivered_qty", "billed_amt", "planned_qty", "prevdoc_docname"):
d.set(field, None)
@frappe.whitelist()
def make_material_request(source_name, target_doc=None):
def postprocess(source, doc):
doc.material_request_type = "Purchase"
doc = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Material Request",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Material Request Item",
"field_map": {
"parent": "sales_order_no",
"stock_uom": "uom"
}
}
}, target_doc, postprocess)
return doc
@frappe.whitelist()
def make_delivery_note(source_name, target_doc=None):
def set_missing_values(source, target):
if source.po_no:
if target.po_no:
target_po_no = target.po_no.split(", ")
target_po_no.append(source.po_no)
target.po_no = ", ".join(list(set(target_po_no))) if len(target_po_no) > 1 else target_po_no[0]
else:
target.po_no = source.po_no
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
def update_item(source, target, source_parent):
target.base_amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.base_rate)
target.amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.rate)
target.qty = flt(source.qty) - flt(source.delivered_qty)
target_doc = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Delivery Note",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Delivery Note Item",
"field_map": {
"rate": "rate",
"name": "so_detail",
"parent": "against_sales_order",
},
"postprocess": update_item,
"condition": lambda doc: doc.delivered_qty < doc.qty
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"add_if_empty": True
}
}, target_doc, set_missing_values)
return target_doc
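# Hedged usage sketch (not from the original source; "SO-00001" is a
# placeholder name): mapping a submitted Sales Order into a draft Delivery
# Note, which the caller still has to save/submit.
#
#     dn = make_delivery_note("SO-00001")
#     dn.insert()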
@frappe.whitelist()
def make_sales_invoice(source_name, target_doc=None):
def postprocess(source, target):
set_missing_values(source, target)
#Get the advance paid Journal Entries in Sales Invoice Advance
target.get_advances()
def set_missing_values(source, target):
target.is_pos = 0
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
def update_item(source, target, source_parent):
target.amount = flt(source.amount) - flt(source.billed_amt)
target.base_amount = target.amount * flt(source_parent.conversion_rate)
target.qty = target.amount / flt(source.rate) if (source.rate and source.billed_amt) else source.qty
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Sales Invoice",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Sales Invoice Item",
"field_map": {
"name": "so_detail",
"parent": "sales_order",
},
"postprocess": update_item,
"condition": lambda doc: doc.base_amount==0 or doc.billed_amt < doc.amount
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"add_if_empty": True
}
}, target_doc, postprocess)
return doclist
@frappe.whitelist()
def make_maintenance_schedule(source_name, target_doc=None):
maint_schedule = frappe.db.sql("""select t1.name
from `tabMaintenance Schedule` t1, `tabMaintenance Schedule Item` t2
where t2.parent=t1.name and t2.prevdoc_docname=%s and t1.docstatus=1""", source_name)
if not maint_schedule:
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Maintenance Schedule",
"field_map": {
"name": "sales_order_no"
},
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Maintenance Schedule Item",
"field_map": {
"parent": "prevdoc_docname"
},
"add_if_empty": True
}
}, target_doc)
return doclist
@frappe.whitelist()
def make_maintenance_visit(source_name, target_doc=None):
visit = frappe.db.sql("""select t1.name
from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
where t2.parent=t1.name and t2.prevdoc_docname=%s
and t1.docstatus=1 and t1.completion_status='Fully Completed'""", source_name)
if not visit:
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Maintenance Visit",
"field_map": {
"name": "sales_order_no"
},
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Maintenance Visit Purpose",
"field_map": {
"parent": "prevdoc_docname",
"parenttype": "prevdoc_doctype"
},
"add_if_empty": True
}
}, target_doc)
return doclist
#=====================================================================================================
@frappe.whitelist()
def get_vehicle_details(vehicle_id):
q=frappe.db.sql("""select engine_no,key_no,battery_no,chassis_no,model_no from `tabVehicle Info` where name=%s""",(vehicle_id))
#if q:
#q1=frappe.db.sql("""update `tabVehicle Info` set vehicle_status=0 where name=%s""",(vehicle_id))
#else:
# q1=frappe.db.sql("""update `tabVehicle Info` set vehicle_status=0 where name=%s""",(vehicle_id))
return(q)
@frappe.whitelist()
def item_info(item_code):
q=frappe.db.sql("""select item_name, item_color,description, image from `tabItem` where item_code=%s""",(item_code))
return (q)
@frappe.whitelist()
def change_status(v_id):
frappe.msgprint(v_id)
#if v_id:
# q1=frappe.db.sql("""update `tabVehicle Info` set vehicle_status=1 where name=%s""",(v_id))
#========================================================================================================
|
reddymeghraj/showroom
|
erpnext/selling/doctype/sales_order/sales_order.py
|
Python
|
agpl-3.0
| 16,347
|
[
"VisIt"
] |
66d5e64d17119c3d317dfa589848d09117504a5d80a6596ce71cae774400d2b7
|
"""plotting fields defined on atoms during a simulation."""
from ase.visualize.primiplotter import PostScriptFile, PnmFile, GifFile, JpegFile, X11Window
from ase.visualize.primiplotter import PrimiPlotter as _PrimiPlotter
import numpy
import time
class FieldPlotter(_PrimiPlotter):
def __init__(self, atoms, datasource=None, verbose=0, timing=0,
interval=1, initframe=0):
_PrimiPlotter.__init__(self, atoms, verbose=verbose, timing=timing,
interval=interval, initframe=initframe)
self.datasource = datasource
self.dims = (100,100)
self.set_plot_plane("xy")
self.set_data_range("plot")
self.set_background(0.0)
self.set_red_yellow_colors()
def set_plot_plane(self, plane):
"""Set the plotting plane to xy, xz or yz (default: xy)"""
if plane in ("xy", "xz", "yz"):
self.plane = plane
else:
raise ValueError, "The argument to plotPlane must be 'xy', 'xz' or 'yz'."
def set_data_range(self, range1, range2=None):
"""Set the range of the data used when coloring.
This function sets the range of data values mapped unto colors
in the final plot.
Three possibilities:
'data': Autoscale using the data on visible atoms.
The range goes from the lowest to the highest
value present on the atoms. If only a few atoms
have extreme values, the entire color range may not
be used on the plot, as many values may be averaged
on each point in the plot.
'plot': Autoscale using the data on the plot. Unlike 'data'
this guarantees that the entire color range is used.
min, max: Use the range [min, max]
"""
if (range1 == "data" or range1 == "plot") and range2 == None:
self.autorange = range1
elif range2 != None:
self.autorange = None
self.range = (range1, range2)
else:
raise ValueError, "Illegal argument(s) to set_data_range"
def set_background(self, value):
"""Set the data value of the background. See also set_background_color
Set the value of the background (parts of the plot without atoms) to
a specific value, or to 'min' or 'max' representing the minimal or
maximal data values on the atoms.
Calling set_background cancels previous calls to set_background_color.
"""
self.background = value
self.backgroundcolor = None
def set_background_color(self, color):
"""Set the background color. See also set_background.
Set the background color. Use a single value in the range [0, 1[
for gray values, or a tuple of three such values as an RGB color.
Calling set_background_color cancels previous calls to set_background.
"""
self.background = None
self.backgroundcolor = color
def set_red_yellow_colors(self, reverse=False):
"""Set colors to Black-Red-Yellow-White (a.k.a. STM colors)"""
self.set_colors([(0.0, 0, 0, 0),
(0.33, 1, 0, 0),
(0.66, 1, 1, 0),
(1.0, 1, 1, 1)],
reverse)
def set_black_white_colors(self, reverse=False):
"""Set the color to Black-White (greyscale)"""
self.set_colors([(0.0, 0), (1.0, 1)], reverse)
def set_colors(self, colors, reverse=False):
colors = numpy.array(colors, numpy.float)
if len(colors.shape) != 2:
raise ValueError, "Colors must be a 2D array."
if reverse:
colors[:,0] = 1 - colors[:,0]
colors = numpy.array(colors[::-1,:])
#print colors
if colors[0,0] != 0.0 or colors[-1,0] != 1.0:
raise ValueError, "First row must define the value 0 and last row must define the value 1"
if colors.shape[1] == 2:
self.colormode = 1
elif colors.shape[1] == 4:
self.colormode = 3
else:
raise ValueError, "Color specification must be Nx2 (grey) or Nx4 (rgb) matrix."
self.colorfunction = InterpolatingFunction(colors[:,0], colors[:,1:])
def plot(self, data=None):
"""Create a plot now. Does not respect the interval timer.
This method makes a plot unconditionally. It does not look at
the interval variable, nor is this plot taken into account in
the counting done by the update() method if an interval
variable was specified.
If data is specified, it must be an array of numbers with the
same length as the atoms. That data will then be plotted. If
no data is given, the data source specified when creating the
plotter is used.
"""
if self.timing:
self._starttimer()
self.log("FieldPlotter: Starting plot at "
+ time.strftime("%a, %d %b %Y %H:%M:%S"))
if data is None:
data = self.datasource()
if len(data) != len(self.atoms):
raise ValueError, ("Data has wrong length: %d instead of %d."
% (len(data), len(self.atoms)))
invisible = self._getinvisible()
coords = self._rotate(self._getpositions())
radii = self._getradii()
if self.autoscale:
self._autoscale(coords,radii)
scale = self.scale * self.relativescale
coords = scale * coords
center = self._getcenter(coords)
offset = numpy.array(self.dims + (0.0,))/2.0 - center
coords = coords + offset
radii = radii * scale
self.log("Scale is %f and size is (%d, %d)"
% (scale, self.dims[0], self.dims[1]))
self.log("Physical size of plot is %f Angstrom times %f Angstrom"
% (self.dims[0] / scale, self.dims[1] / scale))
# Remove invisible atoms
selector = numpy.logical_not(invisible)
coords = numpy.compress(selector, coords, 0)
radii = numpy.compress(selector, radii)
data = numpy.compress(selector, data)
self.log("plotting data in the range [%f,%f]" %
(data.min(), data.max()))
# Now create the output array
sumarray = numpy.zeros(self.dims, numpy.float)
weight = numpy.zeros(self.dims)
# Loop over all atoms, and plot them
nmiss = 0
if self.plane == "xy":
xy = coords[:,:2]
elif self.plane == "xz":
xy = coords[:,::2]
elif self.plane == "yz":
xy = coords[:,1:]
else:
raise RuntimeError, "self.plane is bogus: "+str(self.plane)
assert xy.shape[1] == 2
self.log("plotting %d atoms on %d * %d (= %d) grid" %
(len(xy), sumarray.shape[0], sumarray.shape[1],
len(sumarray.flat)))
xy = xy.astype(numpy.int)
for i in xrange(len(xy)):
(x, y) = xy[i]
d = data[i]
if (x >= 0 and x < self.dims[0] and y >= 0 and y < self.dims[1]):
sumarray[x,y] += d
weight[x,y] += 1
else:
nmiss += 1
print "... %d atoms fell outside plot." % (nmiss,)
datamap = self._makedatamap(sumarray, weight, data.min(), data.max())
self.log("Range of data map: [%f, %f]" %
(datamap.min(), datamap.max()))
plot = self._makeplotmap(datamap, weight)
#self.log("Range of plot: [%f, %f]" %
# (min(plot.flat), max(plot.flat)))
examinplot = plot[:]
examinplot.shape = (plot.shape[0] * plot.shape[1],) + plot.shape[2:]
self.log("Range of plot: %s -> %s" %
(str(examinplot.min(0)), str(examinplot.max(0))))
del examinplot
for device in self.outputdevice:
device.inform_about_scale(scale)
device.plotArray(self.n, numpy.swapaxes(plot,0,1))
self.n = self.n + 1
self.log("FieldPlotter: Finished plotting at "
+ time.strftime("%a, %d %b %Y %H:%M:%S"))
self.log("\n\n")
def _makedatamap(self, sumarray, weight, minimum, maximum):
background = numpy.equal(weight, 0)
print "Number of background points:", sum(background.flat)
datamap = sumarray / numpy.where(background, 1, weight)
if self.background is not None:
if self.background == "min":
bg = minimum
elif self.background == "max":
bg = maximum
else:
bg = self.background
datamap = numpy.where(background, bg, datamap)
if self.autorange == "data":
datamap = (datamap - minimum) / (maximum - minimum)
self.log("Autorange using data. Data range is [%f, %f]"
% (minimum, maximum))
elif self.autorange == "plot":
ma = numpy.where(background, minimum, datamap).max()
mi = numpy.where(background, maximum, datamap).min()
datamap = (datamap - mi) / (ma - mi)
self.log("Autorange using plot. Data range is [%f, %f]"
% (mi, ma))
        else:
            assert self.autorange == None
            datamap = (datamap - self.range[0]) / (self.range[1]
                                                   - self.range[0])
            datamap = numpy.clip(datamap, 0.0, 1.0)
            self.log("Data range specified by user: [%f, %f]" % self.range)
            if self.background is not None:
                # bg is in raw data units; rescale it into the same [0, 1]
                # range as the data (and clip) before re-applying it to the
                # background points, so the assertion below still holds.
                bg = (bg - self.range[0]) / (self.range[1] - self.range[0])
                datamap = numpy.where(background, min(max(bg, 0.0), 1.0),
                                      datamap)
assert datamap.min() >= 0 and datamap.max() <= 1.0
return datamap
def _makeplotmap(self, datamap, weight):
plot = numpy.zeros(self.dims + (self.colormode,), numpy.float)
for i in range(self.dims[0]):
for j in range(self.dims[1]):
if self.backgroundcolor is not None and weight[i,j] == 0:
plot[i,j,:] = self.backgroundcolor
else:
x = datamap[i,j]
plot[i,j,:] = self.colorfunction(x)
return plot
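# Hedged usage sketch (not from the original source; `atoms` is any ASE
# Atoms object and `my_field` a callable returning one value per atom, and
# set_output is assumed to be inherited from this ASE version's
# PrimiPlotter base class):
#
#     plotter = FieldPlotter(atoms, datasource=my_field)
#     plotter.set_plot_plane("xz")
#     plotter.set_data_range(0.0, 1.0)
#     plotter.set_output(PostScriptFile("frame"))  # device from the imports above
#     plotter.plot()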
class InterpolatingFunction:
def __init__(self, xpoints, ypoints):
if len(xpoints) != len(ypoints):
raise ValueError, "Length of x and y arrays should be the same."
idx = xpoints.argsort()
self.xpoints = xpoints[idx]
self.ypoints = ypoints[idx]
def __call__(self, x):
n = self.xpoints.searchsorted(x)
if n == 0:
return self.ypoints[0]
if n == len(self.xpoints):
return self.xpoints[-1]
x0 = self.xpoints[n-1]
x1 = self.xpoints[n]
y0 = self.ypoints[n-1]
y1 = self.ypoints[n]
return y0 + (y1 - y0) / (x1 - x0) * (x - x0)
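# Hedged example (not from the original source): piecewise-linear lookup as
# used by the color maps above.
#
#     f = InterpolatingFunction(numpy.array([0.0, 1.0]),
#                               numpy.array([[0.0], [1.0]]))
#     f(0.5)  # array([ 0.5])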
|
grhawk/ASE
|
tools/ase/visualize/fieldplotter.py
|
Python
|
gpl-2.0
| 11,124
|
[
"ASE"
] |
1b68376eddce0798a3ab9a3a83e9fd62e41d14e86081bbd20884e31068661dd4
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Module containing classes to generate grain boundaries.
"""
import itertools
import logging
import warnings
from fractions import Fraction
from functools import reduce
from math import cos, floor, gcd
import numpy as np
from monty.fractions import lcm
from pymatgen.core.lattice import Lattice
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
# This module implements representations of grain boundaries, as well as
# algorithms for generating them.
__author__ = "Xiang-Guo Li"
__copyright__ = "Copyright 2018, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "Xiang-Guo Li"
__email__ = "xil110@ucsd.edu"
__date__ = "7/30/18"
logger = logging.getLogger(__name__)
class GrainBoundary(Structure):
"""
Subclass of Structure representing a GrainBoundary (gb) object.
Implements additional attributes pertaining to gbs, but the
init method does not actually implement any algorithm that
    creates a gb. This is a DUMMY class whose init method only holds
information about the gb. Also has additional methods that returns
other information about a gb such as sigma value.
Note that all gbs have the gb surface normal oriented in the c-direction.
This means the lattice vectors a and b are in the gb surface plane (at
least for one grain) and the c vector is out of the surface plane
    (though not necessarily perpendicular to the surface).
"""
def __init__(
self,
lattice,
species,
coords,
rotation_axis,
rotation_angle,
gb_plane,
join_plane,
init_cell,
vacuum_thickness,
ab_shift,
site_properties,
oriented_unit_cell,
validate_proximity=False,
coords_are_cartesian=False,
):
"""
Makes a gb structure, a structure object with additional information
and methods pertaining to gbs.
Args:
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
species ([Species]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / species specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Species objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
rotation_axis (list): Rotation axis of GB in the form of a list of
integers, e.g. [1, 1, 0].
rotation_angle (float, in unit of degree): rotation angle of GB.
gb_plane (list): Grain boundary plane in the form of a list of integers
e.g.: [1, 2, 3].
join_plane (list): Joining plane of the second grain in the form of a list of
integers. e.g.: [1, 2, 3].
init_cell (Structure): initial bulk structure to form the GB.
site_properties (dict): Properties associated with the sites as a
dict of sequences, The sequences have to be the same length as
the atomic species and fractional_coords. For gb, you should
have the 'grain_label' properties to classify the sites as 'top',
'bottom', 'top_incident', or 'bottom_incident'.
vacuum_thickness (float in angstrom): The thickness of vacuum inserted
between two grains of the GB.
ab_shift (list of float, in unit of crystal vector a, b): The relative
shift along a, b vectors.
oriented_unit_cell (Structure): oriented unit cell of the bulk init_cell.
Help to accurate calculate the bulk properties that are consistent
with gb calculations.
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
"""
self.oriented_unit_cell = oriented_unit_cell
self.rotation_axis = rotation_axis
self.rotation_angle = rotation_angle
self.gb_plane = gb_plane
self.join_plane = join_plane
self.init_cell = init_cell
self.vacuum_thickness = vacuum_thickness
self.ab_shift = ab_shift
super().__init__(
lattice,
species,
coords,
validate_proximity=validate_proximity,
coords_are_cartesian=coords_are_cartesian,
site_properties=site_properties,
)
def copy(self):
"""
Convenience method to get a copy of the structure, with options to add
site properties.
Returns:
A copy of the Structure, with optionally new site_properties and
optionally sanitized.
"""
return GrainBoundary(
self.lattice,
self.species_and_occu,
self.frac_coords,
self.rotation_axis,
self.rotation_angle,
self.gb_plane,
self.join_plane,
self.init_cell,
self.vacuum_thickness,
self.ab_shift,
self.site_properties,
self.oriented_unit_cell,
)
def get_sorted_structure(self, key=None, reverse=False):
"""
Get a sorted copy of the structure. The parameters have the same
meaning as in list.sort. By default, sites are sorted by the
electronegativity of the species. Note that Slab has to override this
because of the different __init__ args.
Args:
key: Specifies a function of one argument that is used to extract
a comparison key from each list element: key=str.lower. The
default value is None (compare the elements directly).
reverse (bool): If set to True, then the list elements are sorted
as if each comparison were reversed.
"""
sites = sorted(self, key=key, reverse=reverse)
s = Structure.from_sites(sites)
return GrainBoundary(
s.lattice,
s.species_and_occu,
s.frac_coords,
self.rotation_axis,
self.rotation_angle,
self.gb_plane,
self.join_plane,
self.init_cell,
self.vacuum_thickness,
self.ab_shift,
self.site_properties,
self.oriented_unit_cell,
)
@property
def sigma(self):
"""
This method returns the sigma value of the gb.
If using 'quick_gen' to generate GB, this value is not valid.
"""
return int(round(self.oriented_unit_cell.volume / self.init_cell.volume))
@property
def sigma_from_site_prop(self):
"""
This method returns the sigma value of the gb from site properties.
        If the GB structure merged some atoms because they were too close to
        each other, this property will not work.
"""
num_coi = 0
if None in self.site_properties["grain_label"]:
raise RuntimeError("Site were merged, this property do not work")
for tag in self.site_properties["grain_label"]:
if "incident" in tag:
num_coi += 1
return int(round(self.num_sites / num_coi))
@property
def top_grain(self):
"""
return the top grain (Structure) of the GB.
"""
top_sites = []
for i, tag in enumerate(self.site_properties["grain_label"]):
if "top" in tag:
top_sites.append(self.sites[i])
return Structure.from_sites(top_sites)
@property
def bottom_grain(self):
"""
return the bottom grain (Structure) of the GB.
"""
bottom_sites = []
for i, tag in enumerate(self.site_properties["grain_label"]):
if "bottom" in tag:
bottom_sites.append(self.sites[i])
return Structure.from_sites(bottom_sites)
@property
def coincidents(self):
"""
        Return a list of coincident sites.
"""
coincident_sites = []
for i, tag in enumerate(self.site_properties["grain_label"]):
if "incident" in tag:
coincident_sites.append(self.sites[i])
return coincident_sites
def __str__(self):
comp = self.composition
outs = [
"Gb Summary (%s)" % comp.formula,
"Reduced Formula: %s" % comp.reduced_formula,
f"Rotation axis: {self.rotation_axis}",
f"Rotation angle: {self.rotation_angle}",
f"GB plane: {self.gb_plane}",
f"Join plane: {self.join_plane}",
f"vacuum thickness: {self.vacuum_thickness}",
f"ab_shift: {self.ab_shift}",
]
def to_s(x, rjust=10):
return ("%0.6f" % x).rjust(rjust)
outs.append("abc : " + " ".join([to_s(i) for i in self.lattice.abc]))
outs.append("angles: " + " ".join([to_s(i) for i in self.lattice.angles]))
outs.append(f"Sites ({len(self)})")
for i, site in enumerate(self):
outs.append(
" ".join(
[
str(i + 1),
site.species_string,
" ".join([to_s(j, 12) for j in site.frac_coords]),
]
)
)
return "\n".join(outs)
def as_dict(self):
"""
Returns:
Dictionary representation of GrainBoundary object
"""
d = super().as_dict()
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
d["init_cell"] = self.init_cell.as_dict()
d["rotation_axis"] = self.rotation_axis
d["rotation_angle"] = self.rotation_angle
d["gb_plane"] = self.gb_plane
d["join_plane"] = self.join_plane
d["vacuum_thickness"] = self.vacuum_thickness
d["ab_shift"] = self.ab_shift
d["oriented_unit_cell"] = self.oriented_unit_cell.as_dict()
return d
@classmethod
def from_dict(cls, d):
"""
Generates a GrainBoundary object from a dictionary created by as_dict().
Args:
d: dict
Returns:
GrainBoundary object
"""
lattice = Lattice.from_dict(d["lattice"])
sites = [PeriodicSite.from_dict(sd, lattice) for sd in d["sites"]]
s = Structure.from_sites(sites)
return GrainBoundary(
lattice=lattice,
species=s.species_and_occu,
coords=s.frac_coords,
rotation_axis=d["rotation_axis"],
rotation_angle=d["rotation_angle"],
gb_plane=d["gb_plane"],
join_plane=d["join_plane"],
init_cell=Structure.from_dict(d["init_cell"]),
vacuum_thickness=d["vacuum_thickness"],
ab_shift=d["ab_shift"],
oriented_unit_cell=Structure.from_dict(d["oriented_unit_cell"]),
site_properties=s.site_properties,
)
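# A minimal serialization round trip (a sketch; `gb` stands for an existing
# GrainBoundary instance and is illustrative). as_dict() and from_dict() are
# designed to be inverses:
#     d = gb.as_dict()
#     gb2 = GrainBoundary.from_dict(d)
#     assert gb2.sigma == gb.sigma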
class GrainBoundaryGenerator:
"""
This class is to generate grain boundaries (GBs) from bulk
conventional cell (fcc, bcc can from the primitive cell), and works for Cubic,
Tetragonal, Orthorhombic, Rhombohedral, and Hexagonal systems.
It generate GBs from given parameters, which includes
GB plane, rotation axis, rotation angle.
This class works for any general GB, including twist, tilt and mixed GBs.
The three parameters, rotation axis, GB plane and rotation angle, are
sufficient to identify one unique GB. While sometimes, users may not be able
to tell what exactly rotation angle is but prefer to use sigma as an parameter,
this class also provides the function that is able to return all possible
rotation angles for a specific sigma value.
The same sigma value (with rotation axis fixed) can correspond to
multiple rotation angles.
Users can use structure matcher in pymatgen to get rid of the redundant structures.
"""
def __init__(self, initial_structure, symprec=0.1, angle_tolerance=1):
"""
initial_structure (Structure): Initial input structure. It can
be conventional or primitive cell (primitive cell works for bcc and fcc).
For fcc and bcc, using conventional cell can lead to a non-primitive
grain boundary structure.
This code supplies Cubic, Tetragonal, Orthorhombic, Rhombohedral, and
Hexagonal systems.
symprec (float): Tolerance for symmetry finding. Defaults to 0.1 (the value used
in Materials Project), which is for structures with slight deviations
from their proper atomic positions (e.g., structures relaxed with
electronic structure codes).
A smaller value of 0.01 is often used for properly refined
structures with atoms in the proper symmetry coordinates.
User should make sure the symmetry is what you want.
angle_tolerance (float): Angle tolerance for symmetry finding.
"""
analyzer = SpacegroupAnalyzer(initial_structure, symprec, angle_tolerance)
self.lat_type = analyzer.get_lattice_type()[0]
if self.lat_type == "t":
# need to use the conventional cell for tetragonal
initial_structure = analyzer.get_conventional_standard_structure()
a, b, c = initial_structure.lattice.abc
# c axis of tetragonal structure not in the third direction
if abs(a - b) > symprec:
# a == c, rotate b to the third direction
if abs(a - c) < symprec:
initial_structure.make_supercell([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
# b == c, rotate a to the third direction
else:
initial_structure.make_supercell([[0, 1, 0], [0, 0, 1], [1, 0, 0]])
elif self.lat_type == "h":
alpha, beta, gamma = initial_structure.lattice.angles
# c axis is not in the third direction
if abs(gamma - 90) < angle_tolerance:
# alpha = 120 or 60, rotate b, c to a, b vectors
if abs(alpha - 90) > angle_tolerance:
initial_structure.make_supercell([[0, 1, 0], [0, 0, 1], [1, 0, 0]])
# beta = 120 or 60, rotate c, a to a, b vectors
elif abs(beta - 90) > angle_tolerance:
initial_structure.make_supercell([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
elif self.lat_type == "r":
            # need to use the primitive cell for the rhombohedral system
initial_structure = analyzer.get_primitive_standard_structure()
elif self.lat_type == "o":
            # need to use the conventional cell for orthorhombic
initial_structure = analyzer.get_conventional_standard_structure()
self.initial_structure = initial_structure
def gb_from_parameters(
self,
rotation_axis,
rotation_angle,
expand_times=4,
vacuum_thickness=0.0,
ab_shift=[0, 0],
normal=False,
ratio=None,
plane=None,
max_search=20,
tol_coi=1.0e-8,
rm_ratio=0.7,
quick_gen=False,
):
"""
Args:
            rotation_axis (list): Rotation axis of GB in the form of a list of integers
e.g.: [1, 1, 0]
rotation_angle (float, in unit of degree): rotation angle used to generate GB.
Make sure the angle is accurate enough. You can use the enum* functions
in this class to extract the accurate angle.
e.g.: The rotation angle of sigma 3 twist GB with the rotation axis
[1, 1, 1] and GB plane (1, 1, 1) can be 60.000000000 degree.
                If you do not know the rotation angle but know the sigma value, we
                provide the function get_rotation_angle_from_sigma, which returns
                all the rotation angles for the sigma value you provide.
            expand_times (int): The multiplication factor used to expand one unit grain into a
                larger grain. This is used to tune the grain length of the GB so that the two
                GBs in one cell do not interact with each other. Defaults to 4.
            vacuum_thickness (float, in angstrom): The thickness of vacuum inserted
                between the two grains of the GB. Defaults to 0.
            ab_shift (list of float, in unit of a, b vectors of the GB): in-plane shift of the two grains
            normal (bool):
                whether to require the c axis of the top grain (first transformation matrix)
                to be perpendicular to the surface.
                Defaults to False.
ratio (list of integers):
lattice axial ratio.
For cubic system, ratio is not needed.
For tetragonal system, ratio = [mu, mv], list of two integers,
                that is, mu/mv = c2/a2. If it is irrational, set it to None.
For orthorhombic system, ratio = [mu, lam, mv], list of three integers,
that is, mu:lam:mv = c2:b2:a2. If irrational for one axis, set it to None.
e.g. mu:lam:mv = c2,None,a2, means b2 is irrational.
For rhombohedral system, ratio = [mu, mv], list of two integers,
that is, mu/mv is the ratio of (1+2*cos(alpha))/cos(alpha).
If irrational, set it to None.
For hexagonal system, ratio = [mu, mv], list of two integers,
                that is, mu/mv = c2/a2. If it is irrational, set it to None.
This code also supplies a class method to generate the ratio from the
structure (get_ratio). User can also make their own approximation and
input the ratio directly.
plane (list): Grain boundary plane in the form of a list of integers
                e.g.: [1, 2, 3]. If None, it is set as a twist GB. The plane will be perpendicular
                to the rotation axis.
            max_search (int): max search for the GB lattice vectors that give the smallest GB
                lattice. If normal is True, also max search for the GB c vector perpendicular
                to the plane. For a complex GB, you can reduce this value to speed up the
                search, but too small a value may lead to errors.
            tol_coi (float): tolerance to find the coincidence sites. When making approximations to
                the ratio needed to generate the GB, you probably need to increase this tolerance to
                obtain the correct number of coincidence sites. To check whether the number of
                coincidence sites is correct, you can compare the generated GB object's
                sigma_from_site_prop with the enum* sigma values (what the user expected by input).
            rm_ratio (float): the criterion for removing atoms that are too close to each other.
                rm_ratio * bond_length of the bulk system is the bond-length criterion below which
                an atom will be removed. Defaults to 0.7.
            quick_gen (bool): whether to quickly generate a supercell; if set to True, the
                smallest cell is not searched for.
        Returns:
            Grain boundary structure (GrainBoundary object).
"""
lat_type = self.lat_type
# if the initial structure is primitive cell in cubic system,
# calculate the transformation matrix from its conventional cell
# to primitive cell, basically for bcc and fcc systems.
trans_cry = np.eye(3)
if lat_type == "c":
analyzer = SpacegroupAnalyzer(self.initial_structure)
convention_cell = analyzer.get_conventional_standard_structure()
vol_ratio = self.initial_structure.volume / convention_cell.volume
# bcc primitive cell, belong to cubic system
if abs(vol_ratio - 0.5) < 1.0e-3:
trans_cry = np.array([[0.5, 0.5, -0.5], [-0.5, 0.5, 0.5], [0.5, -0.5, 0.5]])
logger.info("Make sure this is for cubic with bcc primitive cell")
# fcc primitive cell, belong to cubic system
elif abs(vol_ratio - 0.25) < 1.0e-3:
trans_cry = np.array([[0.5, 0.5, 0], [0, 0.5, 0.5], [0.5, 0, 0.5]])
logger.info("Make sure this is for cubic with fcc primitive cell")
else:
logger.info("Make sure this is for cubic with conventional cell")
elif lat_type == "t":
logger.info("Make sure this is for tetragonal system")
if ratio is None:
logger.info("Make sure this is for irrational c2/a2")
elif len(ratio) != 2:
raise RuntimeError("Tetragonal system needs correct c2/a2 ratio")
elif lat_type == "o":
logger.info("Make sure this is for orthorhombic system")
if ratio is None:
raise RuntimeError("CSL does not exist if all axial ratios are irrational for an orthorhombic system")
if len(ratio) != 3:
raise RuntimeError("Orthorhombic system needs correct c2:b2:a2 ratio")
elif lat_type == "h":
logger.info("Make sure this is for hexagonal system")
if ratio is None:
logger.info("Make sure this is for irrational c2/a2")
elif len(ratio) != 2:
raise RuntimeError("Hexagonal system needs correct c2/a2 ratio")
elif lat_type == "r":
logger.info("Make sure this is for rhombohedral system")
            if ratio is None:
                logger.info("Make sure this is for irrational (1+2*cos(alpha))/cos(alpha) ratio")
            elif len(ratio) != 2:
                raise RuntimeError("Rhombohedral system needs correct (1+2*cos(alpha))/cos(alpha) ratio")
        else:
            raise RuntimeError(
                "Lattice type not implemented. This code works for cubic, "
                "tetragonal, orthorhombic, rhombohedral, hexagonal systems"
            )
# transform four index notation to three index notation for hexagonal and rhombohedral
if len(rotation_axis) == 4:
u1 = rotation_axis[0]
v1 = rotation_axis[1]
w1 = rotation_axis[3]
if lat_type.lower() == "h":
u = 2 * u1 + v1
v = 2 * v1 + u1
w = w1
rotation_axis = [u, v, w]
elif lat_type.lower() == "r":
u = 2 * u1 + v1 + w1
v = v1 + w1 - u1
w = w1 - 2 * v1 - u1
rotation_axis = [u, v, w]
# make sure gcd(rotation_axis)==1
if reduce(gcd, rotation_axis) != 1:
rotation_axis = [int(round(x / reduce(gcd, rotation_axis))) for x in rotation_axis]
# transform four index notation to three index notation for plane
if plane is not None:
if len(plane) == 4:
u1 = plane[0]
v1 = plane[1]
w1 = plane[3]
plane = [u1, v1, w1]
# set the plane for grain boundary when plane is None.
if plane is None:
if lat_type.lower() == "c":
plane = rotation_axis
else:
if lat_type.lower() == "h":
if ratio is None:
c2_a2_ratio = 1
else:
c2_a2_ratio = ratio[0] / ratio[1]
metric = np.array([[1, -0.5, 0], [-0.5, 1, 0], [0, 0, c2_a2_ratio]])
elif lat_type.lower() == "r":
if ratio is None:
cos_alpha = 0.5
else:
cos_alpha = 1.0 / (ratio[0] / ratio[1] - 2)
metric = np.array(
[
[1, cos_alpha, cos_alpha],
[cos_alpha, 1, cos_alpha],
[cos_alpha, cos_alpha, 1],
]
)
elif lat_type.lower() == "t":
if ratio is None:
c2_a2_ratio = 1
else:
c2_a2_ratio = ratio[0] / ratio[1]
metric = np.array([[1, 0, 0], [0, 1, 0], [0, 0, c2_a2_ratio]])
elif lat_type.lower() == "o":
for i in range(3):
if ratio[i] is None:
ratio[i] = 1
metric = np.array(
[
[1, 0, 0],
[0, ratio[1] / ratio[2], 0],
[0, 0, ratio[0] / ratio[2]],
]
)
else:
raise RuntimeError("Lattice type has not implemented.")
plane = np.matmul(rotation_axis, metric)
fractions = [Fraction(x).limit_denominator() for x in plane]
least_mul = reduce(lcm, [f.denominator for f in fractions])
plane = [int(round(x * least_mul)) for x in plane]
if reduce(gcd, plane) != 1:
index = reduce(gcd, plane)
plane = [int(round(x / index)) for x in plane]
t1, t2 = self.get_trans_mat(
r_axis=rotation_axis,
angle=rotation_angle,
normal=normal,
trans_cry=trans_cry,
lat_type=lat_type,
ratio=ratio,
surface=plane,
max_search=max_search,
quick_gen=quick_gen,
)
# find the join_plane
if lat_type.lower() != "c":
if lat_type.lower() == "h":
if ratio is None:
mu, mv = [1, 1]
else:
mu, mv = ratio
trans_cry1 = np.array([[1, 0, 0], [-0.5, np.sqrt(3.0) / 2.0, 0], [0, 0, np.sqrt(mu / mv)]])
elif lat_type.lower() == "r":
if ratio is None:
c2_a2_ratio = 1
else:
mu, mv = ratio
c2_a2_ratio = 3.0 / (2 - 6 * mv / mu)
trans_cry1 = np.array(
[
[0.5, np.sqrt(3.0) / 6.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
[-0.5, np.sqrt(3.0) / 6.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
[0, -1 * np.sqrt(3.0) / 3.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
]
)
else:
if lat_type.lower() == "t":
if ratio is None:
mu, mv = [1, 1]
else:
mu, mv = ratio
lam = mv
elif lat_type.lower() == "o":
new_ratio = [1 if v is None else v for v in ratio]
mu, lam, mv = new_ratio
trans_cry1 = np.array([[1, 0, 0], [0, np.sqrt(lam / mv), 0], [0, 0, np.sqrt(mu / mv)]])
else:
trans_cry1 = trans_cry
grain_matrix = np.dot(t2, trans_cry1)
plane_init = np.cross(grain_matrix[0], grain_matrix[1])
if lat_type.lower() != "c":
plane_init = np.dot(plane_init, trans_cry1.T)
join_plane = self.vec_to_surface(plane_init)
parent_structure = self.initial_structure.copy()
# calculate the bond_length in bulk system.
if len(parent_structure) == 1:
temp_str = parent_structure.copy()
temp_str.make_supercell([1, 1, 2])
distance = temp_str.distance_matrix
else:
distance = parent_structure.distance_matrix
bond_length = np.min(distance[np.nonzero(distance)])
# top grain
top_grain = fix_pbc(parent_structure * t1)
        # obtain the smallest oriented cell
if normal and not quick_gen:
t_temp = self.get_trans_mat(
r_axis=rotation_axis,
angle=rotation_angle,
normal=False,
trans_cry=trans_cry,
lat_type=lat_type,
ratio=ratio,
surface=plane,
max_search=max_search,
)
            oriented_unit_cell = fix_pbc(parent_structure * t_temp[0])
            t_matrix = oriented_unit_cell.lattice.matrix
normal_v_plane = np.cross(t_matrix[0], t_matrix[1])
unit_normal_v = normal_v_plane / np.linalg.norm(normal_v_plane)
unit_ab_adjust = (t_matrix[2] - np.dot(unit_normal_v, t_matrix[2]) * unit_normal_v) / np.dot(
unit_normal_v, t_matrix[2]
)
else:
            oriented_unit_cell = top_grain.copy()
unit_ab_adjust = 0.0
# bottom grain, using top grain's lattice matrix
bottom_grain = fix_pbc(parent_structure * t2, top_grain.lattice.matrix)
# label both grains with 'top','bottom','top_incident','bottom_incident'
n_sites = top_grain.num_sites
t_and_b = Structure(
top_grain.lattice,
top_grain.species + bottom_grain.species,
list(top_grain.frac_coords) + list(bottom_grain.frac_coords),
)
t_and_b_dis = t_and_b.lattice.get_all_distances(
t_and_b.frac_coords[0:n_sites], t_and_b.frac_coords[n_sites : n_sites * 2]
)
index_incident = np.nonzero(t_and_b_dis < np.min(t_and_b_dis) + tol_coi)
top_labels = []
for i in range(n_sites):
if i in index_incident[0]:
top_labels.append("top_incident")
else:
top_labels.append("top")
bottom_labels = []
for i in range(n_sites):
if i in index_incident[1]:
bottom_labels.append("bottom_incident")
else:
bottom_labels.append("bottom")
top_grain = Structure(
Lattice(top_grain.lattice.matrix),
top_grain.species,
top_grain.frac_coords,
site_properties={"grain_label": top_labels},
)
bottom_grain = Structure(
Lattice(bottom_grain.lattice.matrix),
bottom_grain.species,
bottom_grain.frac_coords,
site_properties={"grain_label": bottom_labels},
)
# expand both grains
top_grain.make_supercell([1, 1, expand_times])
bottom_grain.make_supercell([1, 1, expand_times])
top_grain = fix_pbc(top_grain)
bottom_grain = fix_pbc(bottom_grain)
# determine the top-grain location.
edge_b = 1.0 - max(bottom_grain.frac_coords[:, 2])
edge_t = 1.0 - max(top_grain.frac_coords[:, 2])
c_adjust = (edge_t - edge_b) / 2.0
# construct all species
all_species = []
all_species.extend([site.specie for site in bottom_grain])
all_species.extend([site.specie for site in top_grain])
half_lattice = top_grain.lattice
# calculate translation vector, perpendicular to the plane
normal_v_plane = np.cross(half_lattice.matrix[0], half_lattice.matrix[1])
unit_normal_v = normal_v_plane / np.linalg.norm(normal_v_plane)
translation_v = unit_normal_v * vacuum_thickness
# construct the final lattice
whole_matrix_no_vac = np.array(half_lattice.matrix)
whole_matrix_no_vac[2] = half_lattice.matrix[2] * 2
whole_matrix_with_vac = whole_matrix_no_vac.copy()
whole_matrix_with_vac[2] = whole_matrix_no_vac[2] + translation_v * 2
whole_lat = Lattice(whole_matrix_with_vac)
# construct the coords, move top grain with translation_v
all_coords = []
grain_labels = bottom_grain.site_properties["grain_label"] + top_grain.site_properties["grain_label"]
for site in bottom_grain:
all_coords.append(site.coords)
for site in top_grain:
all_coords.append(
site.coords
+ half_lattice.matrix[2] * (1 + c_adjust)
+ unit_ab_adjust * np.linalg.norm(half_lattice.matrix[2] * (1 + c_adjust))
+ translation_v
+ ab_shift[0] * whole_matrix_with_vac[0]
+ ab_shift[1] * whole_matrix_with_vac[1]
)
gb_with_vac = Structure(
whole_lat,
all_species,
all_coords,
coords_are_cartesian=True,
site_properties={"grain_label": grain_labels},
)
        # merge close atoms; extract atoms near the GB.
cos_c_norm_plane = np.dot(unit_normal_v, whole_matrix_with_vac[2]) / whole_lat.c
range_c_len = abs(bond_length / cos_c_norm_plane / whole_lat.c)
sites_near_gb = []
sites_away_gb = []
for site in gb_with_vac.sites:
if (
site.frac_coords[2] < range_c_len
or site.frac_coords[2] > 1 - range_c_len
or (site.frac_coords[2] > 0.5 - range_c_len and site.frac_coords[2] < 0.5 + range_c_len)
):
sites_near_gb.append(site)
else:
sites_away_gb.append(site)
if len(sites_near_gb) >= 1:
s_near_gb = Structure.from_sites(sites_near_gb)
s_near_gb.merge_sites(tol=bond_length * rm_ratio, mode="d")
all_sites = sites_away_gb + s_near_gb.sites
gb_with_vac = Structure.from_sites(all_sites)
# move coordinates into the periodic cell.
gb_with_vac = fix_pbc(gb_with_vac, whole_lat.matrix)
return GrainBoundary(
whole_lat,
gb_with_vac.species,
gb_with_vac.cart_coords,
rotation_axis,
rotation_angle,
plane,
join_plane,
self.initial_structure,
vacuum_thickness,
ab_shift,
site_properties=gb_with_vac.site_properties,
            oriented_unit_cell=oriented_unit_cell,
coords_are_cartesian=True,
)
def get_ratio(self, max_denominator=5, index_none=None):
"""
        Find the axial ratio needed for GB generator input.
Args:
max_denominator (int): the maximum denominator for
the computed ratio, default to be 5.
index_none (int): specify the irrational axis.
                0-a, 1-b, 2-c. May only be needed for the orthorhombic system.
Returns:
axial ratio needed for GB generator (list of integers).
"""
structure = self.initial_structure
lat_type = self.lat_type
if lat_type in ("t", "h"):
# For tetragonal and hexagonal system, ratio = c2 / a2.
a, c = (structure.lattice.a, structure.lattice.c)
if c > a:
frac = Fraction(c ** 2 / a ** 2).limit_denominator(max_denominator)
ratio = [frac.numerator, frac.denominator]
else:
frac = Fraction(a ** 2 / c ** 2).limit_denominator(max_denominator)
ratio = [frac.denominator, frac.numerator]
elif lat_type == "r":
# For rhombohedral system, ratio = (1 + 2 * cos(alpha)) / cos(alpha).
cos_alpha = cos(structure.lattice.alpha / 180 * np.pi)
frac = Fraction((1 + 2 * cos_alpha) / cos_alpha).limit_denominator(max_denominator)
ratio = [frac.numerator, frac.denominator]
elif lat_type == "o":
            # For orthorhombic system, ratio = c2:b2:a2. If irrational for one axis, set it to None.
ratio = [None] * 3
lat = (structure.lattice.c, structure.lattice.b, structure.lattice.a)
index = [0, 1, 2]
if index_none is None:
min_index = np.argmin(lat)
index.pop(min_index)
frac1 = Fraction(lat[index[0]] ** 2 / lat[min_index] ** 2).limit_denominator(max_denominator)
frac2 = Fraction(lat[index[1]] ** 2 / lat[min_index] ** 2).limit_denominator(max_denominator)
com_lcm = lcm(frac1.denominator, frac2.denominator)
ratio[min_index] = com_lcm
ratio[index[0]] = frac1.numerator * int(round(com_lcm / frac1.denominator))
ratio[index[1]] = frac2.numerator * int(round(com_lcm / frac2.denominator))
else:
index.pop(index_none)
if lat[index[0]] > lat[index[1]]:
frac = Fraction(lat[index[0]] ** 2 / lat[index[1]] ** 2).limit_denominator(max_denominator)
ratio[index[0]] = frac.numerator
ratio[index[1]] = frac.denominator
else:
frac = Fraction(lat[index[1]] ** 2 / lat[index[0]] ** 2).limit_denominator(max_denominator)
ratio[index[1]] = frac.numerator
ratio[index[0]] = frac.denominator
elif lat_type == "c":
# Cubic system does not need axial ratio.
return None
else:
raise RuntimeError("Lattice type not implemented.")
return ratio
@staticmethod
def get_trans_mat(
r_axis,
angle,
normal=False,
trans_cry=np.eye(3),
lat_type="c",
ratio=None,
surface=None,
max_search=20,
quick_gen=False,
):
"""
Find the two transformation matrix for each grain from given rotation axis,
GB plane, rotation angle and corresponding ratio (see explanation for ratio
below).
The structure of each grain can be obtained by applying the corresponding
transformation matrix to the conventional cell.
The algorithm for this code is from reference, Acta Cryst, A32,783(1976).
Args:
r_axis (list of three integers, e.g. u, v, w
or four integers, e.g. u, v, t, w for hex/rho system only):
the rotation axis of the grain boundary.
angle (float, in unit of degree) :
the rotation angle of the grain boundary
            normal (bool):
                whether to require the c axis of the grain associated with
                the first transformation matrix to be perpendicular to the surface.
                Defaults to False.
            trans_cry (3 by 3 array):
                if the given structure is a primitive cell in a cubic system, e.g.
                a bcc or fcc system, trans_cry is the transformation matrix from its
                conventional cell to the primitive cell.
lat_type ( one character):
'c' or 'C': cubic system
't' or 'T': tetragonal system
'o' or 'O': orthorhombic system
'h' or 'H': hexagonal system
'r' or 'R': rhombohedral system
default to cubic system
ratio (list of integers):
lattice axial ratio.
For cubic system, ratio is not needed.
For tetragonal system, ratio = [mu, mv], list of two integers,
                that is, mu/mv = c2/a2. If it is irrational, set it to None.
For orthorhombic system, ratio = [mu, lam, mv], list of three integers,
that is, mu:lam:mv = c2:b2:a2. If irrational for one axis, set it to None.
e.g. mu:lam:mv = c2,None,a2, means b2 is irrational.
                For rhombohedral system, ratio = [mu, mv], list of two integers,
                that is, mu/mv is the ratio of (1+2*cos(alpha))/cos(alpha).
                If irrational, set it to None.
For hexagonal system, ratio = [mu, mv], list of two integers,
                that is, mu/mv = c2/a2. If it is irrational, set it to None.
surface (list of three integers, e.g. h, k, l
or four integers, e.g. h, k, i, l for hex/rho system only):
                the miller index of the grain boundary plane, in the format [h,k,l].
                If surface is not given, the default is perpendicular to r_axis, which
                gives a twist grain boundary.
            max_search (int): max search for the GB lattice vectors that give the smallest GB
                lattice. If normal is True, also max search for the GB c vector perpendicular
                to the plane.
            quick_gen (bool): whether to quickly generate a supercell; if set to True, the
                smallest cell is not searched for.
Returns:
t1 (3 by 3 integer array):
The transformation array for one grain.
t2 (3 by 3 integer array):
                The transformation array for the other grain.
"""
# transform four index notation to three index notation
if len(r_axis) == 4:
u1 = r_axis[0]
v1 = r_axis[1]
w1 = r_axis[3]
if lat_type.lower() == "h":
u = 2 * u1 + v1
v = 2 * v1 + u1
w = w1
r_axis = [u, v, w]
elif lat_type.lower() == "r":
u = 2 * u1 + v1 + w1
v = v1 + w1 - u1
w = w1 - 2 * v1 - u1
r_axis = [u, v, w]
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
if surface is not None:
if len(surface) == 4:
u1 = surface[0]
v1 = surface[1]
w1 = surface[3]
surface = [u1, v1, w1]
# set the surface for grain boundary.
if surface is None:
if lat_type.lower() == "c":
surface = r_axis
else:
if lat_type.lower() == "h":
if ratio is None:
c2_a2_ratio = 1
else:
c2_a2_ratio = ratio[0] / ratio[1]
metric = np.array([[1, -0.5, 0], [-0.5, 1, 0], [0, 0, c2_a2_ratio]])
elif lat_type.lower() == "r":
if ratio is None:
cos_alpha = 0.5
else:
cos_alpha = 1.0 / (ratio[0] / ratio[1] - 2)
metric = np.array(
[
[1, cos_alpha, cos_alpha],
[cos_alpha, 1, cos_alpha],
[cos_alpha, cos_alpha, 1],
]
)
elif lat_type.lower() == "t":
if ratio is None:
c2_a2_ratio = 1
else:
c2_a2_ratio = ratio[0] / ratio[1]
metric = np.array([[1, 0, 0], [0, 1, 0], [0, 0, c2_a2_ratio]])
elif lat_type.lower() == "o":
for i in range(3):
if ratio[i] is None:
ratio[i] = 1
metric = np.array(
[
[1, 0, 0],
[0, ratio[1] / ratio[2], 0],
[0, 0, ratio[0] / ratio[2]],
]
)
else:
raise RuntimeError("Lattice type has not implemented.")
surface = np.matmul(r_axis, metric)
fractions = [Fraction(x).limit_denominator() for x in surface]
least_mul = reduce(lcm, [f.denominator for f in fractions])
surface = [int(round(x * least_mul)) for x in surface]
if reduce(gcd, surface) != 1:
index = reduce(gcd, surface)
surface = [int(round(x / index)) for x in surface]
if lat_type.lower() == "h":
# set the value for u,v,w,mu,mv,m,n,d,x
# check the reference for the meaning of these parameters
u, v, w = r_axis
# make sure mu, mv are coprime integers.
if ratio is None:
mu, mv = [1, 1]
if w != 0:
if u != 0 or (v != 0):
raise RuntimeError("For irrational c2/a2, CSL only exist for [0,0,1] or [u,v,0] and m = 0")
else:
mu, mv = ratio
if gcd(mu, mv) != 1:
temp = gcd(mu, mv)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
d = (u ** 2 + v ** 2 - u * v) * mv + w ** 2 * mu
if abs(angle - 180.0) < 1.0e0:
m = 0
n = 1
else:
fraction = Fraction(
np.tan(angle / 2 / 180.0 * np.pi) / np.sqrt(float(d) / 3.0 / mu)
).limit_denominator()
m = fraction.denominator
n = fraction.numerator
# construct the rotation matrix, check reference for details
r_list = [
(u ** 2 * mv - v ** 2 * mv - w ** 2 * mu) * n ** 2 + 2 * w * mu * m * n + 3 * mu * m ** 2,
(2 * v - u) * u * mv * n ** 2 - 4 * w * mu * m * n,
2 * u * w * mu * n ** 2 + 2 * (2 * v - u) * mu * m * n,
(2 * u - v) * v * mv * n ** 2 + 4 * w * mu * m * n,
(v ** 2 * mv - u ** 2 * mv - w ** 2 * mu) * n ** 2 - 2 * w * mu * m * n + 3 * mu * m ** 2,
2 * v * w * mu * n ** 2 - 2 * (2 * u - v) * mu * m * n,
(2 * u - v) * w * mv * n ** 2 - 3 * v * mv * m * n,
(2 * v - u) * w * mv * n ** 2 + 3 * u * mv * m * n,
(w ** 2 * mu - u ** 2 * mv - v ** 2 * mv + u * v * mv) * n ** 2 + 3 * mu * m ** 2,
]
m = -1 * m
r_list_inv = [
(u ** 2 * mv - v ** 2 * mv - w ** 2 * mu) * n ** 2 + 2 * w * mu * m * n + 3 * mu * m ** 2,
(2 * v - u) * u * mv * n ** 2 - 4 * w * mu * m * n,
2 * u * w * mu * n ** 2 + 2 * (2 * v - u) * mu * m * n,
(2 * u - v) * v * mv * n ** 2 + 4 * w * mu * m * n,
(v ** 2 * mv - u ** 2 * mv - w ** 2 * mu) * n ** 2 - 2 * w * mu * m * n + 3 * mu * m ** 2,
2 * v * w * mu * n ** 2 - 2 * (2 * u - v) * mu * m * n,
(2 * u - v) * w * mv * n ** 2 - 3 * v * mv * m * n,
(2 * v - u) * w * mv * n ** 2 + 3 * u * mv * m * n,
(w ** 2 * mu - u ** 2 * mv - v ** 2 * mv + u * v * mv) * n ** 2 + 3 * mu * m ** 2,
]
m = -1 * m
F = 3 * mu * m ** 2 + d * n ** 2
all_list = r_list + r_list_inv + [F]
com_fac = reduce(gcd, all_list)
sigma = F / com_fac
r_matrix = (np.array(r_list) / com_fac / sigma).reshape(3, 3)
elif lat_type.lower() == "r":
# set the value for u,v,w,mu,mv,m,n,d
# check the reference for the meaning of these parameters
u, v, w = r_axis
# make sure mu, mv are coprime integers.
if ratio is None:
mu, mv = [1, 1]
if u + v + w != 0:
if u != v or u != w:
                        raise RuntimeError(
                            "For irrational ratio_alpha, CSL only exists for [1,1,1] or [u, v, -(u+v)] and m = 0"
                        )
else:
mu, mv = ratio
if gcd(mu, mv) != 1:
temp = gcd(mu, mv)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
d = (u ** 2 + v ** 2 + w ** 2) * (mu - 2 * mv) + 2 * mv * (v * w + w * u + u * v)
if abs(angle - 180.0) < 1.0e0:
m = 0
n = 1
else:
fraction = Fraction(np.tan(angle / 2 / 180.0 * np.pi) / np.sqrt(float(d) / mu)).limit_denominator()
m = fraction.denominator
n = fraction.numerator
# construct the rotation matrix, check reference for details
r_list = [
(mu - 2 * mv) * (u ** 2 - v ** 2 - w ** 2) * n ** 2
+ 2 * mv * (v - w) * m * n
- 2 * mv * v * w * n ** 2
+ mu * m ** 2,
2 * (mv * u * n * (w * n + u * n - m) - (mu - mv) * m * w * n + (mu - 2 * mv) * u * v * n ** 2),
2 * (mv * u * n * (v * n + u * n + m) + (mu - mv) * m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * v * n * (w * n + v * n + m) + (mu - mv) * m * w * n + (mu - 2 * mv) * u * v * n ** 2),
(mu - 2 * mv) * (v ** 2 - w ** 2 - u ** 2) * n ** 2
+ 2 * mv * (w - u) * m * n
- 2 * mv * u * w * n ** 2
+ mu * m ** 2,
2 * (mv * v * n * (v * n + u * n - m) - (mu - mv) * m * u * n + (mu - 2 * mv) * w * v * n ** 2),
2 * (mv * w * n * (w * n + v * n - m) - (mu - mv) * m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * w * n * (w * n + u * n + m) + (mu - mv) * m * u * n + (mu - 2 * mv) * w * v * n ** 2),
(mu - 2 * mv) * (w ** 2 - u ** 2 - v ** 2) * n ** 2
+ 2 * mv * (u - v) * m * n
- 2 * mv * u * v * n ** 2
+ mu * m ** 2,
]
m = -1 * m
r_list_inv = [
(mu - 2 * mv) * (u ** 2 - v ** 2 - w ** 2) * n ** 2
+ 2 * mv * (v - w) * m * n
- 2 * mv * v * w * n ** 2
+ mu * m ** 2,
2 * (mv * u * n * (w * n + u * n - m) - (mu - mv) * m * w * n + (mu - 2 * mv) * u * v * n ** 2),
2 * (mv * u * n * (v * n + u * n + m) + (mu - mv) * m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * v * n * (w * n + v * n + m) + (mu - mv) * m * w * n + (mu - 2 * mv) * u * v * n ** 2),
(mu - 2 * mv) * (v ** 2 - w ** 2 - u ** 2) * n ** 2
+ 2 * mv * (w - u) * m * n
- 2 * mv * u * w * n ** 2
+ mu * m ** 2,
2 * (mv * v * n * (v * n + u * n - m) - (mu - mv) * m * u * n + (mu - 2 * mv) * w * v * n ** 2),
2 * (mv * w * n * (w * n + v * n - m) - (mu - mv) * m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * w * n * (w * n + u * n + m) + (mu - mv) * m * u * n + (mu - 2 * mv) * w * v * n ** 2),
(mu - 2 * mv) * (w ** 2 - u ** 2 - v ** 2) * n ** 2
+ 2 * mv * (u - v) * m * n
- 2 * mv * u * v * n ** 2
+ mu * m ** 2,
]
m = -1 * m
F = mu * m ** 2 + d * n ** 2
all_list = r_list_inv + r_list + [F]
com_fac = reduce(gcd, all_list)
sigma = F / com_fac
r_matrix = (np.array(r_list) / com_fac / sigma).reshape(3, 3)
else:
u, v, w = r_axis
if lat_type.lower() == "c":
mu = 1
lam = 1
mv = 1
elif lat_type.lower() == "t":
if ratio is None:
mu, mv = [1, 1]
if w != 0:
if u != 0 or (v != 0):
raise RuntimeError("For irrational c2/a2, CSL only exist for [0,0,1] or [u,v,0] and m = 0")
else:
mu, mv = ratio
lam = mv
elif lat_type.lower() == "o":
if None in ratio:
mu, lam, mv = ratio
non_none = [i for i in ratio if i is not None]
if len(non_none) < 2:
raise RuntimeError("No CSL exist for two irrational numbers")
non1, non2 = non_none
if mu is None:
lam = non1
mv = non2
mu = 1
if w != 0:
if u != 0 or (v != 0):
raise RuntimeError("For irrational c2, CSL only exist for [0,0,1] or [u,v,0] and m = 0")
elif lam is None:
mu = non1
mv = non2
lam = 1
if v != 0:
if u != 0 or (w != 0):
raise RuntimeError("For irrational b2, CSL only exist for [0,1,0] or [u,0,w] and m = 0")
elif mv is None:
mu = non1
lam = non2
mv = 1
if u != 0:
if w != 0 or (v != 0):
raise RuntimeError("For irrational a2, CSL only exist for [1,0,0] or [0,v,w] and m = 0")
else:
mu, lam, mv = ratio
if u == 0 and v == 0:
mu = 1
if u == 0 and w == 0:
lam = 1
if v == 0 and w == 0:
mv = 1
# make sure mu, lambda, mv are coprime integers.
if reduce(gcd, [mu, lam, mv]) != 1:
temp = reduce(gcd, [mu, lam, mv])
mu = int(round(mu / temp))
mv = int(round(mv / temp))
lam = int(round(lam / temp))
d = (mv * u ** 2 + lam * v ** 2) * mv + w ** 2 * mu * mv
if abs(angle - 180.0) < 1.0e0:
m = 0
n = 1
else:
fraction = Fraction(np.tan(angle / 2 / 180.0 * np.pi) / np.sqrt(d / mu / lam)).limit_denominator()
m = fraction.denominator
n = fraction.numerator
r_list = [
(u ** 2 * mv * mv - lam * v ** 2 * mv - w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
2 * lam * (v * u * mv * n ** 2 - w * mu * m * n),
2 * mu * (u * w * mv * n ** 2 + v * lam * m * n),
2 * mv * (u * v * mv * n ** 2 + w * mu * m * n),
(v ** 2 * mv * lam - u ** 2 * mv * mv - w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
2 * mv * mu * (v * w * n ** 2 - u * m * n),
2 * mv * (u * w * mv * n ** 2 - v * lam * m * n),
2 * lam * mv * (v * w * n ** 2 + u * m * n),
(w ** 2 * mu * mv - u ** 2 * mv * mv - v ** 2 * mv * lam) * n ** 2 + lam * mu * m ** 2,
]
m = -1 * m
r_list_inv = [
(u ** 2 * mv * mv - lam * v ** 2 * mv - w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
2 * lam * (v * u * mv * n ** 2 - w * mu * m * n),
2 * mu * (u * w * mv * n ** 2 + v * lam * m * n),
2 * mv * (u * v * mv * n ** 2 + w * mu * m * n),
(v ** 2 * mv * lam - u ** 2 * mv * mv - w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
2 * mv * mu * (v * w * n ** 2 - u * m * n),
2 * mv * (u * w * mv * n ** 2 - v * lam * m * n),
2 * lam * mv * (v * w * n ** 2 + u * m * n),
(w ** 2 * mu * mv - u ** 2 * mv * mv - v ** 2 * mv * lam) * n ** 2 + lam * mu * m ** 2,
]
m = -1 * m
F = mu * lam * m ** 2 + d * n ** 2
all_list = r_list + r_list_inv + [F]
com_fac = reduce(gcd, all_list)
sigma = F / com_fac
r_matrix = (np.array(r_list) / com_fac / sigma).reshape(3, 3)
if sigma > 1000:
raise RuntimeError("Sigma >1000 too large. Are you sure what you are doing, Please check the GB if exist")
# transform surface, r_axis, r_matrix in terms of primitive lattice
surface = np.matmul(surface, np.transpose(trans_cry))
fractions = [Fraction(x).limit_denominator() for x in surface]
least_mul = reduce(lcm, [f.denominator for f in fractions])
surface = [int(round(x * least_mul)) for x in surface]
if reduce(gcd, surface) != 1:
index = reduce(gcd, surface)
surface = [int(round(x / index)) for x in surface]
r_axis = np.rint(np.matmul(r_axis, np.linalg.inv(trans_cry))).astype(int)
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
r_matrix = np.dot(np.dot(np.linalg.inv(trans_cry.T), r_matrix), trans_cry.T)
# set one vector of the basis to the rotation axis direction, and
# obtain the corresponding transform matrix
eye = np.eye(3, dtype=np.int_)
for h in range(3):
if abs(r_axis[h]) != 0:
eye[h] = np.array(r_axis)
k = h + 1 if h + 1 < 3 else abs(2 - h)
l = h + 2 if h + 2 < 3 else abs(1 - h)
break
trans = eye.T
new_rot = np.array(r_matrix)
        # use the rotation matrix to construct the CSL lattice; check the reference for details
fractions = [Fraction(x).limit_denominator() for x in new_rot[:, k]]
least_mul = reduce(lcm, [f.denominator for f in fractions])
scale = np.zeros((3, 3))
scale[h, h] = 1
scale[k, k] = least_mul
scale[l, l] = sigma / least_mul
for i in range(least_mul):
check_int = i * new_rot[:, k] + (sigma / least_mul) * new_rot[:, l]
if all(np.round(x, 5).is_integer() for x in list(check_int)):
n_final = i
break
try:
n_final
except NameError:
raise RuntimeError("Something is wrong. Check if this GB exists or not")
scale[k, l] = n_final
# each row of mat_csl is the CSL lattice vector
csl_init = np.rint(np.dot(np.dot(r_matrix, trans), scale)).astype(int).T
if abs(r_axis[h]) > 1:
csl_init = GrainBoundaryGenerator.reduce_mat(np.array(csl_init), r_axis[h], r_matrix)
csl = np.rint(Lattice(csl_init).get_niggli_reduced_lattice().matrix).astype(int)
# find the best slab supercell in terms of the conventional cell from the csl lattice,
# which is the transformation matrix
# now trans_cry is the transformation matrix from crystal to cartesian coordinates.
# for cubic, do not need to change.
if lat_type.lower() != "c":
if lat_type.lower() == "h":
trans_cry = np.array([[1, 0, 0], [-0.5, np.sqrt(3.0) / 2.0, 0], [0, 0, np.sqrt(mu / mv)]])
elif lat_type.lower() == "r":
if ratio is None:
c2_a2_ratio = 1
else:
c2_a2_ratio = 3.0 / (2 - 6 * mv / mu)
trans_cry = np.array(
[
[0.5, np.sqrt(3.0) / 6.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
[-0.5, np.sqrt(3.0) / 6.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
[0, -1 * np.sqrt(3.0) / 3.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
]
)
else:
trans_cry = np.array([[1, 0, 0], [0, np.sqrt(lam / mv), 0], [0, 0, np.sqrt(mu / mv)]])
t1_final = GrainBoundaryGenerator.slab_from_csl(
csl, surface, normal, trans_cry, max_search=max_search, quick_gen=quick_gen
)
t2_final = np.array(np.rint(np.dot(t1_final, np.linalg.inv(r_matrix.T)))).astype(int)
return t1_final, t2_final
@staticmethod
def enum_sigma_cubic(cutoff, r_axis):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in cubic system.
        The algorithm for this code is from the reference Acta Cryst., A40, 108 (1984).
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w):
the rotation axis of the grain boundary, with the format of [u,v,w].
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
                Note: the angles are the rotation angles of one grain with respect to
                the other grain.
                When generating the microstructures of the grain boundary using these angles,
you need to analyze the symmetry of the structure. Different angles may
result in equivalent microstructures.
"""
sigmas = {}
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
# count the number of odds in r_axis
odd_r = len(list(filter(lambda x: x % 2 == 1, r_axis)))
# Compute the max n we need to enumerate.
if odd_r == 3:
a_max = 4
elif odd_r == 0:
a_max = 1
else:
a_max = 2
n_max = int(np.sqrt(cutoff * a_max / sum(np.array(r_axis) ** 2)))
# enumerate all possible n, m to give possible sigmas within the cutoff.
for n_loop in range(1, n_max + 1):
n = n_loop
m_max = int(np.sqrt(cutoff * a_max - n ** 2 * sum(np.array(r_axis) ** 2)))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
if m == 0:
n = 1
else:
n = n_loop
# construct the quadruple [m, U,V,W], count the number of odds in
# quadruple to determine the parameter a, refer to the reference
quadruple = [m] + [x * n for x in r_axis]
odd_qua = len(list(filter(lambda x: x % 2 == 1, quadruple)))
if odd_qua == 4:
a = 4
elif odd_qua == 2:
a = 2
else:
a = 1
sigma = int(round((m ** 2 + n ** 2 * sum(np.array(r_axis) ** 2)) / a))
if 1 < sigma <= cutoff:
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n * np.sqrt(sum(np.array(r_axis) ** 2)) / m) / np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n * np.sqrt(sum(np.array(r_axis) ** 2)) / m) / np.pi * 180
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
return sigmas
@staticmethod
def enum_sigma_hex(cutoff, r_axis, c2_a2_ratio):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in hexagonal system.
        The algorithm for this code is from the reference Acta Cryst., A38, 550 (1982).
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w
or four integers, e.g. u, v, t, w):
the rotation axis of the grain boundary.
            c2_a2_ratio (list of two integers, e.g. mu, mv):
                mu/mv is the square of the hexagonal axial ratio, which is a rational
                number. If irrational, set c2_a2_ratio = None.
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
                Note: the angles are the rotation angles of one grain with respect to the
                other grain.
                When generating the microstructure of the grain boundary using these
angles, you need to analyze the symmetry of the structure. Different
angles may result in equivalent microstructures.
"""
sigmas = {}
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
# transform four index notation to three index notation
if len(r_axis) == 4:
u1 = r_axis[0]
v1 = r_axis[1]
w1 = r_axis[3]
u = 2 * u1 + v1
v = 2 * v1 + u1
w = w1
else:
u, v, w = r_axis
# make sure mu, mv are coprime integers.
if c2_a2_ratio is None:
mu, mv = [1, 1]
if w != 0:
if u != 0 or (v != 0):
raise RuntimeError("For irrational c2/a2, CSL only exist for [0,0,1] or [u,v,0] and m = 0")
else:
mu, mv = c2_a2_ratio
if gcd(mu, mv) != 1:
temp = gcd(mu, mv)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
# refer to the meaning of d in reference
d = (u ** 2 + v ** 2 - u * v) * mv + w ** 2 * mu
# Compute the max n we need to enumerate.
n_max = int(np.sqrt((cutoff * 12 * mu * mv) / abs(d)))
# Enumerate all possible n, m to give possible sigmas within the cutoff.
for n in range(1, n_max + 1):
if (c2_a2_ratio is None) and w == 0:
m_max = 0
else:
m_max = int(np.sqrt((cutoff * 12 * mu * mv - n ** 2 * d) / (3 * mu)))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
# construct the rotation matrix, refer to the reference
R_list = [
(u ** 2 * mv - v ** 2 * mv - w ** 2 * mu) * n ** 2 + 2 * w * mu * m * n + 3 * mu * m ** 2,
(2 * v - u) * u * mv * n ** 2 - 4 * w * mu * m * n,
2 * u * w * mu * n ** 2 + 2 * (2 * v - u) * mu * m * n,
(2 * u - v) * v * mv * n ** 2 + 4 * w * mu * m * n,
(v ** 2 * mv - u ** 2 * mv - w ** 2 * mu) * n ** 2 - 2 * w * mu * m * n + 3 * mu * m ** 2,
2 * v * w * mu * n ** 2 - 2 * (2 * u - v) * mu * m * n,
(2 * u - v) * w * mv * n ** 2 - 3 * v * mv * m * n,
(2 * v - u) * w * mv * n ** 2 + 3 * u * mv * m * n,
(w ** 2 * mu - u ** 2 * mv - v ** 2 * mv + u * v * mv) * n ** 2 + 3 * mu * m ** 2,
]
m = -1 * m
# inverse of the rotation matrix
R_list_inv = [
(u ** 2 * mv - v ** 2 * mv - w ** 2 * mu) * n ** 2 + 2 * w * mu * m * n + 3 * mu * m ** 2,
(2 * v - u) * u * mv * n ** 2 - 4 * w * mu * m * n,
2 * u * w * mu * n ** 2 + 2 * (2 * v - u) * mu * m * n,
(2 * u - v) * v * mv * n ** 2 + 4 * w * mu * m * n,
(v ** 2 * mv - u ** 2 * mv - w ** 2 * mu) * n ** 2 - 2 * w * mu * m * n + 3 * mu * m ** 2,
2 * v * w * mu * n ** 2 - 2 * (2 * u - v) * mu * m * n,
(2 * u - v) * w * mv * n ** 2 - 3 * v * mv * m * n,
(2 * v - u) * w * mv * n ** 2 + 3 * u * mv * m * n,
(w ** 2 * mu - u ** 2 * mv - v ** 2 * mv + u * v * mv) * n ** 2 + 3 * mu * m ** 2,
]
m = -1 * m
F = 3 * mu * m ** 2 + d * n ** 2
all_list = R_list_inv + R_list + [F]
# Compute the max common factors for the elements of the rotation matrix
# and its inverse.
com_fac = reduce(gcd, all_list)
sigma = int(round((3 * mu * m ** 2 + d * n ** 2) / com_fac))
if 1 < sigma <= cutoff:
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / 3.0 / mu)) / np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / 3.0 / mu)) / np.pi * 180
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
if m_max == 0:
break
return sigmas
@staticmethod
def enum_sigma_rho(cutoff, r_axis, ratio_alpha):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in rhombohedral system.
        The algorithm for this code is from the reference Acta Cryst., A45, 505 (1989).
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w
or four integers, e.g. u, v, t, w):
the rotation axis of the grain boundary, with the format of [u,v,w]
or Weber indices [u, v, t, w].
            ratio_alpha (list of two integers, e.g. mu, mv):
                mu/mv is the ratio of (1+2*cos(alpha))/cos(alpha) as a rational number.
                If irrational, set ratio_alpha = None.
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
                Note: the angles are the rotation angles of one grain with respect to the
                other grain.
                When generating the microstructure of the grain boundary using these
angles, you need to analyze the symmetry of the structure. Different
angles may result in equivalent microstructures.
"""
sigmas = {}
# transform four index notation to three index notation
if len(r_axis) == 4:
u1 = r_axis[0]
v1 = r_axis[1]
w1 = r_axis[3]
u = 2 * u1 + v1 + w1
v = v1 + w1 - u1
w = w1 - 2 * v1 - u1
r_axis = [u, v, w]
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
u, v, w = r_axis
# make sure mu, mv are coprime integers.
if ratio_alpha is None:
mu, mv = [1, 1]
if u + v + w != 0:
if u != v or u != w:
                    raise RuntimeError(
                        "For irrational ratio_alpha, CSL only exists for [1,1,1] or [u, v, -(u+v)] and m = 0"
                    )
else:
mu, mv = ratio_alpha
if gcd(mu, mv) != 1:
temp = gcd(mu, mv)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
# refer to the meaning of d in reference
d = (u ** 2 + v ** 2 + w ** 2) * (mu - 2 * mv) + 2 * mv * (v * w + w * u + u * v)
# Compute the max n we need to enumerate.
n_max = int(np.sqrt((cutoff * abs(4 * mu * (mu - 3 * mv))) / abs(d)))
# Enumerate all possible n, m to give possible sigmas within the cutoff.
for n in range(1, n_max + 1):
if ratio_alpha is None and u + v + w == 0:
m_max = 0
else:
m_max = int(np.sqrt((cutoff * abs(4 * mu * (mu - 3 * mv)) - n ** 2 * d) / (mu)))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
# construct the rotation matrix, refer to the reference
R_list = [
(mu - 2 * mv) * (u ** 2 - v ** 2 - w ** 2) * n ** 2
+ 2 * mv * (v - w) * m * n
- 2 * mv * v * w * n ** 2
+ mu * m ** 2,
2 * (mv * u * n * (w * n + u * n - m) - (mu - mv) * m * w * n + (mu - 2 * mv) * u * v * n ** 2),
2 * (mv * u * n * (v * n + u * n + m) + (mu - mv) * m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * v * n * (w * n + v * n + m) + (mu - mv) * m * w * n + (mu - 2 * mv) * u * v * n ** 2),
(mu - 2 * mv) * (v ** 2 - w ** 2 - u ** 2) * n ** 2
+ 2 * mv * (w - u) * m * n
- 2 * mv * u * w * n ** 2
+ mu * m ** 2,
2 * (mv * v * n * (v * n + u * n - m) - (mu - mv) * m * u * n + (mu - 2 * mv) * w * v * n ** 2),
2 * (mv * w * n * (w * n + v * n - m) - (mu - mv) * m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * w * n * (w * n + u * n + m) + (mu - mv) * m * u * n + (mu - 2 * mv) * w * v * n ** 2),
(mu - 2 * mv) * (w ** 2 - u ** 2 - v ** 2) * n ** 2
+ 2 * mv * (u - v) * m * n
- 2 * mv * u * v * n ** 2
+ mu * m ** 2,
]
m = -1 * m
# inverse of the rotation matrix
R_list_inv = [
(mu - 2 * mv) * (u ** 2 - v ** 2 - w ** 2) * n ** 2
+ 2 * mv * (v - w) * m * n
- 2 * mv * v * w * n ** 2
+ mu * m ** 2,
2 * (mv * u * n * (w * n + u * n - m) - (mu - mv) * m * w * n + (mu - 2 * mv) * u * v * n ** 2),
2 * (mv * u * n * (v * n + u * n + m) + (mu - mv) * m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * v * n * (w * n + v * n + m) + (mu - mv) * m * w * n + (mu - 2 * mv) * u * v * n ** 2),
(mu - 2 * mv) * (v ** 2 - w ** 2 - u ** 2) * n ** 2
+ 2 * mv * (w - u) * m * n
- 2 * mv * u * w * n ** 2
+ mu * m ** 2,
2 * (mv * v * n * (v * n + u * n - m) - (mu - mv) * m * u * n + (mu - 2 * mv) * w * v * n ** 2),
2 * (mv * w * n * (w * n + v * n - m) - (mu - mv) * m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * w * n * (w * n + u * n + m) + (mu - mv) * m * u * n + (mu - 2 * mv) * w * v * n ** 2),
(mu - 2 * mv) * (w ** 2 - u ** 2 - v ** 2) * n ** 2
+ 2 * mv * (u - v) * m * n
- 2 * mv * u * v * n ** 2
+ mu * m ** 2,
]
m = -1 * m
F = mu * m ** 2 + d * n ** 2
all_list = R_list_inv + R_list + [F]
# Compute the max common factors for the elements of the rotation matrix
# and its inverse.
com_fac = reduce(gcd, all_list)
sigma = int(round(abs(F / com_fac)))
if 1 < sigma <= cutoff:
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu)) / np.pi * 180
sigmas[sigma] = [angle]
else:
                            if m == 0:
                                angle = 180.0
                            else:
                                angle = 2 * np.arctan(n / m * np.sqrt(d / mu)) / np.pi * 180
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
if m_max == 0:
break
return sigmas
@staticmethod
def enum_sigma_tet(cutoff, r_axis, c2_a2_ratio):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in tetragonal system.
        The algorithm for this code is from the reference Acta Cryst., B46, 117 (1990).
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w):
the rotation axis of the grain boundary, with the format of [u,v,w].
            c2_a2_ratio (list of two integers, e.g. mu, mv):
                mu/mv is the square of the tetragonal axial ratio as a rational number.
                If irrational, set c2_a2_ratio = None.
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
                Note: the angles are the rotation angles of one grain with respect to the
                other grain.
                When generating the microstructure of the grain boundary using these
angles, you need to analyze the symmetry of the structure. Different
angles may result in equivalent microstructures.
"""
sigmas = {}
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
u, v, w = r_axis
# make sure mu, mv are coprime integers.
if c2_a2_ratio is None:
mu, mv = [1, 1]
if w != 0:
if u != 0 or (v != 0):
raise RuntimeError("For irrational c2/a2, CSL only exist for [0,0,1] or [u,v,0] and m = 0")
else:
mu, mv = c2_a2_ratio
if gcd(mu, mv) != 1:
temp = gcd(mu, mv)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
# refer to the meaning of d in reference
d = (u ** 2 + v ** 2) * mv + w ** 2 * mu
# Compute the max n we need to enumerate.
n_max = int(np.sqrt((cutoff * 4 * mu * mv) / d))
# Enumerate all possible n, m to give possible sigmas within the cutoff.
for n in range(1, n_max + 1):
if c2_a2_ratio is None and w == 0:
m_max = 0
else:
m_max = int(np.sqrt((cutoff * 4 * mu * mv - n ** 2 * d) / mu))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
# construct the rotation matrix, refer to the reference
R_list = [
(u ** 2 * mv - v ** 2 * mv - w ** 2 * mu) * n ** 2 + mu * m ** 2,
2 * v * u * mv * n ** 2 - 2 * w * mu * m * n,
2 * u * w * mu * n ** 2 + 2 * v * mu * m * n,
2 * u * v * mv * n ** 2 + 2 * w * mu * m * n,
(v ** 2 * mv - u ** 2 * mv - w ** 2 * mu) * n ** 2 + mu * m ** 2,
2 * v * w * mu * n ** 2 - 2 * u * mu * m * n,
2 * u * w * mv * n ** 2 - 2 * v * mv * m * n,
2 * v * w * mv * n ** 2 + 2 * u * mv * m * n,
(w ** 2 * mu - u ** 2 * mv - v ** 2 * mv) * n ** 2 + mu * m ** 2,
]
m = -1 * m
# inverse of rotation matrix
R_list_inv = [
(u ** 2 * mv - v ** 2 * mv - w ** 2 * mu) * n ** 2 + mu * m ** 2,
2 * v * u * mv * n ** 2 - 2 * w * mu * m * n,
2 * u * w * mu * n ** 2 + 2 * v * mu * m * n,
2 * u * v * mv * n ** 2 + 2 * w * mu * m * n,
(v ** 2 * mv - u ** 2 * mv - w ** 2 * mu) * n ** 2 + mu * m ** 2,
2 * v * w * mu * n ** 2 - 2 * u * mu * m * n,
2 * u * w * mv * n ** 2 - 2 * v * mv * m * n,
2 * v * w * mv * n ** 2 + 2 * u * mv * m * n,
(w ** 2 * mu - u ** 2 * mv - v ** 2 * mv) * n ** 2 + mu * m ** 2,
]
m = -1 * m
F = mu * m ** 2 + d * n ** 2
all_list = R_list + R_list_inv + [F]
# Compute the max common factors for the elements of the rotation matrix
# and its inverse.
com_fac = reduce(gcd, all_list)
sigma = int(round((mu * m ** 2 + d * n ** 2) / com_fac))
if 1 < sigma <= cutoff:
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu)) / np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu)) / np.pi * 180
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
if m_max == 0:
break
return sigmas
@staticmethod
def enum_sigma_ort(cutoff, r_axis, c2_b2_a2_ratio):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in orthorhombic system.
        The algorithm for this code is from the reference Scripta Metallurgica, 27, 291 (1992).
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w):
the rotation axis of the grain boundary, with the format of [u,v,w].
            c2_b2_a2_ratio (list of three integers, e.g. mu, lam, mv):
                mu:lam:mv is the square of the orthorhombic axial ratio with rational
                numbers. If irrational for one axis, set it to None.
                e.g. mu:lam:mv = c2,None,a2 means b2 is irrational.
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
                Note: the angles are the rotation angles of one grain with respect to the
                other grain.
                When generating the microstructure of the grain boundary using these
angles, you need to analyze the symmetry of the structure. Different
angles may result in equivalent microstructures.
"""
sigmas = {}
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
u, v, w = r_axis
# make sure mu, lambda, mv are coprime integers.
if None in c2_b2_a2_ratio:
mu, lam, mv = c2_b2_a2_ratio
non_none = [i for i in c2_b2_a2_ratio if i is not None]
if len(non_none) < 2:
raise RuntimeError("No CSL exist for two irrational numbers")
non1, non2 = non_none
if reduce(gcd, non_none) != 1:
temp = reduce(gcd, non_none)
non1 = int(round(non1 / temp))
non2 = int(round(non2 / temp))
if mu is None:
lam = non1
mv = non2
mu = 1
if w != 0:
if u != 0 or (v != 0):
raise RuntimeError("For irrational c2, CSL only exist for [0,0,1] or [u,v,0] and m = 0")
elif lam is None:
mu = non1
mv = non2
lam = 1
if v != 0:
if u != 0 or (w != 0):
raise RuntimeError("For irrational b2, CSL only exist for [0,1,0] or [u,0,w] and m = 0")
elif mv is None:
mu = non1
lam = non2
mv = 1
if u != 0:
if w != 0 or (v != 0):
raise RuntimeError("For irrational a2, CSL only exist for [1,0,0] or [0,v,w] and m = 0")
else:
mu, lam, mv = c2_b2_a2_ratio
if reduce(gcd, c2_b2_a2_ratio) != 1:
temp = reduce(gcd, c2_b2_a2_ratio)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
lam = int(round(lam / temp))
if u == 0 and v == 0:
mu = 1
if u == 0 and w == 0:
lam = 1
if v == 0 and w == 0:
mv = 1
# refer to the meaning of d in reference
d = (mv * u ** 2 + lam * v ** 2) * mv + w ** 2 * mu * mv
# Compute the max n we need to enumerate.
n_max = int(np.sqrt((cutoff * 4 * mu * mv * mv * lam) / d))
# Enumerate all possible n, m to give possible sigmas within the cutoff.
for n in range(1, n_max + 1):
mu_temp, lam_temp, mv_temp = c2_b2_a2_ratio
if (mu_temp is None and w == 0) or (lam_temp is None and v == 0) or (mv_temp is None and u == 0):
m_max = 0
else:
m_max = int(np.sqrt((cutoff * 4 * mu * mv * lam * mv - n ** 2 * d) / mu / lam))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
# construct the rotation matrix, refer to the reference
R_list = [
(u ** 2 * mv * mv - lam * v ** 2 * mv - w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
2 * lam * (v * u * mv * n ** 2 - w * mu * m * n),
2 * mu * (u * w * mv * n ** 2 + v * lam * m * n),
2 * mv * (u * v * mv * n ** 2 + w * mu * m * n),
(v ** 2 * mv * lam - u ** 2 * mv * mv - w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
2 * mv * mu * (v * w * n ** 2 - u * m * n),
2 * mv * (u * w * mv * n ** 2 - v * lam * m * n),
2 * lam * mv * (v * w * n ** 2 + u * m * n),
(w ** 2 * mu * mv - u ** 2 * mv * mv - v ** 2 * mv * lam) * n ** 2 + lam * mu * m ** 2,
]
m = -1 * m
# inverse of rotation matrix
R_list_inv = [
(u ** 2 * mv * mv - lam * v ** 2 * mv - w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
2 * lam * (v * u * mv * n ** 2 - w * mu * m * n),
2 * mu * (u * w * mv * n ** 2 + v * lam * m * n),
2 * mv * (u * v * mv * n ** 2 + w * mu * m * n),
(v ** 2 * mv * lam - u ** 2 * mv * mv - w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
2 * mv * mu * (v * w * n ** 2 - u * m * n),
2 * mv * (u * w * mv * n ** 2 - v * lam * m * n),
2 * lam * mv * (v * w * n ** 2 + u * m * n),
(w ** 2 * mu * mv - u ** 2 * mv * mv - v ** 2 * mv * lam) * n ** 2 + lam * mu * m ** 2,
]
m = -1 * m
F = mu * lam * m ** 2 + d * n ** 2
all_list = R_list + R_list_inv + [F]
# Compute the max common factors for the elements of the rotation matrix
# and its inverse.
com_fac = reduce(gcd, all_list)
sigma = int(round((mu * lam * m ** 2 + d * n ** 2) / com_fac))
if 1 < sigma <= cutoff:
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu / lam)) / np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu / lam)) / np.pi * 180
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
if m_max == 0:
break
return sigmas
@staticmethod
def enum_possible_plane_cubic(plane_cutoff, r_axis, r_angle):
"""
Find all possible plane combinations for GBs given a rotaion axis and angle for
cubic system, and classify them to different categories, including 'Twist',
'Symmetric tilt', 'Normal tilt', 'Mixed' GBs.
Args:
plane_cutoff (integer): the cutoff of plane miller index.
r_axis (list of three integers, e.g. u, v, w):
the rotation axis of the grain boundary, with the format of [u,v,w].
r_angle (float): rotation angle of the GBs.
Returns:
all_combinations (dict):
dictionary with keys as GB type, e.g. 'Twist','Symmetric tilt',etc.
and values as the combination of the two plane miller index
(GB plane and joining plane).
"""
all_combinations = {}
all_combinations["Symmetric tilt"] = []
all_combinations["Twist"] = []
all_combinations["Normal tilt"] = []
all_combinations["Mixed"] = []
sym_plane = symm_group_cubic([[1, 0, 0], [1, 1, 0]])
j = np.arange(0, plane_cutoff + 1)
combination = []
for i in itertools.product(j, repeat=3):
if sum(abs(np.array(i))) != 0:
combination.append(list(i))
if len(np.nonzero(i)[0]) == 3:
for i1 in range(3):
new_i = list(i).copy()
new_i[i1] = -1 * new_i[i1]
combination.append(new_i)
elif len(np.nonzero(i)[0]) == 2:
new_i = list(i).copy()
new_i[np.nonzero(i)[0][0]] = -1 * new_i[np.nonzero(i)[0][0]]
combination.append(new_i)
miller = np.array(combination)
miller = miller[np.argsort(np.linalg.norm(miller, axis=1))]
for i, val in enumerate(miller):
if reduce(gcd, val) == 1:
matrix = GrainBoundaryGenerator.get_trans_mat(r_axis, r_angle, surface=val, quick_gen=True)
vec = np.cross(matrix[1][0], matrix[1][1])
miller2 = GrainBoundaryGenerator.vec_to_surface(vec)
if np.all(np.abs(np.array(miller2)) <= plane_cutoff):
cos_1 = abs(np.dot(val, r_axis) / np.linalg.norm(val) / np.linalg.norm(r_axis))
if 1 - cos_1 < 1.0e-5:
all_combinations["Twist"].append([list(val), miller2])
elif cos_1 < 1.0e-8:
sym_tilt = False
if np.sum(np.abs(val)) == np.sum(np.abs(miller2)):
ave = (np.array(val) + np.array(miller2)) / 2
ave1 = (np.array(val) - np.array(miller2)) / 2
for plane in sym_plane:
cos_2 = abs(np.dot(ave, plane) / np.linalg.norm(ave) / np.linalg.norm(plane))
cos_3 = abs(np.dot(ave1, plane) / np.linalg.norm(ave1) / np.linalg.norm(plane))
if 1 - cos_2 < 1.0e-5 or 1 - cos_3 < 1.0e-5:
all_combinations["Symmetric tilt"].append([list(val), miller2])
sym_tilt = True
break
if not sym_tilt:
all_combinations["Normal tilt"].append([list(val), miller2])
else:
all_combinations["Mixed"].append([list(val), miller2])
return all_combinations
@staticmethod
def get_rotation_angle_from_sigma(sigma, r_axis, lat_type="C", ratio=None):
"""
Find all possible rotation angle for the given sigma value.
Args:
sigma (integer):
sigma value provided
r_axis (list of three integers, e.g. u, v, w
or four integers, e.g. u, v, t, w for hex/rho system only):
the rotation axis of the grain boundary.
lat_type ( one character):
'c' or 'C': cubic system
't' or 'T': tetragonal system
'o' or 'O': orthorhombic system
'h' or 'H': hexagonal system
'r' or 'R': rhombohedral system
default to cubic system
ratio (list of integers):
lattice axial ratio.
For cubic system, ratio is not needed.
For tetragonal system, ratio = [mu, mv], list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to None.
For orthorhombic system, ratio = [mu, lam, mv], list of three integers,
that is, mu:lam:mv = c2:b2:a2. If irrational for one axis, set it to None.
e.g. mu:lam:mv = c2,None,a2, means b2 is irrational.
For rhombohedral system, ratio = [mu, mv], list of two integers,
that is, mu/mv is the ratio of (1+2*cos(alpha))/cos(alpha).
If irrational, set it to None.
For hexagonal system, ratio = [mu, mv], list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to None.
Returns:
rotation_angles corresponding to the provided sigma value.
If the sigma value is not correct, return the rotation angles corresponding
to the largest possible sigma value smaller than the sigma value provided.
"""
if lat_type.lower() == "c":
logger.info("Make sure this is for cubic system")
sigma_dict = GrainBoundaryGenerator.enum_sigma_cubic(cutoff=sigma, r_axis=r_axis)
elif lat_type.lower() == "t":
logger.info("Make sure this is for tetragonal system")
if ratio is None:
logger.info("Make sure this is for irrational c2/a2 ratio")
elif len(ratio) != 2:
raise RuntimeError("Tetragonal system needs correct c2/a2 ratio")
sigma_dict = GrainBoundaryGenerator.enum_sigma_tet(cutoff=sigma, r_axis=r_axis, c2_a2_ratio=ratio)
elif lat_type.lower() == "o":
logger.info("Make sure this is for orthorhombic system")
if len(ratio) != 3:
raise RuntimeError("Orthorhombic system needs correct c2:b2:a2 ratio")
sigma_dict = GrainBoundaryGenerator.enum_sigma_ort(cutoff=sigma, r_axis=r_axis, c2_b2_a2_ratio=ratio)
elif lat_type.lower() == "h":
logger.info("Make sure this is for hexagonal system")
if ratio is None:
logger.info("Make sure this is for irrational c2/a2 ratio")
elif len(ratio) != 2:
raise RuntimeError("Hexagonal system needs correct c2/a2 ratio")
sigma_dict = GrainBoundaryGenerator.enum_sigma_hex(cutoff=sigma, r_axis=r_axis, c2_a2_ratio=ratio)
elif lat_type.lower() == "r":
logger.info("Make sure this is for rhombohedral system")
if ratio is None:
logger.info("Make sure this is for irrational (1+2*cos(alpha)/cos(alpha) ratio")
elif len(ratio) != 2:
raise RuntimeError("Rhombohedral system needs correct (1+2*cos(alpha)/cos(alpha) ratio")
sigma_dict = GrainBoundaryGenerator.enum_sigma_rho(cutoff=sigma, r_axis=r_axis, ratio_alpha=ratio)
else:
raise RuntimeError("Lattice type not implemented")
sigmas = list(sigma_dict.keys())
if not sigmas:
raise RuntimeError("This is a wriong sigma value, and no sigma exists smaller than this value.")
if sigma in sigmas:
rotation_angles = sigma_dict[sigma]
else:
sigmas.sort()
warnings.warn(
"This is not the possible sigma value according to the rotation axis!"
"The nearest neighbor sigma and its corresponding angle are returned"
)
rotation_angles = sigma_dict[sigmas[-1]]
rotation_angles.sort()
return rotation_angles
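# Illustrative usage sketch (added note, not from the original docs): for a
# cubic lattice and rotation axis [0, 0, 1],
#     angles = GrainBoundaryGenerator.get_rotation_angle_from_sigma(5, [0, 0, 1], lat_type="c")
# is expected to return the well-known sigma-5 <001> angle of roughly
# 36.87 degrees (with 53.13 degrees as its 90-degree complement).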
@staticmethod
def slab_from_csl(csl, surface, normal, trans_cry, max_search=20, quick_gen=False):
"""
By linear combination of csl lattice vectors, obtain the best corresponding
slab lattice. That is, the area spanned by the a, b vectors (within the
surface plane) is the smallest; the c vector, first, has the shortest
length perpendicular to the surface [h,k,l] and, second, has the shortest
length itself.
Args:
csl (3 by 3 integer array):
input csl lattice.
surface (list of three integers, e.g. h, k, l):
the miller index of the surface, with the format of [h,k,l]
normal (bool):
determine if the c vector needs to be perpendicular to the surface
trans_cry (3 by 3 array):
transform matrix from crystal system to orthogonal system
max_search (int): max search for the GB lattice vectors that give the smallest GB
lattice. If normal is true, also max search the GB c vector that perpendicular
to the plane.
quick_gen (bool): whether to quickly generate a supercell, no need to find the smallest
cell if set to true.
Returns:
t_matrix: a slab lattice ( 3 by 3 integer array):
"""
# set the transform matrix in real space
trans = trans_cry
# transform matrix in reciprocal space
ctrans = np.linalg.inv(trans.T)
t_matrix = csl.copy()
# vectors constructed from csl that perpendicular to surface
ab_vector = []
# obtain the miller index of surface in terms of csl.
miller = np.matmul(surface, csl.T)
if reduce(gcd, miller) != 1:
miller = [int(round(x / reduce(gcd, miller))) for x in miller]
miller_nonzero = []
# quickly generate a supercell; the normal option does not work in this mode
if quick_gen:
scale_factor = []
eye = np.eye(3, dtype=np.int_)
for i, j in enumerate(miller):
if j == 0:
scale_factor.append(eye[i])
else:
miller_nonzero.append(i)
if len(scale_factor) < 2:
index_len = len(miller_nonzero)
for i in range(index_len):
for j in range(i + 1, index_len):
lcm_miller = lcm(miller[miller_nonzero[i]], miller[miller_nonzero[j]])
l = [0, 0, 0]
l[miller_nonzero[i]] = -int(round(lcm_miller / miller[miller_nonzero[i]]))
l[miller_nonzero[j]] = int(round(lcm_miller / miller[miller_nonzero[j]]))
scale_factor.append(l)
if len(scale_factor) == 2:
break
t_matrix[0] = np.array(np.dot(scale_factor[0], csl))
t_matrix[1] = np.array(np.dot(scale_factor[1], csl))
t_matrix[2] = csl[miller_nonzero[0]]
if abs(np.linalg.det(t_matrix)) > 1000:
warnings.warn("Too large matrix. Suggest to use quick_gen=False")
return t_matrix
for i, j in enumerate(miller):
if j == 0:
ab_vector.append(csl[i])
else:
c_index = i
miller_nonzero.append(j)
if len(miller_nonzero) > 1:
t_matrix[2] = csl[c_index]
index_len = len(miller_nonzero)
lcm_miller = []
for i in range(index_len):
for j in range(i + 1, index_len):
com_gcd = gcd(miller_nonzero[i], miller_nonzero[j])
mil1 = int(round(miller_nonzero[i] / com_gcd))
mil2 = int(round(miller_nonzero[j] / com_gcd))
lcm_miller.append(max(abs(mil1), abs(mil2)))
lcm_sorted = sorted(lcm_miller)
if index_len == 2:
max_j = lcm_sorted[0]
else:
max_j = lcm_sorted[1]
else:
if not normal:
t_matrix[0] = ab_vector[0]
t_matrix[1] = ab_vector[1]
t_matrix[2] = csl[c_index]
return t_matrix
max_j = abs(miller_nonzero[0])
max_j = min(max_j, max_search)
# area of a, b vectors
area = None
# length of c vector
c_norm = np.linalg.norm(np.matmul(t_matrix[2], trans))
# c vector length along the direction perpendicular to surface
c_length = np.abs(np.dot(t_matrix[2], surface))
# check if the init c vector perpendicular to the surface
if normal:
c_cross = np.cross(np.matmul(t_matrix[2], trans), np.matmul(surface, ctrans))
normal_init = np.linalg.norm(c_cross) < 1e-8
j = np.arange(0, max_j + 1)
combination = []
for i in itertools.product(j, repeat=3):
if sum(abs(np.array(i))) != 0:
combination.append(list(i))
if len(np.nonzero(i)[0]) == 3:
for i1 in range(3):
new_i = list(i).copy()
new_i[i1] = -1 * new_i[i1]
combination.append(new_i)
elif len(np.nonzero(i)[0]) == 2:
new_i = list(i).copy()
new_i[np.nonzero(i)[0][0]] = -1 * new_i[np.nonzero(i)[0][0]]
combination.append(new_i)
for i in combination:
if reduce(gcd, i) == 1:
temp = np.dot(np.array(i), csl)
if abs(np.dot(temp, surface) - 0) < 1.0e-8:
ab_vector.append(temp)
else:
# c vector length along the direction perpendicular to surface
c_len_temp = np.abs(np.dot(temp, surface))
# c vector length itself
c_norm_temp = np.linalg.norm(np.matmul(temp, trans))
if normal:
c_cross = np.cross(np.matmul(temp, trans), np.matmul(surface, ctrans))
if np.linalg.norm(c_cross) < 1.0e-8:
if normal_init:
if c_norm_temp < c_norm:
t_matrix[2] = temp
c_norm = c_norm_temp
else:
c_norm = c_norm_temp
normal_init = True
t_matrix[2] = temp
else:
if c_len_temp < c_length or (abs(c_len_temp - c_length) < 1.0e-8 and c_norm_temp < c_norm):
t_matrix[2] = temp
c_norm = c_norm_temp
c_length = c_len_temp
if normal and (not normal_init):
logger.info("Did not find the perpendicular c vector, increase max_j")
while not normal_init:
if max_j == max_search:
warnings.warn("Cannot find the perpendicular c vector, please increase max_search")
break
max_j = 3 * max_j
max_j = min(max_j, max_search)
j = np.arange(0, max_j + 1)
combination = []
for i in itertools.product(j, repeat=3):
if sum(abs(np.array(i))) != 0:
combination.append(list(i))
if len(np.nonzero(i)[0]) == 3:
for i1 in range(3):
new_i = list(i).copy()
new_i[i1] = -1 * new_i[i1]
combination.append(new_i)
elif len(np.nonzero(i)[0]) == 2:
new_i = list(i).copy()
new_i[np.nonzero(i)[0][0]] = -1 * new_i[np.nonzero(i)[0][0]]
combination.append(new_i)
for i in combination:
if reduce(gcd, i) == 1:
temp = np.dot(np.array(i), csl)
if abs(np.dot(temp, surface) - 0) > 1.0e-8:
c_cross = np.cross(np.matmul(temp, trans), np.matmul(surface, ctrans))
if np.linalg.norm(c_cross) < 1.0e-8:
# c vector length itself
c_norm_temp = np.linalg.norm(np.matmul(temp, trans))
if normal_init:
if c_norm_temp < c_norm:
t_matrix[2] = temp
c_norm = c_norm_temp
else:
c_norm = c_norm_temp
normal_init = True
t_matrix[2] = temp
if normal_init:
logger.info("Found perpendicular c vector")
# find the best a, b vectors with their formed area smallest and average norm of a,b smallest.
for i in itertools.combinations(ab_vector, 2):
area_temp = np.linalg.norm(np.cross(np.matmul(i[0], trans), np.matmul(i[1], trans)))
if abs(area_temp - 0) > 1.0e-8:
ab_norm_temp = np.linalg.norm(np.matmul(i[0], trans)) + np.linalg.norm(np.matmul(i[1], trans))
if area is None:
area = area_temp
ab_norm = ab_norm_temp
t_matrix[0] = i[0]
t_matrix[1] = i[1]
elif area_temp < area:
t_matrix[0] = i[0]
t_matrix[1] = i[1]
area = area_temp
ab_norm = ab_norm_temp
elif abs(area - area_temp) < 1.0e-8 and ab_norm_temp < ab_norm:
t_matrix[0] = i[0]
t_matrix[1] = i[1]
area = area_temp
ab_norm = ab_norm_temp
# make sure we have a right-handed crystallographic system
if np.linalg.det(np.matmul(t_matrix, trans)) < 0:
t_matrix *= -1
if normal and abs(np.linalg.det(t_matrix)) > 1000:
warnings.warn("Too large matrix. Suggest to use Normal=False")
return t_matrix
@staticmethod
def reduce_mat(mat, mag, r_matrix):
"""
Reduce integer array mat's determinant mag times by linear combination
of its row vectors, so that the new array after rotation (r_matrix) is
still an integer array
Args:
mat (3 by 3 array): input matrix
mag (integer): reduce times for the determinant
r_matrix (3 by 3 array): rotation matrix
Return:
the reduced integer array
"""
max_j = abs(int(round(np.linalg.det(mat) / mag)))
reduced = False
for h in range(3):
k = h + 1 if h + 1 < 3 else abs(2 - h)
l = h + 2 if h + 2 < 3 else abs(1 - h)
j = np.arange(-max_j, max_j + 1)
for j1, j2 in itertools.product(j, repeat=2):
temp = mat[h] + j1 * mat[k] + j2 * mat[l]
if all(np.round(x, 5).is_integer() for x in list(temp / mag)):
mat_copy = mat.copy()
mat_copy[h] = np.array([int(round(ele / mag)) for ele in temp])
new_mat = np.dot(mat_copy, np.linalg.inv(r_matrix.T))
if all(np.round(x, 5).is_integer() for x in list(np.ravel(new_mat))):
reduced = True
mat[h] = np.array([int(round(ele / mag)) for ele in temp])
break
if reduced:
break
if not reduced:
warnings.warn("Matrix reduction not performed, may lead to non-primitive gb cell.")
return mat
@staticmethod
def vec_to_surface(vec):
"""
Transform a float vector to a surface miller index with integers.
Args:
vec (1 by 3 array float vector): input float vector
Return:
the surface miller index of the input vector.
"""
miller = [None] * 3
index = []
for i, value in enumerate(vec):
if abs(value) < 1.0e-8:
miller[i] = 0
else:
index.append(i)
if len(index) == 1:
miller[index[0]] = 1
else:
min_index = np.argmin([i for i in vec if i != 0])
true_index = index[min_index]
index.pop(min_index)
frac = []
for i, value in enumerate(index):
frac.append(Fraction(vec[value] / vec[true_index]).limit_denominator(100))
if len(index) == 1:
miller[true_index] = frac[0].denominator
miller[index[0]] = frac[0].numerator
else:
com_lcm = lcm(frac[0].denominator, frac[1].denominator)
miller[true_index] = com_lcm
miller[index[0]] = frac[0].numerator * int(round(com_lcm / frac[0].denominator))
miller[index[1]] = frac[1].numerator * int(round(com_lcm / frac[1].denominator))
return miller
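# Worked example (traced by hand through the logic above): for
# vec = [0.5, 0.5, 0.0], the third component is zeroed, the remaining
# components give the fraction 1/1, and the returned Miller index is
# [1, 1, 0].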
def factors(n):
"""
Compute the factors of an integer.
Args:
n: the input integer
Returns:
a set of integers that are the factors of the input integer.
"""
return set(
reduce(
list.__add__,
([i, n // i] for i in range(1, int(np.sqrt(n)) + 1) if n % i == 0),
)
)
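# Quick illustration of the trial division above: factors(12) enumerates
# i = 1, 2, 3 and collects both i and 12 // i, yielding {1, 2, 3, 4, 6, 12}.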
def fix_pbc(structure, matrix=None):
"""
Set all frac_coords of the input structure within [0,1].
Args:
structure (pymatgen structure object):
input structure
matrix (lattice matrix, 3 by 3 array/matrix)
new structure's lattice matrix, if none, use
input structure's matrix
Return:
new structure with fixed frac_coords and lattice matrix
"""
spec = []
coords = []
if matrix is None:
latte = Lattice(structure.lattice.matrix)
else:
latte = Lattice(matrix)
for site in structure:
spec.append(site.specie)
coord = np.array(site.frac_coords)
for i in range(3):
coord[i] -= floor(coord[i])
if np.allclose(coord[i], 1):
coord[i] = 0
elif np.allclose(coord[i], 0):
coord[i] = 0
else:
coord[i] = round(coord[i], 7)
coords.append(coord)
return Structure(latte, spec, coords, site_properties=structure.site_properties)
def symm_group_cubic(mat):
"""
Obtain cubic symmetric equivalents of the list of vectors.
Args:
    mat (n by 3 array/matrix): the list of vectors
Return:
    cubic symmetric equivalents of the list of vectors.
"""
sym_group = np.zeros([24, 3, 3])
sym_group[0, :] = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
sym_group[1, :] = [[1, 0, 0], [0, -1, 0], [0, 0, -1]]
sym_group[2, :] = [[-1, 0, 0], [0, 1, 0], [0, 0, -1]]
sym_group[3, :] = [[-1, 0, 0], [0, -1, 0], [0, 0, 1]]
sym_group[4, :] = [[0, -1, 0], [-1, 0, 0], [0, 0, -1]]
sym_group[5, :] = [[0, -1, 0], [1, 0, 0], [0, 0, 1]]
sym_group[6, :] = [[0, 1, 0], [-1, 0, 0], [0, 0, 1]]
sym_group[7, :] = [[0, 1, 0], [1, 0, 0], [0, 0, -1]]
sym_group[8, :] = [[-1, 0, 0], [0, 0, -1], [0, -1, 0]]
sym_group[9, :] = [[-1, 0, 0], [0, 0, 1], [0, 1, 0]]
sym_group[10, :] = [[1, 0, 0], [0, 0, -1], [0, 1, 0]]
sym_group[11, :] = [[1, 0, 0], [0, 0, 1], [0, -1, 0]]
sym_group[12, :] = [[0, 1, 0], [0, 0, 1], [1, 0, 0]]
sym_group[13, :] = [[0, 1, 0], [0, 0, -1], [-1, 0, 0]]
sym_group[14, :] = [[0, -1, 0], [0, 0, 1], [-1, 0, 0]]
sym_group[15, :] = [[0, -1, 0], [0, 0, -1], [1, 0, 0]]
sym_group[16, :] = [[0, 0, 1], [1, 0, 0], [0, 1, 0]]
sym_group[17, :] = [[0, 0, 1], [-1, 0, 0], [0, -1, 0]]
sym_group[18, :] = [[0, 0, -1], [1, 0, 0], [0, -1, 0]]
sym_group[19, :] = [[0, 0, -1], [-1, 0, 0], [0, 1, 0]]
sym_group[20, :] = [[0, 0, -1], [0, -1, 0], [-1, 0, 0]]
sym_group[21, :] = [[0, 0, -1], [0, 1, 0], [1, 0, 0]]
sym_group[22, :] = [[0, 0, 1], [0, -1, 0], [1, 0, 0]]
sym_group[23, :] = [[0, 0, 1], [0, 1, 0], [-1, 0, 0]]
mat = np.atleast_2d(mat)
all_vectors = []
for sym in sym_group:
for vec in mat:
all_vectors.append(np.dot(sym, vec))
return np.unique(np.array(all_vectors), axis=0)
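# Illustrative behavior (hand-checked against the 24 rotations above):
# symm_group_cubic([[1, 0, 0]]) maps the input through all 24 proper cubic
# rotations and deduplicates, returning the 6 signed axis vectors
# (+/-1, 0, 0), (0, +/-1, 0), (0, 0, +/-1).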
|
vorwerkc/pymatgen
|
pymatgen/analysis/gb/grain.py
|
Python
|
mit
| 115,781
|
[
"CRYSTAL",
"pymatgen"
] |
2c9a523484ddd369d782be39f57931f85977bf45f7d0db40a0421bdab7ae7d3c
|
"""
Maximum likelihood covariance estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
# avoid division truncation
import warnings
import numpy as np
from scipy import linalg
from .. import config_context
from ..base import BaseEstimator
from ..utils import check_array
from ..utils.extmath import fast_logdet
from ..metrics.pairwise import pairwise_distances
def log_likelihood(emp_cov, precision):
"""Computes the sample mean of the log_likelihood under a covariance model
computes the empirical expected log-likelihood (accounting for the
normalization terms and scaling), allowing for universal comparison (beyond
this software package)
Parameters
----------
emp_cov : ndarray of shape (n_features, n_features)
Maximum Likelihood Estimator of covariance.
precision : ndarray of shape (n_features, n_features)
The precision matrix of the covariance model to be tested.
Returns
-------
log_likelihood_ : float
Sample mean of the log-likelihood.
"""
p = precision.shape[0]
log_likelihood_ = -np.sum(emp_cov * precision) + fast_logdet(precision)
log_likelihood_ -= p * np.log(2 * np.pi)
log_likelihood_ /= 2.0
return log_likelihood_
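# Worked numeric check (illustrative, not part of the original module): with
# emp_cov = precision = np.eye(2), the terms are -2 (trace), 0 (log-det) and
# -2 * log(2 * pi), so log_likelihood(np.eye(2), np.eye(2)) equals
# -1 - log(2 * pi), roughly -2.8379.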
def empirical_covariance(X, *, assume_centered=False):
"""Computes the Maximum likelihood covariance estimator
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered : bool, default=False
If True, data will not be centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data will be centered before computation.
Returns
-------
covariance : ndarray of shape (n_features, n_features)
Empirical covariance (Maximum Likelihood Estimator).
Examples
--------
>>> from sklearn.covariance import empirical_covariance
>>> X = [[1,1,1],[1,1,1],[1,1,1],
... [0,0,0],[0,0,0],[0,0,0]]
>>> empirical_covariance(X)
array([[0.25, 0.25, 0.25],
[0.25, 0.25, 0.25],
[0.25, 0.25, 0.25]])
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
if X.shape[0] == 1:
warnings.warn(
"Only one sample available. You may want to reshape your data array"
)
if assume_centered:
covariance = np.dot(X.T, X) / X.shape[0]
else:
covariance = np.cov(X.T, bias=1)
if covariance.ndim == 0:
covariance = np.array([[covariance]])
return covariance
class EmpiricalCovariance(BaseEstimator):
"""Maximum likelihood covariance estimator.
Read more in the :ref:`User Guide <covariance>`.
Parameters
----------
store_precision : bool, default=True
Specifies if the estimated precision is stored.
assume_centered : bool, default=False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
Attributes
----------
location_ : ndarray of shape (n_features,)
Estimated location, i.e. the estimated mean.
covariance_ : ndarray of shape (n_features, n_features)
Estimated covariance matrix
precision_ : ndarray of shape (n_features, n_features)
Estimated pseudo-inverse matrix.
(stored only if store_precision is True)
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
See Also
--------
EllipticEnvelope : An object for detecting outliers in
a Gaussian distributed dataset.
GraphicalLasso : Sparse inverse covariance estimation
with an l1-penalized estimator.
LedoitWolf : LedoitWolf Estimator.
MinCovDet : Minimum Covariance Determinant
(robust estimator of covariance).
OAS : Oracle Approximating Shrinkage Estimator.
ShrunkCovariance : Covariance estimator with shrinkage.
Examples
--------
>>> import numpy as np
>>> from sklearn.covariance import EmpiricalCovariance
>>> from sklearn.datasets import make_gaussian_quantiles
>>> real_cov = np.array([[.8, .3],
... [.3, .4]])
>>> rng = np.random.RandomState(0)
>>> X = rng.multivariate_normal(mean=[0, 0],
... cov=real_cov,
... size=500)
>>> cov = EmpiricalCovariance().fit(X)
>>> cov.covariance_
array([[0.7569..., 0.2818...],
[0.2818..., 0.3928...]])
>>> cov.location_
array([0.0622..., 0.0193...])
"""
def __init__(self, *, store_precision=True, assume_centered=False):
self.store_precision = store_precision
self.assume_centered = assume_centered
def _set_covariance(self, covariance):
"""Saves the covariance and precision estimates
Storage is done accordingly to `self.store_precision`.
Precision stored only if invertible.
Parameters
----------
covariance : array-like of shape (n_features, n_features)
Estimated covariance matrix to be stored, and from which precision
is computed.
"""
covariance = check_array(covariance)
# set covariance
self.covariance_ = covariance
# set precision
if self.store_precision:
self.precision_ = linalg.pinvh(covariance, check_finite=False)
else:
self.precision_ = None
def get_precision(self):
"""Getter for the precision matrix.
Returns
-------
precision_ : array-like of shape (n_features, n_features)
The precision matrix associated to the current covariance object.
"""
if self.store_precision:
precision = self.precision_
else:
precision = linalg.pinvh(self.covariance_, check_finite=False)
return precision
def fit(self, X, y=None):
"""Fit the maximum liklihood covariance estimator to X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
X = self._validate_data(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(X, assume_centered=self.assume_centered)
self._set_covariance(covariance)
return self
def score(self, X_test, y=None):
"""Compute the log-likelihood of a Gaussian data set with `self.covariance_`.
Parameters
----------
X_test : array-like of shape (n_samples, n_features)
Test data of which we compute the likelihood, where n_samples is
the number of samples and n_features is the number of features.
X_test is assumed to be drawn from the same distribution as
the data used in fit (including centering).
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
res : float
The likelihood of the data set with `self.covariance_` as an
estimator of its covariance matrix.
"""
X_test = self._validate_data(X_test, reset=False)
# compute empirical covariance of the test set
test_cov = empirical_covariance(X_test - self.location_, assume_centered=True)
# compute log likelihood
res = log_likelihood(test_cov, self.get_precision())
return res
def error_norm(self, comp_cov, norm="frobenius", scaling=True, squared=True):
"""Compute the Mean Squared Error between two covariance estimators.
Parameters
----------
comp_cov : array-like of shape (n_features, n_features)
The covariance to compare with.
norm : {"frobenius", "spectral"}, default="frobenius"
The type of norm used to compute the error. Available error types:
- 'frobenius' (default): sqrt(tr(A^t.A))
- 'spectral': sqrt(max(eigenvalues(A^t.A)))
where A is the error ``(comp_cov - self.covariance_)``.
scaling : bool, default=True
If True (default), the squared error norm is divided by n_features.
If False, the squared error norm is not rescaled.
squared : bool, default=True
Whether to compute the squared error norm or the error norm.
If True (default), the squared error norm is returned.
If False, the error norm is returned.
Returns
-------
result : float
The Mean Squared Error (in the sense of the Frobenius norm) between
`self` and `comp_cov` covariance estimators.
"""
# compute the error
error = comp_cov - self.covariance_
# compute the error norm
if norm == "frobenius":
squared_norm = np.sum(error ** 2)
elif norm == "spectral":
squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
else:
raise NotImplementedError(
"Only spectral and frobenius norms are implemented"
)
# optionally scale the error norm
if scaling:
squared_norm = squared_norm / error.shape[0]
# finally get either the squared norm or the norm
if squared:
result = squared_norm
else:
result = np.sqrt(squared_norm)
return result
def mahalanobis(self, X):
"""Compute the squared Mahalanobis distances of given observations.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The observations, the Mahalanobis distances of which we
compute. Observations are assumed to be drawn from the same
distribution as the data used in fit.
Returns
-------
dist : ndarray of shape (n_samples,)
Squared Mahalanobis distances of the observations.
"""
X = self._validate_data(X, reset=False)
precision = self.get_precision()
with config_context(assume_finite=True):
# compute mahalanobis distances
dist = pairwise_distances(
X, self.location_[np.newaxis, :], metric="mahalanobis", VI=precision
)
return np.reshape(dist, (len(X),)) ** 2
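# Usage sketch (assumed workflow, mirroring the class docstring example):
#     cov = EmpiricalCovariance().fit(X)
#     d2 = cov.mahalanobis(X)  # squared distances, shape (n_samples,)
# Large d2 values flag observations far from the fitted location_ under the
# estimated covariance, which is the basis of covariance-based outlier checks.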
|
shyamalschandra/scikit-learn
|
sklearn/covariance/_empirical_covariance.py
|
Python
|
bsd-3-clause
| 11,053
|
[
"Gaussian"
] |
4aef5923a7fcce5b695ace98625c3ec0539d3cc6b57edb8ab3798e7ba89ce879
|
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El
m = 10000
n = 5000
display = False
worldRank = El.mpi.WorldRank()
worldSize = El.mpi.WorldSize()
def Rectang(height,width):
A = El.DistSparseMatrix()
A.Resize(height,width)
localHeight = A.LocalHeight()
A.Reserve(5*localHeight)
for sLoc in xrange(localHeight):
s = A.GlobalRow(sLoc)
A.QueueLocalUpdate( sLoc, s%width, 11 )
A.QueueLocalUpdate( sLoc, (s-1)%width, -1 )
A.QueueLocalUpdate( sLoc, (s+1)%width, 2 )
A.QueueLocalUpdate( sLoc, (s-height)%width, -3 )
A.QueueLocalUpdate( sLoc, (s+height)%width, 4 )
# The dense last column
#A.QueueLocalUpdate( sLoc, width-1, -5/height );
A.ProcessQueues()
return A
A = Rectang(m,n)
b = El.DistMultiVec()
El.Gaussian( b, m, 1 )
if display:
El.Display( A, "A" )
El.Display( b, "b" )
ctrl = El.NNLSCtrl_d()
ctrl.socpCtrl.mehrotraCtrl.progress = False
ctrl.socpCtrl.mehrotraCtrl.time = False
ctrl.socpCtrl.mehrotraCtrl.solveCtrl.progress = False
# Solve *with* resolving the regularization
ctrl.socpCtrl.mehrotraCtrl.resolveReg = True
startNNLS = El.mpi.Time()
x = El.NNLS( A, b, ctrl )
endNNLS = El.mpi.Time()
if worldRank == 0:
print "NNLS time (resolve reg.):", endNNLS-startNNLS, "seconds"
if display:
El.Display( x, "x" )
# Solve without resolving the regularization
ctrl.socpCtrl.mehrotraCtrl.resolveReg = False
startNNLS = El.mpi.Time()
x = El.NNLS( A, b, ctrl )
endNNLS = El.mpi.Time()
if worldRank == 0:
print "NNLS time (no resolve reg.):", endNNLS-startNNLS, "seconds"
if display:
El.Display( x, "x" )
e = El.DistMultiVec()
El.Copy( b, e )
El.Multiply( El.NORMAL, -1., A, x, 1., e )
eTwoNorm = El.Nrm2( e )
if worldRank == 0:
print "|| A x - b ||_2 =", eTwoNorm
startLS = El.mpi.Time()
xLS = El.LeastSquares( A, b )
endLS = El.mpi.Time()
if worldRank == 0:
print "LS time:", endLS-startLS, "seconds"
if display:
El.Display( xLS, "xLS" )
El.Copy( b, e )
El.Multiply( El.NORMAL, -1., A, xLS, 1., e )
eTwoNorm = El.Nrm2( e )
if worldRank == 0:
print "|| A x_{LS} - b ||_2 =", eTwoNorm
# Require the user to press a button before the figures are closed
El.Finalize()
if worldSize == 1:
raw_input('Press Enter to exit')
|
mcopik/Elemental
|
examples/interface/NNLS.py
|
Python
|
bsd-3-clause
| 2,419
|
[
"Gaussian"
] |
67a339644af77d01aa23580c6d81c67b68de4094405cf243d53364f87a8c3c4e
|
#!/usr/bin/env python
# -*- coding: utf-8
"""
Example generation modified from the scikit learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from time import time
import os
import shutil
import traceback
import glob
import sys
from StringIO import StringIO
import cPickle
import re
import urllib2
import gzip
import posixpath
try:
from PIL import Image
except:
import Image
import matplotlib
matplotlib.use('Agg')
import token
import tokenize
MAX_NB_LINES_STDOUT = 20
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
###############################################################################
# Documentation link resolver objects
def get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
resp = urllib2.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=[], relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
'URLs (doc_url cannot start with "http://")')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local '
                 'package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
modules_test = [cobj['module_short']] + self.extra_modules_test
for module in modules_test:
full_name = module + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[value.keys()[0]]
fname_idx = value[0]
elif module in self._searchindex['objects']:
value = self._searchindex['objects'][module]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
break
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
url = False
for comb_name in ['%s.%s' % (module, cobj['name']) for module
in modules_test]:
if html.find(comb_name) >= 0:
url = link + '#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
cobj['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
# replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
def extract_docstring(filename):
""" Extract a module-level docstring, if any
"""
lines = file(filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
tokens = tokenize.generate_tokens(iter(lines).next)
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(line.rstrip() for line in
docstring.split('\n')).split('\n\n')
if len(paragraphs) > 0:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'examples')
example_dir = os.path.abspath(app.builder.srcdir + '/../' + 'examples')
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
# we create an index.rst with all examples
fhindex = file(os.path.join(root_dir, 'index.rst'), 'w')
#Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
display: none;
}
.figure {
float: left;
margin: 25px;
width: auto;
height: 250px;
width: 250px;
}
.figure img {
display: inline;
}
.figure .caption {
width: 230px;
text-align: center !important;
}
</style>
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery)
for dir in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, dir)):
generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery)
fhindex.flush()
def generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery):
""" Generate the rst file for an example directory.
"""
if not dir == '.':
target_dir = os.path.join(root_dir, dir)
src_dir = os.path.join(example_dir, dir)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.rst')):
print 80 * '_'
print ('Example directory %s does not have a README.rst file'
% src_dir)
print 'Skipping this directory'
print 80 * '_'
return
fhindex.write("""
%s
""" % file(os.path.join(src_dir, 'README.rst')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
def sort_key(a):
# put elements without a plot last
if not a.startswith('plot') and a.endswith('.py'):
return 'zz' + a
return a
for fname in sorted(os.listdir(src_dir), key=sort_key):
if not os.path.split(fname)[-1].startswith('plot_'):
continue
if fname.endswith('py'):
generate_file_rst(fname, target_dir, src_dir, plot_gallery)
thumb = os.path.join(dir, 'images', 'thumb', fname[:-3] + '_thumb.png')
link_name = os.path.join(dir, fname).replace(os.path.sep, '_')
fhindex.write('.. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if dir != '.':
fhindex.write(' :target: ./%s/%s.html\n\n' % (dir,
fname[:-3]))
else:
fhindex.write(' :target: ./%s.html\n\n' % link_name[:-3])
fhindex.write(""" :ref:`example_%s`
.. toctree::
:hidden:
%s/%s
""" % (link_name, dir, fname[:-3]))
fhindex.write("""
.. raw:: html
<div style="clear: both"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['mne', 'matplotlib', 'numpy', 'scipy', 'mayavi']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) / 2, (height - height_sc) / 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
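# Worked sizing example (illustrative numbers): for a 500x300 source and a
# 250x250 box, scale_w = 0.5 and scale_h ~ 0.83; since 300 * 0.5 = 150 <= 250,
# the width-based scale wins, the thumbnail becomes 250x150 and is pasted at
# ((250 - 250) / 2, (250 - 150) / 2) = (0, 50) on the white canvas.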
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
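# Illustrative trace (hypothetical call): get_short_module_name(
# 'matplotlib.pyplot', 'plot') first tries 'from matplotlib import plot',
# which raises ImportError, so the last working name 'matplotlib.pyplot'
# is returned.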
def generate_file_rst(fname, target_dir, src_dir, plot_gallery):
""" Generate the rst file for a given example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = base_image_name + ".png"
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, fname[:-3] + '_thumb.png')
time_elapsed = 0
if plot_gallery:
# generate the plot as png image if file name
# starts with plot and if it is more recent than an
# existing image.
first_image_file = image_path
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if (not os.path.exists(first_image_file) or
os.stat(first_image_file).st_mtime
<= os.stat(src_file).st_mtime):
# We need to execute the code
print 'plotting %s' % fname
import matplotlib.pyplot as plt
plt.close('all')
try:
from mayavi import mlab
except Exception, e:
from enthought.mayavi import mlab
mlab.close(all=True)
cwd = os.getcwd()
try:
brain = None
global plt
global brain
execfile(example_file, globals())
facecolor = plt.gcf().get_facecolor() # hack to keep black bg
if facecolor == (0.0, 0.0, 0.0, 1.0):
plt.savefig(image_path, facecolor='black')
else:
plt.savefig(image_path)
brain.save_image(image_path)
brain.close()
figure_list = [image_path[len(image_dir):]]
except:
print 80 * '_'
print '%s is not compiling:' % fname
traceback.print_exc()
print 80 * '_'
finally:
os.chdir(cwd)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path)]
# generate thumb file
this_template = plot_rst_template
if os.path.exists(first_image_file):
make_thumbnail(first_image_file, thumb_file, 250, 250)
if not os.path.exists(thumb_file):
# use the default thumbnail
make_thumbnail('_static/pysurfer_logo_small.png', thumb_file, 250, 250)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
f.write(this_template % locals())
f.flush()
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
if exception is not None:
return
print 'Embedding documentation hyperlinks in examples..'
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['mne'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
doc_resolvers['matplotlib'] = SphinxDocLinkResolver(
'http://matplotlib.org')
doc_resolvers['numpy'] = SphinxDocLinkResolver(
'http://docs.scipy.org/doc/numpy-1.6.0')
doc_resolvers['scipy'] = SphinxDocLinkResolver(
'http://docs.scipy.org/doc/scipy-0.11.0/reference')
doc_resolvers['mayavi'] = SphinxDocLinkResolver(
'http://docs.enthought.com/mayavi/mayavi',
extra_modules_test=['mayavi.mlab'])
example_dir = os.path.join(app.builder.srcdir, 'examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print '\tprocessing: %s' % fname
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = cPickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.iteritems():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
if link is not None:
parts = name.split('.')
name_html = orig_pattern % parts[0]
for part in parts[1:]:
name_html += period + orig_pattern % part
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
if len(str_repl) > 0:
with open(full_fname, 'rt') as fid:
lines_in = fid.readlines()
fid.close()
with open(full_fname, 'wt') as fid:
for line in lines_in:
for name, link in str_repl.iteritems():
line = line.replace(name.encode('utf-8'),
link.encode('utf-8'))
fid.write(line)
fid.close()
print '[done]'
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = 'build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
|
diego0020/PySurfer
|
doc/sphinxext/gen_rst.py
|
Python
|
bsd-3-clause
| 25,194
|
[
"Mayavi"
] |
27847c5db110b2d247ca5eda0f20463a00e845c5421dfc655311dcc616310d53
|
#! /usr/bin/env python
## Written for python2.6
import pysam
import random
import sys
if len(sys.argv) != 9:
print "\n**** USAGE ERROR **** \n"
print "python script.py number_iterations minimum_coverage maximum_coverage sites_file chromosome sampleid bamfile outfilename "
sys.exit("exiting ERROR !!!!")
#########################
###### ***** User specified input
#########################
Num_iter=int(sys.argv[1]) # Number of iterations
Min_cov=int(sys.argv[2]) # Min coverage to consider a site
Max_cov=int(sys.argv[3]) # Max coverage to consider a site
Sites_file=sys.argv[4] # This is a text file with just the position in it, one position per line.
chrom_sel = sys.argv[5] # The chromosome you are analysing. e.g. chr25
Sample = sys.argv[6] # ID of the sample in the bam file
samfile = pysam.Samfile(sys.argv[7], "rb") ## The bamfile you want to analyse
Outfilename = sys.argv[8]
if Min_cov < 4:
sys.exit(" ** Exiting error. You specified minimum coverage < 4. 4 is the minimum allowed")
### Outfile - open and write header
Outfile = open(Outfilename, 'w')
Outfile.write('#chromosome\titeration_number\tcount_excluded_triallelic_sites\tcount_exclued_error_sites\tcount_sites_excluded_for_lowhighcoverage\tcount_parsed_sites_from_sitesfile\ttype_of_site\tSampleID\tFourSite_count_parsed_sites_calcbasedon\tFoursite_count_4readsSame\tFoursite_count_3RdSame1Diff\tFoursite_count_2readsame2readdif\n')
#########################
###### ***** Get sites of interest
#########################
## Sites file has the list of sites you are interested in looking at. There is one file per chromosome. Read in the sites as a list then make it a set as this is more efficient
f = open(Sites_file, "rU")
listofsites = [int(x) for x in f]
sites = set(listofsites)
#########################
###### ***** Counters
#########################
count_iterations=0
count_screen_sites=0 # total num sites looked at - inc sites with too high coverage/N's
count_exc_cov=0 # count of sites excluded due to low or high coverage
count_error=0 # error check - if this is not 0, something went wrong
count_triallelic=0 # sites where number of bases > 2 after excluding N's
## Foursite counters
FS_4RdSame=0
FS_3RdSame1Diff=0
FS_2RdSame2Diff=0
FS_Ctsitesparse=0
#########################
###### ***** Functions
#########################
def site_homo(bases):
"checks to see if the bases are identical or not"
return all(x == bases[0] for x in bases)
#########################
###### ***** Main Script
#########################
## Check if the position is a site of interest - if not, skip it (may not be needed if computations are run only on sites of interest)
## For each site of interest, append all of the calls from the different reads to the list 'calls'
## Start counting sites
while count_iterations < Num_iter:
count_iterations += 1
for pileupcolumn in samfile.pileup(chrom_sel):
calls=[]
if pileupcolumn.pos not in sites:
# if pileupcolumn.pos == 0: ## You would use this if you wanted the whole chromosome
pass
else:
## get the bases for a position & store it in calls
for pileupread in pileupcolumn.pileups:
calls.append(pileupread.alignment.seq[pileupread.qpos])
count_screen_sites += 1
### List comprehension to remove N's from the list of potential calls to sample
calls = [y for y in calls if y != 'N']
num_bases=set(calls)
## Skip sites where coverage is too low or too high - count the number of these sites
## Skip sites if they are triallelic and count these sites
## Else randomly sample 4 reads from calls
if len(calls) > Max_cov or len(calls) < Min_cov:
count_exc_cov += 1
elif len(num_bases) > 2:
count_triallelic += 1
elif len(num_bases) <= 0:
count_error += 1
else:
fourreads=random.sample(calls,4)
## Classify the four sampled reads: all the same, three-one split, or two-two split (anything else indicates an error)
if len(fourreads) != 4:
sys.exit("***ERROR exiting - script is meant to be written to sample four reads, but more were sampled. Something is wrong - bug?")
else:
FS_Ctsitesparse+=1
D_fourreads={}
## This makes a dictionary of what is in your four reads e.g. {'A': 3, 'C': 1}
for item in fourreads:
D_fourreads[item]=D_fourreads.get(item, 0) + 1
## Extract out the dictionary values e.g. [3,1] for the above example
val_fourreads=D_fourreads.values()
key_fourreads=D_fourreads.keys()
### Four reads the same [A,A,A,A], [4]
if val_fourreads == [4]:
FS_4RdSame+=1
### Two reads same : Two reads same e.g. [A,A,C,C] = [2,2]
elif val_fourreads == [2,2]:
FS_2RdSame2Diff+=1
### Three reads same : One diff e.g. [A,A,A,C] = [1,3] or [3,1]
elif val_fourreads == [3,1] or val_fourreads == [1,3]:
FS_3RdSame1Diff+=1
else:
print "Your read dictionary: ", D_fourread
print "Your read values: ", val_fourreads
sys.exit("***ERROR exiting - We extracted four reads, and counted whether they were all the same, 3 reads one base, 1 read another base OR 2 reads one base 2 reads another base. You are none of these. Something is wrong - bug?")
Outfile.write(str(chrom_sel) + "\t" + str(count_iterations) + "\t" + str(count_triallelic) + "\t" + str(count_error) + "\t" + str(count_exc_cov) + "\t" + str(count_screen_sites) + "\t" + str(Sites_file) + '\t' + str(Sample) + '\t' + str(FS_Ctsitesparse) + '\t' + str(FS_4RdSame) + '\t' + str(FS_3RdSame1Diff) + '\t' + str(FS_2RdSame2Diff) + '\n')
## reset the counters
count_screen_sites=0 # total number of sites looked at - including ones with too high coverage, or N's
count_exc_cov=0 # count of sites excluded due to low or high coverage
count_error=0 # error check - if this is not 0, something went wrong
count_triallelic=0
FS_4RdSame=0
FS_3RdSame1Diff=0
FS_2RdSame2Diff=0
FS_Ctsitesparse=0
samfile.close()
Outfile.close()
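## Interpretation note (illustrative math, not from the original script): at a
## truly heterozygous site with balanced alleles, each of the 4 sampled reads
## carries either allele with probability 1/2, so the expected splits are
## P(2+2) = C(4,2)/2**4 = 6/16 = 0.375, P(3+1) = 8/16 = 0.5 and
## P(all same) = 2/16 = 0.125, a baseline the FS_* counts can be compared to.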
|
LohmuellerLab/FourSite
|
CountReadFoursite/CountReadFoursite.py
|
Python
|
mit
| 5,921
|
[
"pysam"
] |
f7e625edf836749560ddf04b3805bad4153ee449aeb98fe5663c0ff822669c93
|
"""Provide functions for pruning reactions, metabolites and genes."""
from ast import And, BoolOp, Module, Name, NodeTransformer
from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, Union
from warnings import warn
from cobra.core import GPR
if TYPE_CHECKING:
from cobra import Gene, Metabolite, Model, Reaction
def prune_unused_metabolites(model: "Model") -> Tuple["Model", List["Metabolite"]]:
"""Remove metabolites not involved in any reactions.
Parameters
----------
model: cobra.Model
The model to remove unused metabolites from.
Returns
-------
cobra.Model
Input model with unused metabolites removed.
list of cobra.Metabolite
List of metabolites that were removed.
"""
output_model = model.copy()
inactive_metabolites = [
m for m in output_model.metabolites if len(m.reactions) == 0
]
output_model.remove_metabolites(inactive_metabolites)
return output_model, inactive_metabolites
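# Usage sketch (assumed typical call; `model` is any cobra.Model instance):
#     pruned, removed = prune_unused_metabolites(model)
#     print("removed %d orphan metabolites" % len(removed))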
def prune_unused_reactions(model: "Model") -> Tuple["Model", List["Reaction"]]:
"""Remove reactions with no assigned metabolites, returns pruned model.
Parameters
----------
model: cobra.Model
The model to remove unused reactions from.
Returns
-------
cobra.Model
Input model with unused reactions removed.
list of cobra.Reaction
List of reactions that were removed.
"""
output_model = model.copy()
reactions_to_prune = [r for r in output_model.reactions if len(r.metabolites) == 0]
output_model.remove_reactions(reactions_to_prune)
return output_model, reactions_to_prune
def undelete_model_genes(model: "Model") -> None:
"""Undo the effects of a call to `delete_model_genes` in place.
Parameters
----------
model: cobra.Model
The model which will be modified in place.
"""
if model._trimmed_genes is not None:
for x in model._trimmed_genes:
x.functional = True
if model._trimmed_reactions is not None:
for (
the_reaction,
(lower_bound, upper_bound),
) in model._trimmed_reactions.items():
the_reaction.lower_bound = lower_bound
the_reaction.upper_bound = upper_bound
model._trimmed_genes = []
model._trimmed_reactions = {}
model._trimmed = False
def get_compiled_gene_reaction_rules(model: "Model") -> Dict["Reaction", Module]:
"""Generate a dictionary of compiled gene-reaction rules.
Any gene-reaction rule expression which cannot be compiled or does not
evaluate after compiling will be excluded. The result can be used in the
`find_gene_knockout_reactions` function to speed up evaluation of these
rules.
Parameters
----------
model: cobra.Model
The model to get gene-reaction rules for.
Returns
-------
dict of cobra.Reaction, ast.Module
The dictionary with cobra.Reaction objects as keys and ast.Module
objects as values.
.. deprecated::
Internal function that has outlived its purpose.
"""
warn(
"The function `get_compiled_gene_reaction_rules` has outlived its purpose. "
"It will be removed soon.",
DeprecationWarning,
)
return {r: r.gpr for r in model.reactions}
def find_gene_knockout_reactions(
model: "Model",
gene_list: List["Gene"],
compiled_gene_reaction_rules: Optional[Dict["Reaction", Module]] = None,
) -> List["Reaction"]:
"""Identify reactions which will be disabled when genes are knocked out.
Parameters
----------
model: cobra.Model
The model for which to find gene knock-out reactions.
gene_list: list of cobra.Gene
The list of genes to knock-out.
compiled_gene_reaction_rules: dict of {reaction: compiled_string},
optional
If provided, this gives pre-compiled gene-reaction rule strings.
The compiled rule strings can be evaluated much faster. If a rule
is not provided, the regular expression evaluation will be used.
Because not all gene-reaction rule strings can be evaluated, this
dict must exclude any rules which can not be used with eval
(default None).
Returns
-------
list of cobra.Reaction
The list of cobra.Reaction objects which will be disabled.
.. deprecated:: 0.22.1
Internal function that has outlived its purpose.
"""
warn(
"The function `find_gene_knockout_reactions` has outlived its purpose. "
"It will be removed in the next minor version (0.23.0).",
DeprecationWarning,
)
potential_reactions = set()
for gene in gene_list:
if isinstance(gene, str):
gene = model.genes.get_by_id(gene)
potential_reactions.update(gene._reaction)
gene_set = {str(i) for i in gene_list}
if compiled_gene_reaction_rules is None:
compiled_gene_reaction_rules = {r: r.gpr for r in potential_reactions}
return [
r
for r in potential_reactions
if not compiled_gene_reaction_rules[r].eval(gene_set)
]
def delete_model_genes(
model: "Model",
gene_list: Union[List["Gene"], Set["Gene"], List[str], Set[str]],
cumulative_deletions: bool = True,
disable_orphans: bool = False,
) -> None:
"""Temporarily remove the effect of genes in `gene_list`.
It sets the bounds to "zero" for reactions catalysed by the genes in
`gene_list` if deleting the genes stops the reactions from proceeding.
Parameters
----------
model: cobra.Model
The model whose reaction bounds are to be set.
gene_list: list of cobra.Gene
The list of genes to knock-out.
cumulative_deletions: bool, optional
If True, then any previous deletions will be maintained in the
model (default True).
disable_orphans: bool, optional
If True, then orphan reactions will be disabled. Currently, this
is not implemented (default False).
"""
if disable_orphans:
raise NotImplementedError("disable_orphans not implemented")
if not hasattr(model, "_trimmed"):
model._trimmed = False
model._trimmed_genes = []
model._trimmed_reactions = {} # Store the old bounds in here.
# older models have this
if model._trimmed_genes is None:
model._trimmed_genes = []
if model._trimmed_reactions is None:
model._trimmed_reactions = {}
# Allow a single gene to be fed in as a string instead of a list.
if not hasattr(gene_list, "__iter__") or hasattr(
gene_list, "id"
): # cobra.Gene has __iter__
gene_list = [gene_list]
if not hasattr(gene_list[0], "id"):
if gene_list[0] in model.genes:
tmp_gene_dict = dict([(x.id, x) for x in model.genes])
else:
# assume we're dealing with names if no match to an id
tmp_gene_dict = dict([(x.name, x) for x in model.genes])
gene_list = [tmp_gene_dict[x] for x in gene_list]
# Make the genes non-functional
for x in gene_list:
x.functional = False
if cumulative_deletions:
gene_list.extend(model._trimmed_genes)
else:
undelete_model_genes(model)
for the_reaction in find_gene_knockout_reactions(model, gene_list):
# Running this on an already deleted reaction will overwrite the
# stored reaction bounds.
if the_reaction in model._trimmed_reactions:
continue
old_lower_bound = the_reaction.lower_bound
old_upper_bound = the_reaction.upper_bound
model._trimmed_reactions[the_reaction] = (
old_lower_bound,
old_upper_bound,
)
the_reaction.lower_bound = 0.0
the_reaction.upper_bound = 0.0
model._trimmed = True
model._trimmed_genes = list(set(model._trimmed_genes + gene_list))
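# A minimal round-trip sketch (assuming an existing cobra.Model `model` with a
# gene id "b0001"; the id is illustrative): the knock-out only zeroes reaction
# bounds, so it can be reverted in place.
#
#     delete_model_genes(model, ["b0001"])
#     # ... inspect the knocked-out model, e.g. model.optimize() ...
#     undelete_model_genes(model)  # restores the saved bounds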
class _GeneRemover(NodeTransformer):
"""
Class to represent a gene set remover.
Parameters
----------
target_genes: list or set of cobra.Gene
A set of genes to be removed.
"""
def __init__(self, target_genes: Set["Gene"], **kwargs) -> None:
"""Initialize a new object.
Other Parameters
----------------
kwargs:
Further keyword arguments are passed on to the parent class.
"""
super().__init__(**kwargs)
self.target_genes = {str(i) for i in target_genes}
def visit_Name(self, node: "Name") -> Optional["Name"]:
"""Remove a gene.
Parameters
----------
node: ast.Name
The gene to remove.
Returns
-------
cobra.Gene or None
None if gene object is in `target_genes`.
"""
return None if node.id in self.target_genes else node
def visit_BoolOp(self, node: "BoolOp") -> Optional[Union["BoolOp", "Name"]]:
"""Rules for boolean operations.
Parameters
----------
node: ast.Name
The gene to apply rules to.
Returns
-------
ast.Name or None
None if size of Or node values is zero after applying rule,
or size of And node values is lower after applying rule.
"""
original_n = len(node.values)
self.generic_visit(node)
if len(node.values) == 0:
return None
# AND with any entities removed
if len(node.values) < original_n and isinstance(node.op, And):
return None
# if one entity in an OR was left, just that entity passed up
if len(node.values) == 1:
return node.values[0]
return node
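# Illustration of the rewrite rules above (a sketch; the gene names are made up
# and GPR.from_string is assumed available, as in recent cobrapy versions):
# removing "g2" from "g1 and g2" shrinks the And node, so the whole rule
# collapses to None, while "g1 or g2" reduces to the single gene "g1".
#
#     gpr = GPR.from_string("g1 or g2")
#     _GeneRemover({"g2"}).visit(gpr)  # gpr now behaves as just "g1"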
def remove_genes(
model: "Model",
gene_list: Union[List["Gene"], Set["Gene"], List[str], Union[str]],
remove_reactions: bool = True,
) -> None:
"""Remove genes entirely from the model.
This will also simplify all gene-reaction rules with the genes
inactivated.
Parameters
----------
model: cobra.Model
The model to remove genes from.
gene_list: list of cobra.Gene or gene ids
The list of gene objects to remove.
remove_reactions: bool, optional
Whether to remove reactions associated with genes in `gene_list`
(default True).
"""
gene_set = {model.genes.get_by_id(str(i)) for i in gene_list}
gene_id_set = {i.id for i in gene_set}
remover = _GeneRemover(gene_id_set)
target_reactions = []
for rxn in model.reactions:
if rxn.gene_reaction_rule is None or len(rxn.gene_reaction_rule) == 0:
continue
# reactions to remove
if remove_reactions and not rxn.gpr.eval(gene_id_set):
target_reactions.append(rxn)
else:
# if the reaction is not removed, remove the gene
# from its gpr
remover.visit(rxn.gpr)
if "body" not in rxn.gpr.__dict__.keys():
rxn.gpr = GPR()
rxn._update_genes_from_gpr()
for gene in gene_set:
model.genes.remove(gene)
# remove reference to the gene in all groups
associated_groups = model.get_associated_groups(gene)
for group in associated_groups:
group.remove_members(gene)
model.remove_reactions(target_reactions)
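# A minimal usage sketch (assuming an existing cobra.Model `model`; the gene ids
# are illustrative): the genes are removed for good, reactions that can no
# longer be catalysed are dropped, and the remaining GPRs are simplified in place.
#
#     remove_genes(model, ["b0001", "b0002"], remove_reactions=True)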
|
opencobra/cobrapy
|
src/cobra/manipulation/delete.py
|
Python
|
gpl-2.0
| 11,356
|
[
"VisIt"
] |
79f0471db8622f9bbd62d64c21d58c1ae6f88224bdfdb83394632dcc2eaa5a57
|
import numpy as np
from math import sqrt
import time, operator
import pickle
# `rank` is used in dump(); in older ASE versions it was exposed by
# ase.parallel (an assumption based on the historic ASE API)
from ase.parallel import rank
from ase.optimize import Dynamics
from ase import BFGS
from ase.optimize.gxoptimizer import GxOptimizer
class LiuTSsearch(Dynamics):
def __init__(self, atoms, restart=None, logfile='-' , trajectory=None, soften = 0, factsoft = 0.7,
outer_optimizer=BFGS, finish_optimizer = GxOptimizer, opt_args=None, relax_max=2, liuconstr = None,
switchfinish = True, treat1 = True, treat2 = True, additionalcoords = [1, 2] ):
"""Structure optimizer object.
source of the code is the following paper:
H.-F. Wang and Z.-P. Liu: Comprehensive Mechanism and Structure-Sensitivity of Ethanol
Oxidation on Platinum: New Transition-State Searching Method for Resolving the Complex
Reaction Network, JACS 130 (2008), 10996
atoms: Atoms object
The Atoms object to relax.
restart: str
Filename for restart file. Default value is *None*.
logfile: file object or str
If *logfile* is a string, a file with that name will be opened.
Use '-' for stdout.
trajectory: Trajectory object or str
Attach trajectory object. If *trajectory* is a string a
PickleTrajectory will be constructed. Use *None* for no
trajectory.
outer_optimizer: optimizer for the "Broyden steps"
opt_args: **opt_args will be given to the outer optimizer, so if
some addiditional arguments are wanted
relax_max: after this amount of steps (integer) an update step
is performed
liuconstr: constraint object
treat1: if true calculate with treatment 1
treat2: if true calculate with treatment 2
soften: there are different implementations of treatment 2, soften
chooses which one to take
factsoft: treatment 2 wants a value, saying how much of change considerd
will be used for adjusting the neighboring atoms, different meaning
for the different soften values
switchfinish: for the finishing of the calculation (if already near TS) there
may be the wanting for another (faster) transition state searcher
finish_optimizer: optimizer which finishes after switchforfinish, will be used after
three update and four additional relaxation steps without any
visible change to the coordinates
"""
Dynamics.__init__(self, atoms, logfile, trajectory)
# set optimizer
if opt_args == None:
self.dyn = outer_optimizer( atoms )
else:
# set optimizer with user given variables
self.dyn = outer_optimizer( atoms, **opt_args)
self.restart = restart
self.relax_max = relax_max
self.stepsmax = None
self.dyn.initialize()
self.liuconstr = liuconstr
self.treat1 = treat1
self.treat2 = treat2
self.soften = soften
self.factsoft = factsoft
self.switchfinish = switchfinish
self.finish_optimizer = finish_optimizer
self.logout = open('liu.output','w')
self.adcoord = additionalcoords
def setliuconstr(self, liuconstr):
self.liuconstr = liuconstr
def run(self, fmax=0.05, smax = 0.05, steps=100000000):
"""Run structure transition state search algorithm from Liu.
This method will return when the forces on all individual
atoms are less than *fmax* and stepsize are less than *smax*
or when the number of steps exceeds
*steps*."""
self.fmax = fmax
self.smax = smax
step = 0
switch_finish = 0
relaxorup = 0
# next one for counting the relaxation steps
step_relax = 1
self.logout.write('Transition State search after algorithm of Liu et al.\n')
self.logout.write('Parameter for system %s \n' % (self.atoms.get_name()) )
self.logout.write('iteration energy (eV) max. force (eV/A) max step (A) react.coord. add.cord. dist %s \n' % (str(self.adcoord) ) )
self.writegeometry( self.atoms.get_positions() , second = 0)
while step < steps:
f = self.atoms.get_forces()
oldval = self.atoms.get_positions()
# perform actual Broydens step proposal
# and set steps to it
self.dyn.step(f)
newval = self.atoms.get_positions()
stepdiff = newval - oldval
# end iterating if converged, compare forces and stepsize for it
# sets also stepsmax to maximal value of steps
if self.converged(f, stepdiff):
print "System is converged, calcualtion stopped!"
return
# write some output
self.log(f, self.stepsmax)
# second part of liu algorithm, update every relax_max steps
# or if the steps don't change anything
# else only relax (set constraint back to old value)
# what the actual constraint is, is decided by the LiuC... classes
if (step_relax == self.relax_max or (self.stepsmax < self.smax)):
self.dyn.initialize()
if (self.treat2):
# for treatment two there have to be some values stored
# (atomic network) before update could be performed
self.__treat2_init(newval, self.soften)
# print "before treatment2", newval
#self.writegeometry( newval)
# updates the reaction coordinate(s) and gives back the change of
# the atoms related to this (for treatment 2)
change = self.liuconstr.update(oldval, newval, f)
if (self.treat2):
#self.writegeometry(newval)
# treatment 2 adjusts the atoms neighboring the ones changed in
# update, there are several possibilities of doing so, some require
# the knowledge of the actual change, others require only the knowledge
# of the old distances (stored in atomicnetwork)
self.treatment2(newval, self.soften, self.factsoft, change)
#self.writegeometry( newval)
#print "after treatment2", newval
step_relax = 0
relaxorup = 2
else:
# in relaxation the "constraint" only has to be reset, so for example in LiuCbond
# the bond length between atomA and atomB, treatment 1 includes the mean square forces
# to decide which atom to move more
self.liuconstr.relaxation( oldval, newval, f, self.treat1)
relaxorup = 1
# the positions now have to given back to the atoms object
self.atoms.set_positions(newval)
# some more output
self.writegeometry( self.atoms.get_positions())
self.logreact(newval, relaxorup)
self.call_observers()
# steps have to count up, self.nsteps for output, step for the steps restriction
# and step_relax to know if next iteration is relaxation or update iteration
self.nsteps += 1
step += 1
step_relax += 1
# if the algorithm is near (enough) to the transition state, another, faster
# transition state searcher may be used; so far this will happen if, after three
# update steps and 4 relaxation steps, the steps have not changed the values much
if (self.switchfinish):
if (self.stepsmax < self.smax):
switch_finish += 1
else:
switch_finish = 0
if (switch_finish > self.relax_max * 3 + 4 ):
self.switchforlast(steps - step)
break
print "maximum number of steps exceeded, calculation stopped"
def converged(self, forces=None, stepdiff = None):
"""Did the optimization converge?"""
# this is for a convergence test without steps; then
# stepsmax should always be below smax
self.stepsmax = self.smax / 100
if forces is None:
forces = self.atoms.get_forces()
if stepdiff is not None:
# take the square root so that stepsmax is a length comparable with smax
self.stepsmax = sqrt((stepdiff ** 2).sum(axis=1).max())
return ((forces ** 2).sum(axis=1).max() < self.fmax ** 2 and self.stepsmax < self.smax )
def log(self, forces, stmax):
fmax2 = sqrt((forces**2).sum(axis=1).max())
e = self.atoms.get_potential_energy()
T = time.localtime()
if self.logfile is not None:
name = self.__class__.__name__
self.logfile.write('%s: %3d %02d:%02d:%02d %15.6f %12.4f %12.4f\n' %
(name, self.nsteps, T[3], T[4], T[5], e, fmax2, stmax))
self.logfile.flush()
self.logout.write('%3d %15.6f %15.8f %15.8f' % (self.nsteps, e, fmax2, stmax))
def logreact(self, newval, relaxorup):
centers = self.liuconstr.centerofnetwork()
for count in range(len(centers) / 2):
# take the centers pairwise: (0, 1), (2, 3), ...
self.logout.write('%18.12f' % self.distance(newval[centers[2 * count]], newval[centers[2 * count + 1]]))
for count2 in range(len(self.adcoord) / 2):
# adcoord holds 1-based atom numbers, also taken pairwise
self.logout.write('%16.10f' % self.distance(newval[self.adcoord[2 * count2] - 1], newval[self.adcoord[2 * count2 + 1] - 1]))
if (relaxorup == 1):
self.logout.write(' r\n')
else:
self.logout.write(' u\n')
def switchforlast(self, steps):
self.dyn = self.finish_optimizer(self.atoms)
self.dyn.initialize()
self.dyn.run(self.fmax, steps)
def dump(self, data):
if rank == 0 and self.restart is not None:
pickle.dump(data, open(self.restart, 'wb'), protocol=2)
def load(self):
return pickle.load(open(self.restart))
def writegeometry(self, new, fileout = 'geodata.xyz' , second = 1):
# writes geometry in xyz file
print "output to geodata.xyz"
syms = self.atoms.get_chemical_symbols()
if fileout == None:
writegeo = stdout.write
else:
if fileout.endswith('.xyz'):
pass
else :
fileout = fileout + '.xyz'
if second==0:
writegeo = open(fileout,"w").write
else:
writegeo = open(fileout,"a").write
# firsts lines give number of atoms and comment
writegeo("%i \n" % (len(syms) ) )
writegeo("cartesian geometry in Angstrom\n")
for num, pos in enumerate(new):
writegeo("%2s " % syms[num])
writegeo("%22.12f %22.12f %22.12f \n" % (pos[0], pos[1], pos[2]) )
def __treat2_init( self, new, soften):
# builds up the networks for the centers given by the LiuC class
centers = self.liuconstr.centerofnetwork()
self.networks = [[] for inum in range(len(centers))]
self.memberinshell = [[] for inum in range(len(centers))]
for inum, center in enumerate(centers):
self.networks[inum], self.memberinshell[inum] = self.atomicnetwork(center, new)
# print self.memberinshell[inum][0]
# eliminate other centers out of list of other shells
for cen in centers:
self.networks[inum][cen].shell = 0
if (soften < 0):
self.allnetwork = self.mergenetworks(self.networks, centers)
def treatment2( self, new, soften, p, ch, cutoff = None):
'''The actual treatment 2: changes the *new* positions with the help of some
factors to adjust the positions of the other atoms.
So far there are three different ways of doing that:
consider the change for atom C in shell s (the smallest number of atoms to
pass on the way to the center of the network);
p is a factor to damp the changes;
the example is for the bond length between A and B;
parameters indexed with A and B belong to the corresponding atom, those
indexed with AB belong to the dimer AB;
Ni are the next neighbors of C (there may be several, hence i) which are on
the path to A or B (in shell s-1 and with a bond to C); n is the number of
these neighbors.
soften = 0:
rC' = rC + p^s_A * r(A'A) + p^s_B * r(B'B)
soften = 1:
rC' = rC + (1 - p * s_A) * r(A'A) + (1 - p * s_B) * r(B'B)
if one of the factors (1 - p * s) is smaller than 0, it is omitted
soften = -1:
rC' = rC + p^s_AB / n * sum_i( (|r(CNi')| - |r(CNi)|) * e_r(CNi') )
'''
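# Worked example of the factors above (illustrative numbers, not from the
# paper): with p = 0.7 and soften = 0, an atom in shell 2 relative to A is
# shifted by 0.7**2 = 0.49 times the displacement r(A'A) applied to A; with
# soften = 1 the factor would be 1 - 0.7*2 = -0.4 < 0, so no shift is applied.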
if (soften < 0):
# soften = -1, maxshell: only consider up to this shell
maxshell = 0
for memberinshell in self.memberinshell:
# in the memberinshells for the atomic networks of the atoms the
# length is the amount of shells available + 1 for the 0'th shell
maxshell = max(maxshell, len(memberinshell))
maxshell -= 1
if (cutoff != None):
# or set maxshell to the cutoff if only wanted to this shell
maxshell = min(cutoff, maxshell)
if (soften < -1):
for i in range(len(self.allnetwork)):
# i runs over all atoms; only those which are wanted are considered
# (shell smaller than the possibly set maxshell; shell == 0 would mean this atom
# is not connected to the atom of which this network is, or the atom is one
# of the network center atoms, which won't be adjusted further, as they have been reset
# in the update function)
if (self.allnetwork[i].shell > 0 and self.allnetwork[i].shell < maxshell):
print "change of", self.allnetwork[i].number, "of shell", self.allnetwork[i].shell
print "old value", new[self.allnetwork[i].number-1]
print self.allnetwork[i].center, self.liuconstr.centerofnetwork()
for inum, center in enumerate(self.liuconstr.centerofnetwork()):
if ( center == (self.allnetwork[i].center -1) ):
new[self.allnetwork[i].number-1] += self._adjustas(ch[inum] , soften, p, self.allnetwork[i].shell)
print "update around center", center + 1, " with adjustment", ch[inum]
break
print "new value", new[self.allnetwork[i].number-1]
else:
for s in range(maxshell):
# s goes over the shells, because this update has to be made shellwise
for i in range(len(self.allnetwork)):
# i goes over all atoms, but only those which have the shell currently
# looked at will be considered further
if (self.allnetwork[i].shell == s + 1):
# there may be several atoms to which the distances should be fixed, they are in the
# nextN list of the atom
for k, smo in enumerate(self.allnetwork[i].nextN):
distold = self.allnetwork[i].dist_to_next[k]
print "change of", self.allnetwork[i].number, "of shell", self.allnetwork[i].shell
print "old value", new[self.allnetwork[i].number-1]
# this update considers the change in the bond length of the corresponding atoms
dt = self.vect_of_change(new[self.allnetwork[i].number-1], new[smo -1], distold)
print dt
# actual adjustment
new[self.allnetwork[i].number-1] += self._adjustas(dt / self.allnetwork[i].multiplicity, soften, p, s + 1)
print "new value", new[self.allnetwork[i].number-1]
else:
# the other cases (soften 0 and 1) are very similar and can be considered together
for inum, network in enumerate(self.networks):
# inum counts the atoms to be changed, network is the atomic network of them,
# as here the changes from different atoms just sum up, each of them can be considered one after another
maxshell = len(self.memberinshell[inum])+1
# maybe a cutoff for shells with too high numbers is wanted
if (cutoff != None ):
maxshell = min(cutoff, maxshell)
for i in range(len(network)):
# i runs over all atoms, only those are considerd which are wanted
# (shell smaller eventually set maxshell, shell + 0 would mean this atom
# is not connected to the atom of which this network is, or the atom is one
# of the network center atoms, which won't be adjusted further, as they have been reset
# in the update function)
if (network[i].shell > 0 and network[i].shell < maxshell):
# print "change of", network[i].number, "of shell", network[i].shell
# print "old value", new[network[i].number-1]
new[network[i].number-1] += self._adjustas(ch[inum] , soften, p, network[i].shell)
# print "new value", new[network[i].number-1]
def vect_of_change(self, pos1, pos2, dold):
# vector from the atom to be changed to the atom to be kept fixed;
# length of the vector is the difference between old and new bond length
vec = pos2 - pos1
dnew = self.distance(pos1, pos2)
vec *= (dnew - dold)/dnew
return vec
def _adjustas(self, dt, soften, p, shell):
# decides how much the positions of the actual atom are changed
if (soften < 1):
# update for soften = 0 and soften = -1, p ** shell is used
return ( p ** shell ) * dt
else:
# update for soften = 1; per the docstring the factor (1 - p * shell)
# is only applied while it stays positive (the original comparison was
# inverted, returning negative factors and omitting positive ones)
if ( p * shell < 1.0 ):
return (1.0 - p * shell) * dt
else:
return 0.0
def atomicnetwork(self, center, pos, cutoff = None):
'''builds an atomic network around atom *center* (position in atomics list)
pos are the positions considered
for a given cutoff, the network will stop with shell = cutoff - 1
the information stored is explained in the netinfo class, which gives the
format it is stored in
'''
# atomic network objects are stored in netinfo class in variable atnet
# (will be given back)
atnet = []
# memberinshell counts the members for a given shell, the value
# memberinshell[0] stores the amount of all atoms (except center) negativ for better handling
# (will be given back)
memberinshell = [ - (len(pos) - 1) ]
# for the radii one needs to know what atomic numbers the atoms have
atnums = self.atoms.get_atomic_numbers()
# Now store all the atoms with an initial value for number, their distance to
# the center atom of the network and their radius, needed further on
for i in range(len(pos)):
d = self.distance(pos[i], pos[center])
atnet.append(netinfo(i+1, dist_to_center = d, radius = self.__giveradius( atnums[i]), center = (center + 1) ))
# start calculating the first shell
# memberinshell needs new value for members of shell 1
memberinshell.append(0)
# for i in range(len(atnet)):
# print atnet[i]
ra = atnet[center].radius
# find out if atom i is in first shell ( d(i center) < radius(i) + radius(center))
# if true, write shell 1 in atnet of atom i, multiplicity is always 1, because there is only one
# atom in shell "0"
# atom center should be in shell 1 too after this calculation, but that will be changed later on
for i in range(len(atnet)):
if( atnet[i].dist_to_center <= (ra + atnet[i].radius)):
atnet[i].shell = 1
atnet[i].multiplicity = 1
atnet[i].nextN = [center +1]
atnet[i].dist_to_next = [atnet[i].dist_to_center]
memberinshell[1] += 1
# if every atom left is in its own shell (one big line), the maximum number of shells is reached
# there is no use in calculating more, if cutoff exists it replaces maxshell
maxshell = -memberinshell[0] - memberinshell[1] + 1
if (cutoff != None ):
maxshell = min(cutoff - 2, maxshell)
if (maxshell < 0):
maxshell = 0
print "maximum shell considerd reduced to", maxshell
# calculate all the other shells one after another; note that j starts with 0, but the first shell considered
# is shell number 2 (j+2)
for j in range( maxshell):
# to store the number of atoms in this shell membershell needs j+2'th number
memberinshell.append(0)
for i in range(len(atnet)):
# as all atoms start in shell 0, the atoms which do not yet have a shell assigned are in shell 0
if(atnet[i].shell < 1):
# make sure the other starting values are correct for unassigned atom
atnet[i].multiplicity = 0
atnet[i].nextN = []
atnet[i].dist_to_next = []
# need to consider all atoms from the current shell - 1 as potential direct neighbors for this atom
for k in range(len(atnet)):
if(atnet[k].shell == j+1 ):
d = self.distance(pos[atnet[i].number-1], pos[atnet[k].number-1])
# a bond to the considered atom exists if the following condition is fulfilled:
if ( d <= (atnet[i].radius + atnet[k].radius)):
# if at least one bond exists, the shell of atom i has been found (j + 2 because start
# with shell 2 but j = 0); as there may be several atoms in shell j + 1 which are connected
# to atom i, the multiplicity will be increased
atnet[i].shell = j + 2
atnet[i].multiplicity += 1
# print i, k, atnet[i].number, atnet[k].number
# store for further usage the number (identifier) and distance (to) of the atom which gave
# the last bond
atnet[i].nextN.append(atnet[k].number)
atnet[i].dist_to_next.append(d)
# if there exist at least one bond, the shell has a new member
if (atnet[i].multiplicity > 0):
memberinshell[j + 2] += 1
# after an empty shell no atom can be found in any later shell;
# it is also of no use to go on if all atoms have found their shell
# (remember memberinshell[0] = -(sum of all atoms -1))
if (memberinshell[j+2] == 0 or (sum(memberinshell) >= 1) ):
break
# the actual code puts the center atom itself in the first shell, this sets it back
atnet[center].shell = 0
memberinshell[1] -= 1
#print 'Data from network of atom', center+1
#for i in range(len(atnet)):
# print atnet[i].number, atnet[i].dist_to_center, atnet[i].shell, atnet[i].multiplicity, atnet[i].nextN, atnet[i].dist_to_next
#print memberinshell
# return network and shelldistribution
return atnet, memberinshell
def mergenetworks(self, networks, centers):
# merge networks for single atoms to one for several atoms all together
for k, network in enumerate(networks):
if (k == 0):
# just start with the first network
mergednetwork = network
else:
for i in range(len(mergednetwork)):
# if the shell for the atom is smaller in the compared network it is taken (only for shells > 0), because
# 0 resembles no connection or center atom), if atom has a shell in new network but not in old one, new one
# is taken
if ((network[i].shell > 0 and network[i].shell < mergednetwork[i].shell) or (mergednetwork[i].shell == 0)):
mergednetwork[i] = network[i]
elif ((network[i].shell > 0 and network[i].shell == mergednetwork[i].shell)):
mergednetwork[i].multiplicity += network[i].multiplicity
mergednetwork[i].nextN = mergednetwork[i].nextN + network[i].nextN
mergednetwork[i].dist_to_next = mergednetwork[i].dist_to_next + network[i].dist_to_next
mergednetwork[i].center = 0
# the shell of the centers of the network should always be 0, as they shouldn't be changed further
for center in centers:
mergednetwork[center].shell = 0
print 'Data from merged network of atom'
for i in range(len(mergednetwork)):
print mergednetwork[i].number, mergednetwork[i].dist_to_center, mergednetwork[i].shell, mergednetwork[i].multiplicity, mergednetwork[i].nextN, mergednetwork[i].dist_to_next
return mergednetwork
def distance(self, atoma, atomb):
# gives back the distance between atoma and atomb
avec = atoma - atomb
return sqrt(np.dot(avec, avec))
def __giveradius(self, z):
# gives the radius back
# stored values are in bohrs, therefore the factor
# they are listed in order of the atomic number
gradius = [ None,
0.3200, 0.9300, 1.2300, 0.9000, 0.8200,
0.7700, 0.7500, 0.7300, 0.7200, 0.7100,
1.5400, 1.3600, 1.1800, 1.1100, 1.0600,
1.0200, 0.9900, 0.9800, 2.0300, 1.7400,
1.4400, 1.3200, 1.2200, 1.1800, 1.1700,
1.1700, 1.1600, 1.1500, 1.1700, 1.2500,
1.2600, 1.2200, 1.2000, 1.1600, 1.1400,
1.1200, 2.1600, 1.9100, 1.6200, 1.4500,
1.3400, 1.3000, 1.2700, 1.2500, 1.2500,
1.2800, 1.3400, 1.4800, 1.4400, 1.4100,
1.4000, 1.3600, 1.3300, 1.3100, 2.3500,
1.9800, 1.6900, 1.6500, 1.6500, 1.6400,
1.6300, 1.6200, 1.8500, 1.6100, 1.5900,
1.5900, 1.5800, 1.5700, 1.5600, 1.5600,
1.5600, 1.4400, 1.3400, 1.3000, 1.2800,
1.2600, 1.2700, 1.3000, 1.3400, 1.4900,
1.4800, 1.4700, 1.4600, 1.4600, 1.4500,
1.0000, 1.0000, 1.0000, 1.0000, 1.6500,
1.0000, 1.4200, 1.0000, 1.0000, 1.0000,
1.0000, 1.0000, 1.0000, 1.0000, 0.8000,
1.0000, 1.0000, 1.0000 ]
return gradius[z]/ 0.529177
class LiuCBond:
"""Reaction coordinate of Liu algorithm is a
bond length
Treatment 1 and 2 are also considered
atomA, atomB are the numbers for the atoms whose bond length is the reaction coordinate
treat1, treat2 are treatment 1 and 2
damp_factor damps the update, corresponds to beta in the paper
"""
def __init__(self, atomA, atomB, damp_factor = 0.8):
# lists in python start with element 0, so the n'th atom is at position n-1 !!
self.atoma = atomA-1
self.atomb = atomB-1
self.beta = damp_factor
def relaxation(self, old, new, forces, t1):
# positions of atoms a and b are reset to old values
pa = old[self.atoma]
pb = old[self.atomb]
d = pa - pb
p = sqrt(np.dot(d, d))
print "relaxation to value", p
self.adjust_positions( new, forces, p, t1)
def update(self, old, new, forces):
# positions of atoms a and b are set to a new value
# but not the one the minimizator calculated
# change in coordinates is given back for possible
# treatment 2
pa = old[self.atoma]
pb = old[self.atomb]
d = pa - pb
p = sqrt(np.dot(d, d))
qa = new[self.atoma].copy()
qb = new[self.atomb].copy()
d = qa -qb
q = sqrt(np.dot(d, d))
p -= self.beta * (q - p)
print "update to value", p
self.adjust_positions( new, forces, p, False)
return [ new[self.atoma] - qa, new[self.atomb] -qb]
def adjust_positions(self, new, forces, dt, t1):
# actually move the atoms a and b in such a way,
# that their distance is dt, set new (all atoms) value to it
# treatment 1 may be considered
qa = new[self.atoma]
qb = new[self.atomb]
d = qa -qb
q = sqrt(np.dot(d, d))
qmiddle = 0.5 * (qa + qb)
d *= 0.5
qa = qmiddle + dt / q * d
qb = qmiddle - dt / q * d
if t1:
fa = sqrt(np.dot(forces[self.atoma], forces[self.atoma]))
fb = sqrt(np.dot(forces[self.atomb], forces[self.atomb]))
lam = (fa - fb) / (fa + fb)
qa += lam * (1 - dt / q) * d
qb += lam * (1 - dt / q) * d
new[self.atoma] = qa
new[self.atomb] = qb
def centerofnetwork(self):
# the liualgorithm has to know around which atoms to build a network
return [self.atoma , self.atomb]
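# A minimal usage sketch (illustrative; `atoms` is assumed to be a prepared
# ase.Atoms object with a calculator attached): the reaction coordinate is the
# bond between atoms 1 and 2, which the Liu search alternately relaxes and updates.
#
# constr = LiuCBond(1, 2, damp_factor=0.8)
# search = LiuTSsearch(atoms, liuconstr=constr, relax_max=2)
# search.run(fmax=0.05, smax=0.05)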
class netinfo:
def __init__(self, number, dist_to_center = None, shell = 0,
multiplicity = 0, nextN = None, dist_to_next = None, radius = None, center = 0 ):
'''Gives the info of an atomic network for one (current) atom, related to a center
the variables stored there are:
number: the number of the atom, consider that the lists start with 0, but
the first atom is 1, but number can be used to get place in list back
dist_to_center: gives the distance in units of atom positions from the current
atom to the center of the network
shell: integer number, gives the number of atoms over which one has to go (at least) to
reach the center, starting from the current atom
nextN: direct neighbors of current atom (of shell -1), current atom has bond to them and they are
all nearer to center atom than current atom
dist_to_next: distance to the direct neighbors, in the same order given
multiplicity: dimension of nextN, i.e. how many direct neighbors there are
radius: radius of the current atom, just to store it and have easier access
center: center of the atomic network (as number of it)
'''
self.number = number
self.dist_to_center = dist_to_center
self.shell = shell
self.multiplicity = multiplicity
# use fresh lists instead of shared mutable default arguments
self.nextN = nextN if nextN is not None else []
self.dist_to_next = dist_to_next if dist_to_next is not None else []
self.radius = radius
self.center = center
|
alexei-matveev/ase-local
|
ase/liutssearch.py
|
Python
|
gpl-2.0
| 31,652
|
[
"ASE"
] |
deec797027dc86c5036437f43b804800965482f1a9c85441ba370112c637a5cd
|
import glob
import pandas as pd
import numpy as np
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
mcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27mcell1_22*")
print(len(mcell))
trito = glob.glob("binary_position_RRBS_trito_pool_1*")
print(len(trito))
totalfiles = mcell + trito
print(len(totalfiles))
df_list = []
for file in totalfiles:
df = pd.read_csv(file)
df = df.drop("Unnamed: 0", axis=1)
df_list.append(df)
print(len(df_list))
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
total_matrix = total_matrix.drop("index", axis=1)
len(total_matrix.columns)
total_matrix.columns = ['RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACAACC',
'RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACGTGG',
'RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACTCAC',
'RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATAGCG',
'RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATCGAC',
'RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CAAGAG',
'RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CATGAC',
'RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CCTTCG',
'RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTATTG',
'RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTCAGC',
'RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GACACG',
'RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCATTC',
'RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCTGCC',
'RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GGCATC',
'RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTGAGG',
'RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTTGAG',
'RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TAGCGG',
'RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TATCTC',
'RRBS_trito_pool_1_TAAGGCGA.ACAACC',
'RRBS_trito_pool_1_TAAGGCGA.ACGTGG',
'RRBS_trito_pool_1_TAAGGCGA.ACTCAC',
'RRBS_trito_pool_1_TAAGGCGA.ATAGCG',
'RRBS_trito_pool_1_TAAGGCGA.ATCGAC',
'RRBS_trito_pool_1_TAAGGCGA.CAAGAG',
'RRBS_trito_pool_1_TAAGGCGA.CATGAC',
'RRBS_trito_pool_1_TAAGGCGA.CCTTCG',
'RRBS_trito_pool_1_TAAGGCGA.CGGTAG',
'RRBS_trito_pool_1_TAAGGCGA.CTATTG',
'RRBS_trito_pool_1_TAAGGCGA.GACACG',
'RRBS_trito_pool_1_TAAGGCGA.GCATTC',
'RRBS_trito_pool_1_TAAGGCGA.GCTGCC',
'RRBS_trito_pool_1_TAAGGCGA.GGCATC',
'RRBS_trito_pool_1_TAAGGCGA.GTGAGG',
'RRBS_trito_pool_1_TAAGGCGA.GTTGAG',
'RRBS_trito_pool_1_TAAGGCGA.TAGCGG',
'RRBS_trito_pool_1_TAAGGCGA.TATCTC',
'RRBS_trito_pool_1_TAAGGCGA.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.TGACAG',
'RRBS_trito_pool_1_TAAGGCGA.TGCTGC']
print(total_matrix.shape)
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))
total_matrix = total_matrix.astype(str).apply(''.join)
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' '))
tott.to_csv("normal_cll_9.phy", header=None, index=None)
print(tott.shape)
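# Layout note (inferred from the code, not from a spec): after the column-wise
# join above, each output line is one sample: its column name, a space, then
# the concatenated 0/1/? methylation calls over all positions; this gives a
# relaxed PHYLIP-like matrix for the downstream phylogeny step.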
|
evanbiederstedt/RRBSfun
|
epiphen/tests/normalCll9.py
|
Python
|
mit
| 3,672
|
[
"MCell"
] |
63977185fd0b275312777b7feb1a4eade795583d22d8a8cd50e742c0a70cef6f
|
# Copyright 2008 Brian Boyer, Ryan Mark, Angela Nitzke, Joshua Pollock,
# Stuart Tiffen, Kayla Webley and the Medill School of Journalism, Northwestern
# University.
#
# This file is part of Crunchberry Pie.
#
# Crunchberry Pie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Crunchberry Pie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with Crunchberry Pie. If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime
from django.db import models
from bartender.models import Article
from django.contrib.auth.models import User
from django.forms import ModelForm
import search
class Letter(models.Model):
"""Letters to the Editor"""
user = models.ForeignKey(User,verbose_name="Author")
article = models.ForeignKey(Article,null=True,blank=True,verbose_name='In reply to article')
letter = models.ForeignKey('Letter',null=True,blank=True,related_name='replies',verbose_name='In reply to letter')
title = models.CharField(max_length=100)
body = models.TextField()
offensive = models.BooleanField(default=False)
notify = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
def get_absolute_url(self):
return "/letters/%i/" % self.id
def __unicode__(self):
return u"%s" % (self.title)
search.register(Letter,fields=[{"field_name":"title","is_title":True},{"field_name":"body"}])
class LetterForm(ModelForm):
"""Form for the Letters"""
class Meta:
model=Letter
exclude = ('created','offensive','user','article','letter','editor_pick')
class EditorPickManager(models.Manager):
def get_published(self):
# datetime.now must be called here; passing the callable itself would
# compare against the function object instead of the current time
return self.filter(publish=True, pub_date__lte=datetime.now())
def get_top_two_published(self):
return self.filter(publish=True, pub_date__lte=datetime.now()).order_by("-pub_date")[:2]
class EditorPick(models.Model):
"""A letter picked by an editor"""
letter = models.ForeignKey(Letter,related_name="editor_pick")
pub_date = models.DateTimeField("Publish date", default=datetime.now)
publish = models.BooleanField("Publish on site", default=True,
help_text='Editors picks will not appear on the site until their "publish date".')
objects = EditorPickManager()
def __unicode__(self):
return u"'%s'" % (self.letter.title)
|
brianboyer/newsmixer
|
social/letters/models.py
|
Python
|
gpl-3.0
| 2,816
|
[
"Brian"
] |
aeea14e02a0b6345588b393da5adb1a35168f1baeeae088d83594f50ea5f17cf
|
'''This will take an aligned multiple FASTA file with multiple genes
and create individual FASTA alignment files for each gene and simplify the
fasta headers.
Copyright 2016 by Shawn Rupp'''
import argparse
from glob import glob
def splitFasta(infile, outdir):
# Open input file and split into one alignment per gene
print("\tSplitting fasta file into one file per gene...")
passed = 0
excluded = 0
# Create log file
log = ""
path = outdir.split("/")[:-2]
for i in path:
log += i + "/"
log += "splitFastaLog.txt"
with open(log, "w") as logfile:
logfile.write("Transcripts with only one species\n\n")
# Parse input fasta
with open(infile, "r") as fasta:
newid = True
prev = True
seq = ""
n = 0
for line in fasta:
if line.strip():
if line[0] == ">":
prev = True
build, geneid = convertHeader(line)
# Concatenate lines for all species for each gene
if newid == True:
# Set reference species ID as file name
filename = geneid
newid = False
else:
# Concatenate remaining lines
line = line.upper()
if ("A" not in line or "C" not in line or "G" not in line
or "T" not in line):
pass
else:
# Save gene if it contains nucleotides
if prev == True:
n += 1
seq += build
prev = False  # was a no-op comparison (==); an assignment is intended here
seq += str(line)
elif not line.strip() and newid == False:
# Use empty lines to determine where genes end
if n >= 2 and seq.count("\n") > 3:
# Print gene sequences to file if there are at least two
# species and reset for next gene
outfile = (outdir + filename + "." + str(n) + ".fa")
with open(outfile, "w") as output:
output.write(seq)
newid = True
prev = False
seq = ""
n = 0
passed += 1
else:
# Skip genes with only one sequence and save ID in log
with open(log, "a") as logfile:
logfile.write(geneid + "\n")
excluded += 1
newid = True
with open(log, "a") as logfile:
logfile.write(("\nTotal transcripts written to file: {}\n").format(passed))
logfile.write(("Total transcripts with only one sequence: {}").format(excluded))
def convertHeader(line):
'''Returns a header containing only the genome build name.'''
if "_" in line:
# Extract relevant data from UCSC header
genebuild = line[1:].split()[0]
genebuild = genebuild.split("_")
if line[1] == "E":
# Ensembl IDs
build = ">" + str(genebuild[1]) + "\n"
geneid = str(genebuild[0].split(".")[0])
elif line[1] == "N":
# NCBI IDs
build = ">" + str(genebuild[2]) + "\n"
geneid = str(genebuild[0]) + "_" + str(genebuild[1])
elif "TRINITY" in line:
line = line.split("TRINITY-")[1]
line = line.strip()
build = ">" + line[:line.find("-")] + "\n"
geneid = line[line.find("-")+1:]
else:
# Extract build and geneid
build = ">" + line.split(".")[0][1:].rstrip() + "\n"
geneid = str(line.split(".")[1]).rstrip()
if geneid and build:
return build, geneid
else:
print("Please use a fasta file with Ensembl, NCBI, or Galaxy Stitch \
Gene Blocks IDs.")
quit()
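# Worked example of the Ensembl branch above (hypothetical header): for the line
# ">ENSG00000139618_hg38", genebuild becomes ["ENSG00000139618", "hg38"], so
# build is ">hg38\n" and geneid is "ENSG00000139618".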
def main():
parser = argparse.ArgumentParser(description="This will take the \
aligned multiple FASTA file with multiple genes and create individual FASTA \
alignment files for each gene and simplify the fasta headers.")
parser.add_argument("-i", help="path to input file.")
parser.add_argument("-o", help="path to output directory.")
args = parser.parse_args()
infile = args.i
outdir = args.o
if outdir[-1] != "/":
outdir += "/"
splitFasta(infile, outdir)
if __name__ == "__main__":
main()
|
WilsonSayresLab/AlignmentProcessor
|
bin/01_SplitFasta.py
|
Python
|
gpl-3.0
| 3,584
|
[
"Galaxy"
] |
1426d7328a4fe11146a06e710eca9bc0b4a559946f65593d60dd3f6012c9654f
|
# Copyright (c) 2014 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this list
# of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Fan, Yugang <yugang.fan@intel.com>
import time
import json
import re
import colorsys
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import (
NoSuchElementException,
StaleElementReferenceException,
NoAlertPresentException,
WebDriverException)
from atip.tizen import tizen
from atip.common import common
try:
from urlparse import urljoin, urlparse
except ImportError:
from urllib.parse import urljoin, urlparse
class WebAPP(common.APP):
def __init__(self, app_config=None, app_name=None, apk_pkg_name=None, apk_activity_name=None):
self.driver = None
self.app_type = common.APP_TYPE_WEB
self.app_name = app_name
self.app_id = ""
self.text_value = {}
self.color_dict = {"rgb(255, 0, 0)": "red","rgb(0, 255, 0)": "green","rgb(0, 0, 255)": "blue","rgb(255, 255, 0)": "yellow","rgb(0, 0, 0)": "black","rgb(0, 128, 0)": "green","rgb(255, 255, 255)": "white","rgba(0, 0, 0, 0)": "white"}
apk_activity_name = apk_activity_name
apk_pkg_name = apk_pkg_name
if "platform" in app_config and "name" in app_config["platform"]:
if app_config["platform"]["name"].upper().find('TIZEN') >= 0:
self.app_id = tizen.get_appid_by_name(
self.app_name, app_config["platform"], app_config["tizen_user"])
if app_config["platform"]["name"].upper().find('ANDROID') >= 0:
if apk_activity_name is None and apk_pkg_name is None:
if "app_launcher" in app_config and app_config["app_launcher"] == "XWalkLauncher":
self.app_name = self.app_name.replace("-", "_")
apk_name_update = "".join(
[i.capitalize() for i in self.app_name.split("_") if i])
apk_activity_name = ".%sActivity" % apk_name_update
apk_pkg_name = "org.xwalk.%s" % self.app_name
if "app_launcher" in app_config and app_config["app_launcher"] == "CordovaLauncher":
self.app_name = self.app_name.replace("-", "_")
apk_activity_name = ".%s" % self.app_name
apk_pkg_name = "org.xwalk.%s" % self.app_name
app_config_str = json.dumps(app_config).replace(
"TEST_APP_NAME", self.app_name).replace(
"TEST_APP_ID", self.app_id).replace(
"TEST_PKG_NAME", apk_pkg_name).replace(
"TEST_ACTIVITY_NAME", apk_activity_name)
self.app_config = json.loads(app_config_str)
if "url-prefix" in app_config:
self.url_prefix = app_config["url-prefix"]
else:
self.url_prefix = ""
def __get_element_by_xpath(self, xpath, display=True):
try:
element = self.driver.find_element_by_xpath(xpath)
if display:
try:
if element.is_displayed():
return element
except StaleElementReferenceException:
pass
else:
return element
print "Failed to get element"
except Exception as e:
print "Failed to get element: %s" % e
return None
def __get_element_by_key_attr(self, key, attr, display=True):
xpath = "//*[@%s='%s']" % (attr, key)
try:
element = self.driver.find_element_by_xpath(xpath)
if display:
try:
if element.is_displayed():
return element
except StaleElementReferenceException:
pass
else:
return element
print "Failed to get element"
except Exception as e:
print "Failed to get element: %s" % e
return None
def __get_element_by_tag(self, key, display=True):
try:
# selenium exposes find_element_by_tag_name, not find_element_by_tag
element = self.driver.find_element_by_tag_name(key)
return element
except Exception as e:
print "Failed to get element: %s" % e
return None
def __get_element_by_key(self, key, display=True):
try:
for i_element in self.driver.find_elements_by_xpath(str(
"//*[@id='%(key)s']|"
"//*[@name='%(key)s']|"
"//*[@value='%(key)s']|"
"//*[contains(@class, '%(key)s')]|"
"//div[contains(text(), '%(key)s')]|"
"//button[contains(text(), '%(key)s')]|"
"//input[contains(text(), '%(key)s')]|"
"//textarea[contains(text(), '%(key)s')]|"
"//a[contains(text(), '%(key)s')]") % {'key': key}):
if display:
try:
if i_element.is_displayed():
return i_element
except StaleElementReferenceException:
pass
else:
return i_element
print "Failed to get element"
except Exception as e:
print "Failed to get element: %s" % e
return None
def __get_element_by_keys(self, key_p, key_c, display=True):
try:
for i_element in self.driver.find_elements_by_xpath(str(
"//*[@id='%(key)s']|"
"//*[@name='%(key)s']|"
"//*[@value='%(key)s']|"
"//*[contains(@class, '%(key)s')]|"
"//div[contains(text(), '%(key)s')]|"
"//button[contains(text(), '%(key)s')]|"
"//input[contains(text(), '%(key)s')]|"
"//textarea[contains(text(), '%(key)s')]|"
"//a[contains(text(), '%(key)s')]") % {'key': key_p}):
get_element = False
if display:
try:
if i_element.is_displayed():
get_element = True
except StaleElementReferenceException:
pass
else:
get_element = True
if get_element:
print "%s ++ %s" % (i_element.get_attribute("id"), i_element.get_attribute("class"))
for ii_element in i_element.find_elements_by_xpath(str(
"./*[@id='%(key)s']|"
"./*[@name='%(key)s']|"
"./*[@value='%(key)s']|"
"./*[contains(@class, '%(key)s')]|"
"./div[contains(text(), '%(key)s')]|"
"./button[contains(text(), '%(key)s')]|"
"./input[contains(text(), '%(key)s')]|"
"./textarea[contains(text(), '%(key)s')]|"
"./a[contains(text(), '%(key)s')]") % {'key': key_c}):
if display:
try:
if ii_element.is_displayed():
return ii_element
except StaleElementReferenceException:
pass
else:
return ii_element
print "Failed to get element"
except Exception as e:
print "Failed to get element: %s" % e
return None
def __check_normal_text(self, text, display=True):
try:
for i_element in self.driver.find_elements_by_xpath(str(
'//*[@value="{text}"]|'
'//*[contains(normalize-space(.),"{text}") '
'and not(./*[contains(normalize-space(.),"{text}")])]'
.format(text=text))):
if display:
try:
if i_element.is_displayed():
return i_element
except StaleElementReferenceException:
pass
else:
return i_element
except Exception as e:
print "Failed to get element: %s" % e
return None
def check_normal_text_element_not_exist(self, text, key, display=True):
element = self.__get_element_by_key(key, display)
if element:
try:
e_list = element.find_elements_by_xpath(str(
'//*[@value="{text}"]|'
'//*[contains(normalize-space(.),"{text}") '
'and not(./*[contains(normalize-space(.),"{text}")])]'
.format(text=text)))
for i_element in e_list:
if i_element.text == text:
return False
return True
except Exception as e:
print "Failed to get element: %s" % e
return False
def __check_normal_text_element(self, text, key, display=True):
element = self.__get_element_by_key(key, display)
if element:
try:
for i_element in element.find_elements_by_xpath(str(
'//*[@value="{text}"]|'
'//*[contains(normalize-space(.),"{text}") '
'and not(./*[contains(normalize-space(.),"{text}")])]'
.format(text=text))):
if display:
try:
if i_element.is_displayed():
return i_element
except StaleElementReferenceException:
pass
else:
return i_element
except Exception as e:
print "Failed to get element: %s" % e
return None
def compare_two_values(self, first=None, second=None):
try:
if eval(self.text_value[first]) < eval(self.text_value[second]):
return True
else:
return False
except Exception as e:
print "Failed to compare these two param: %s" % e
return False
def save_content(self, p_name=None, key=None):
try:
js_script = 'var style=document.getElementById(\"' + key + '\").innerHTML; return style'
style = self.driver.execute_script(js_script)
self.text_value[p_name] = style
return True
except Exception as e:
print "Failed to get element: %s" % e
return False
def launch_app(self):
try:
desired_capabilities = self.app_config["desired-capabilities"]
self.driver = WebDriver(
str(self.app_config["driver-url"]), desired_capabilities)
except Exception as e:
print "Failed to launch %s: %s" % (self.app_name, e)
return False
return True
def switch_url(self, url, with_prefix=True):
if with_prefix:
url = urljoin(self.url_prefix, url)
try:
self.driver.get(url)
except Exception as e:
print "Failed to visit %s: %s" % (url, e)
return False
return True
def title(self):
try:
return self.driver.title
except Exception as e:
print "Failed to get title: %s" % e
return None
def current_url(self):
try:
return self.driver.current_url
except Exception as e:
print "Failed to get current url: %s" % e
return None
def reload(self):
self.driver.refresh()
return True
def back(self):
self.driver.back()
return True
def forward(self):
self.driver.forward()
return True
def check_normal_text_timeout(self, text=None, display=True, timeout=2):
end_time = time.time() + timeout
while time.time() < end_time:
if self.__check_normal_text(text, display):
return True
time.sleep(0.2)
return False
def check_normal_text_element_timeout(
self, text=None, key=None, display=True, timeout=2):
end_time = time.time() + timeout
while time.time() < end_time:
if self.__check_normal_text_element(text, key, display):
return True
time.sleep(0.2)
return False
def check_normal_text_element_timeout_with_color(
self, text=None, key=None, color=None, display=True, timeout=2):
end_time = time.time() + timeout
while time.time() < end_time:
if self.__check_normal_text_element(text, key, display):
if self.check_text_color(key, color):
return True
time.sleep(0.2)
return False
def check_normal_element_timeout_with_color(
self, key=None, color=None, display=True, timeout=2):
end_time = time.time() + timeout
while time.time() < end_time:
if self.check_background_color(key, color):
return True
time.sleep(0.2)
return False
def check_background_color(self, key=None, color=None, display=True):
try:
js_script = 'var bg_color=document.getElementById(\"' + key + '\").style.backgroundColor; return bg_color'
bg_color = self.driver.execute_script(js_script)
if not bg_color:
js_script = 'var element=document.getElementById(\"' + key + '\");' \
' if(element.currentStyle) {return element.currentStyle.backgroundColor;} ' \
' else { return document.defaultView.getComputedStyle(element,null).backgroundColor; } '
bg_color = self.driver.execute_script(js_script)
if not bg_color:
bg_color = "white"
number = re.match(r'[A-Za-z]+$',bg_color)
if not number:
bg_color = self.color_dict[bg_color]
if bg_color.strip() == color:
return True
except Exception as e:
print "Failed to get element color: %s" % e
return False
def check_text_color(self, key=None, color=None, display=True):
try:
js_script = 'var text_color=document.getElementById(\"' + key + '\").style.color; return text_color'
text_color = self.driver.execute_script(js_script)
if not text_color:
js_script = 'var element=document.getElementById(\"' + key + '\");' \
' if(element.currentStyle) {return element.currentStyle.color;} ' \
' else { return document.defaultView.getComputedStyle(element,null).color; } '
text_color = self.driver.execute_script(js_script)
if not text_color:
text_color = "black"
is_rgb = re.match(r'[A-Za-z]+$',text_color)
if not is_rgb:
text_color = self.color_dict[text_color]
if text_color.strip() == color:
return True
except Exception as e:
print "Failed to get element: %s" % e
return False
def check_content_type(self, key=None, display=True):
try:
js_script = 'var text=document.getElementById(\"' + key + '\").innerText; return text'
text = self.driver.execute_script(js_script)
if text.strip() == '':
return 'none'
number = re.match(r'(-?\d+)(\.\d+)?',text)
if number:
if "." in text:
return "float"
else:
return "int"
else:
if text.upper() == "TRUE" or text.upper() == "FALSE":
return "boolean"
else:
return "string"
except Exception as e:
print "Failed to get element text: %s" % e
def press_element_by_key(self, key, display=True):
element = self.__get_element_by_key(key, display)
print "%s == %s" % (element.get_attribute("id"), element.get_attribute("class"))
if element:
element.click()
return True
return False
def press_element_by_keys(self, key_p, key_c, display=True):
element = self.__get_element_by_keys(key_p, key_c, display)
print "%s == %s" % (element.get_attribute("id"), element.get_attribute("class"))
if element:
element.click()
return True
return False
def press_element_by_key_attr(self, key, attr, display=True):
element = self.__get_element_by_key_attr(key, attr, display)
print "%s == %s" % (element.get_attribute("id"), element.get_attribute("class"))
if element:
element.click()
return True
return False
def click_element_by_keys(self, key_p, key_c, display=True):
element = self.__get_element_by_keys(key_p, key_c, display)
print "%s == %s" % (element.get_attribute("id"), element.get_attribute("class"))
if element:
ActionChains(self.driver).click(element).perform()
return True
return False
def click_element_by_key(self, key, display=True):
element = self.__get_element_by_key(key, display)
print "%s == %s" % (element.get_attribute("id"), element.get_attribute("class"))
if element:
ActionChains(self.driver).click(element).perform()
return True
return False
# * The method click_element_by_key will fail when the virtual keyboard (VKB) covers the button; a JS click avoids this issue.
def click_element_by_id_with_js(self, key, display=True):
element = self.__get_element_by_key_attr(key, "id", display)
print "%s == %s" % (element.get_attribute("id"), element.get_attribute("class"))
if element:
js_script = 'document.getElementById(\"' + key + '\").click()'
self.driver.execute_script(js_script)
return True
return False
def click_element_coords(self, x, y, key, display=True):
element = self.__get_element_by_key(key, display)
if element:
ActionChains(self.driver).move_to_element_with_offset(
element, x, y).click().perform()
return True
return False
def fill_element_by_key(self, key, text, display=True):
element = self.__get_element_by_key(key, display)
if element:
element.send_keys(text)
return True
return False
def fill_element_by_key_attr(self, key, attr, text, display=True):
element = self.__get_element_by_key_attr(key, attr, display)
if element:
element.send_keys(text)
return True
return False
def check_checkbox_by_key(self, key, display=True):
element = self.__get_element_by_xpath(str(
"//input[@id='%(key)s'][@type='checkbox']|"
"//input[@name='%(key)s'][@type='checkbox']") % {'key': key}, display)
if element:
if not element.is_selected():
element.click()
return True
return False
def uncheck_checkbox_by_key(self, key, display=True):
element = self.__get_element_by_xpath(str(
"//input[@id='%(key)s'][@type='checkbox']|"
"//input[@name='%(key)s'][@type='checkbox']") % {'key': key}, display)
if element:
if element.is_selected():
element.click()
return True
return False
def get_alert_text(self):
try:
alert_element = self.driver.switch_to_alert()
if alert_element:
return alert_element.text
except Exception as e:
print "Failed to get alert text: %s" % e
return None
def check_alert_existing(self):
try:
self.driver.switch_to_alert().text
except NoAlertPresentException:
return False
return True
def accept_alert(self):
try:
alert_element = self.driver.switch_to_alert()
alert_element.accept()
return True
except Exception as e:
print "Failed to accept alert: %s" % e
return False
def quit(self):
if self.driver:
self.driver.quit()
def launch_webapp_by_name(context, app_name, apk_pkg_name=None, apk_activity_name=None):
    assert context.web_config, "web_config must be set before launching a web app"
    if app_name in context.apps:
        context.apps[app_name].quit()
    context.apps[app_name] = WebAPP(context.web_config, app_name, apk_pkg_name, apk_activity_name)
    context.app = context.apps[app_name]
    assert context.app.launch_app(), "failed to launch app '%s'" % app_name
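# --- Hedged usage sketch (added for illustration; not part of the original file).
# Assumes a behave-style `context` whose `web_config` has already been loaded;
# the app name "notes" and the element ids below are hypothetical.
#
#   launch_webapp_by_name(context, "notes")
#   context.app.fill_element_by_key("search_input", "hello")
#   context.app.click_element_by_id_with_js("search_button")
#   context.app.quit()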
|
cicili/tools
|
atip/atip/web/web.py
|
Python
|
mit
| 22,534
|
[
"VisIt"
] |
1e30e2d9df2f04b385ea7474a7ed484ed789eca9208c9745da0895771792a5c2
|
#!/usr/bin/env python3
import argparse
import os
from pyxpad import PyXPad
from Qt.QtWidgets import QApplication
import sys
def main():
"""
Data visualisation and analysis tool in Python,
intended to be familiar to users of XPAD.
Primarily for IDAM data from the MAST tokamak experiment,
    but it can currently also be used to view NetCDF files.
"""
# Add command line arguments
parser = argparse.ArgumentParser(description=main.__doc__)
parser.add_argument("-c", "--config", nargs=1, help="Config file to load",
default=None)
parser.add_argument("-i", "--ignore-config", help="Ignore existing config files",
action="store_true", default=False)
args = parser.parse_args()
loadfile = args.config[0] if args.config is not None else None
app = QApplication(sys.argv)
window = PyXPad(loadfile=loadfile, ignoreconfig=args.ignore_config)
window.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
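# Hedged usage sketch (added for illustration; not part of the original file).
# The flags mirror the argparse definitions above; "session.cfg" is a
# hypothetical file name.
#
#   python -m pyxpad                        # use any existing config
#   python -m pyxpad --config session.cfg   # load a specific config file
#   python -m pyxpad --ignore-config        # start fresh, ignoring saved configs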
|
bendudson/pyxpad
|
pyxpad/__main__.py
|
Python
|
gpl-3.0
| 1,015
|
[
"NetCDF"
] |
aa07244f24a19ea28a42749ad3abb79394560151ac4d1abdfcd545917d5b9770
|
#!/usr/local/epd/bin/python
#------------------------------------------------------------------------------------------------------
#
# Dirac propagator in the phase space x-px
#
#------------------------------------------------------------------------------------------------------
import numpy as np
import scipy.fftpack as fftpack
import h5py
import time
import sympy
#from pyfft.cuda import Plan
import pycuda.gpuarray as gpuarray
import pycuda.driver as cuda
import pycuda.autoinit
from pycuda.compiler import SourceModule
import pycuda.reduction as reduction
import cufft_wrapper as cuda_fft
#-----------------------------------------------------------------------------------------
gpu_array_copy_source = """
#include <pycuda-complex.hpp>
#include<math.h>
__global__ void Kernel(pycuda::complex<double> *W_new , pycuda::complex<double> *W)
{
const int X_gridDIM = blockDim.x * gridDim.z;
const int P_gridDIM = gridDim.x;
const int indexTotal = threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM * blockIdx.x ;
W_new[indexTotal] = W[indexTotal];
}
"""
pickup_negatives_source = """
//............................................................................................
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
__global__ void pickup_negatives_Kernel( pycuda::complex<double> *W_neg,
pycuda::complex<double> *W11, pycuda::complex<double> *W22, pycuda::complex<double> *W33, pycuda::complex<double> *W44 )
{
const int X_gridDIM = blockDim.x * gridDim.z;
const int P_gridDIM = gridDim.x;
const int indexTotal = threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM * blockIdx.x ;
pycuda::complex<double> value = W11[indexTotal];
value += W22[indexTotal];
value += W33[indexTotal];
value += W44[indexTotal];
double value_re = pycuda::real<double>( value );
if( value_re < 0. ) W_neg[indexTotal] = pycuda::complex<double>(value_re,0.);
else W_neg[indexTotal] = pycuda::complex<double>(0. , 0.);
}
"""
transmission_source = """
//............................................................................................
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s
__global__ void transmission_Kernel( pycuda::complex<double> *W_transmission,
pycuda::complex<double> *W11, pycuda::complex<double> *W22, pycuda::complex<double> *W33, pycuda::complex<double> *W44 )
{
const int X_gridDIM = blockDim.x * gridDim.z;
const int P_gridDIM = gridDim.x;
const int indexTotal = threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM * blockIdx.x ;
//const int i = (blockIdx.x + P_gridDIM/2) %% P_gridDIM ;
const int j = (threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM/2) %% X_gridDIM ;
double x = dx*( j - 0.5*X_gridDIM );
//double theta = dtheta*( i - 0.5*P_gridDIM );
pycuda::complex<double> value = W11[indexTotal];
value += W22[indexTotal];
value += W33[indexTotal];
value += W44[indexTotal];
//double value_re = pycuda::real<double>( value );
if( x > 10. ) W_transmission[indexTotal] = value;
else W_transmission[indexTotal] = pycuda::complex<double>(0. , 0.);
}
"""
#------------------------------------------------------------------------------------------
CUDAsource_AbsorbBoundary_x = """
//............................................................................................
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
__global__ void Kernel( double width,
pycuda::complex<double> *W11, pycuda::complex<double> *W12, pycuda::complex<double> *W13, pycuda::complex<double> *W14,
pycuda::complex<double> *W21, pycuda::complex<double> *W22, pycuda::complex<double> *W23, pycuda::complex<double> *W24,
pycuda::complex<double> *W31, pycuda::complex<double> *W32, pycuda::complex<double> *W33, pycuda::complex<double> *W34,
pycuda::complex<double> *W41, pycuda::complex<double> *W42, pycuda::complex<double> *W43, pycuda::complex<double> *W44 )
{
const int X_gridDIM = blockDim.x * gridDim.z;
const int P_gridDIM = gridDim.x;
const int indexTotal = threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM * blockIdx.x ;
int j = threadIdx.x + blockIdx.z*blockDim.x;
double j2 = pow( double(j-X_gridDIM/2)/width , 2);
W11[indexTotal] *= 1. - exp( -j2 );
W12[indexTotal] *= 1. - exp( -j2 );
W13[indexTotal] *= 1. - exp( -j2 );
W14[indexTotal] *= 1. - exp( -j2 );
W21[indexTotal] *= 1. - exp( -j2 );
W22[indexTotal] *= 1. - exp( -j2 );
W23[indexTotal] *= 1. - exp( -j2 );
W24[indexTotal] *= 1. - exp( -j2 );
W31[indexTotal] *= 1. - exp( -j2 );
W32[indexTotal] *= 1. - exp( -j2 );
W33[indexTotal] *= 1. - exp( -j2 );
W34[indexTotal] *= 1. - exp( -j2 );
W41[indexTotal] *= 1. - exp( -j2 );
W42[indexTotal] *= 1. - exp( -j2 );
W43[indexTotal] *= 1. - exp( -j2 );
W44[indexTotal] *= 1. - exp( -j2 );
}
"""
#------------------------------------------------------------------------------------------
CUDAsource_P_plus_Lambda = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s
__global__ void Kernel(
pycuda::complex<double> *W11, pycuda::complex<double> *W12, pycuda::complex<double> *W13, pycuda::complex<double> *W14,
pycuda::complex<double> *W21, pycuda::complex<double> *W22, pycuda::complex<double> *W23, pycuda::complex<double> *W24,
pycuda::complex<double> *W31, pycuda::complex<double> *W32, pycuda::complex<double> *W33, pycuda::complex<double> *W34,
pycuda::complex<double> *W41, pycuda::complex<double> *W42, pycuda::complex<double> *W43, pycuda::complex<double> *W44 )
{
const int X_gridDIM = blockDim.x * gridDim.z;
const int P_gridDIM = gridDim.x;
const int indexTotal = threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM * blockIdx.x ;
const int i = (blockIdx.x + P_gridDIM/2) %% P_gridDIM ;
const int j = (threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM/2) %% X_gridDIM ;
double p1 = dp*( i - 0.5*P_gridDIM );
double lambda1 = dlambda*( j - 0.5*X_gridDIM );
double lambdap1 = p1 + lambda1/2. ;
double shell = sqrt( pow( mass_half*c , 2 ) + pow(lambdap1 , 2) );
pycuda::complex<double> U11 = pycuda::complex<double>( cos( dt*c*shell ), - mass_half*c*sin(c*dt*shell)/shell );
pycuda::complex<double> U44 = pycuda::complex<double>( cos( dt*c*shell ), mass_half*c*sin(c*dt*shell)/shell );
pycuda::complex<double> U14 = pycuda::complex<double>( 0. , -lambdap1 * sin( dt*c*shell )/shell );
pycuda::complex<double> U41 = pycuda::complex<double>( 0. , -lambdap1 * sin( dt*c*shell )/shell );
pycuda::complex<double> U22 = U11;
pycuda::complex<double> U23 = U41;
pycuda::complex<double> U32 = U14;
pycuda::complex<double> U33 = U44;
//..............................................................................................................
pycuda::complex<double> W_Plus11, W_Plus12, W_Plus13, W_Plus14;
pycuda::complex<double> W_Plus21, W_Plus22, W_Plus23, W_Plus24;
pycuda::complex<double> W_Plus31, W_Plus32, W_Plus33, W_Plus34;
pycuda::complex<double> W_Plus41, W_Plus42, W_Plus43, W_Plus44;
//PsiPlus1 = U11 *Psi1[indexTotal] + U14 *Psi4[indexTotal];
//PsiPlus2 = U22 *Psi2[indexTotal] + U23 * Psi3[indexTotal] ;
//PsiPlus3 = U32 *Psi2[indexTotal] + U33 * Psi3[indexTotal] ;
//PsiPlus4 = U41 *Psi1[indexTotal] + U44 *Psi4[indexTotal];
W_Plus11 = U11 *W11[indexTotal] + U14 *W41[indexTotal];
W_Plus21 = U22 *W21[indexTotal] + U23 * W31[indexTotal] ;
W_Plus31 = U32 *W21[indexTotal] + U33 * W31[indexTotal] ;
W_Plus41 = U41 *W11[indexTotal] + U44 *W41[indexTotal];
W11[indexTotal] = W_Plus11;
W21[indexTotal] = W_Plus21;
W31[indexTotal] = W_Plus31;
W41[indexTotal] = W_Plus41;
//.........................
W_Plus12 = U11 *W12[indexTotal] + U14 *W42[indexTotal];
W_Plus22 = U22 *W22[indexTotal] + U23 * W32[indexTotal] ;
W_Plus32 = U32 *W22[indexTotal] + U33 * W32[indexTotal] ;
W_Plus42 = U41 *W12[indexTotal] + U44 *W42[indexTotal];
W12[indexTotal] = W_Plus12;
W22[indexTotal] = W_Plus22;
W32[indexTotal] = W_Plus32;
W42[indexTotal] = W_Plus42;
//........................
W_Plus13 = U11 *W13[indexTotal] + U14 *W43[indexTotal];
W_Plus23 = U22 *W23[indexTotal] + U23 * W33[indexTotal] ;
W_Plus33 = U32 *W23[indexTotal] + U33 * W33[indexTotal] ;
W_Plus43 = U41 *W13[indexTotal] + U44 *W43[indexTotal];
W13[indexTotal] = W_Plus13;
W23[indexTotal] = W_Plus23;
W33[indexTotal] = W_Plus33;
W43[indexTotal] = W_Plus43;
//........................
W_Plus14 = U11 *W14[indexTotal] + U14 *W44[indexTotal];
W_Plus24 = U22 *W24[indexTotal] + U23 *W34[indexTotal] ;
W_Plus34 = U32 *W24[indexTotal] + U33 *W34[indexTotal] ;
W_Plus44 = U41 *W14[indexTotal] + U44 *W44[indexTotal];
W14[indexTotal] = W_Plus14;
W24[indexTotal] = W_Plus24;
W34[indexTotal] = W_Plus34;
W44[indexTotal] = W_Plus44;
}
"""
CUDAsource_P_minus_Lambda = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s;
__global__ void Kernel(
pycuda::complex<double> *W11, pycuda::complex<double> *W12, pycuda::complex<double> *W13, pycuda::complex<double> *W14,
pycuda::complex<double> *W21, pycuda::complex<double> *W22, pycuda::complex<double> *W23, pycuda::complex<double> *W24,
pycuda::complex<double> *W31, pycuda::complex<double> *W32, pycuda::complex<double> *W33, pycuda::complex<double> *W34,
pycuda::complex<double> *W41, pycuda::complex<double> *W42, pycuda::complex<double> *W43, pycuda::complex<double> *W44 )
{
const int X_gridDIM = blockDim.x * gridDim.z;
const int P_gridDIM = gridDim.x;
const int indexTotal = threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM * blockIdx.x ;
const int i = (blockIdx.x + P_gridDIM/2) %% P_gridDIM ;
const int j = (threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM/2) %% X_gridDIM ;
double p1 = dp*( i - 0.5*P_gridDIM );
double lambda1 = dlambda*( j - 0.5*X_gridDIM );
double lambdap1 = p1 - lambda1/2. ;
double shell = sqrt( pow( mass_half*c , 2 ) + pow(lambdap1 , 2) );
pycuda::complex<double> U11 = pycuda::complex<double>( cos( dt*c*shell ), - mass_half*c*sin(-c*dt*shell)/shell );
pycuda::complex<double> U44 = pycuda::complex<double>( cos( dt*c*shell ), mass_half*c*sin(-c*dt*shell)/shell );
pycuda::complex<double> U14 = pycuda::complex<double>( 0. , -lambdap1 * sin( -dt*c*shell )/shell );
pycuda::complex<double> U41 = pycuda::complex<double>( 0. , -lambdap1 * sin( -dt*c*shell )/shell );
pycuda::complex<double> U22 = U11;
pycuda::complex<double> U23 = U41;
pycuda::complex<double> U32 = U14;
pycuda::complex<double> U33 = U44;
//..............................................................................................................
pycuda::complex<double> W_Plus11, W_Plus12, W_Plus13, W_Plus14;
pycuda::complex<double> W_Plus21, W_Plus22, W_Plus23, W_Plus24;
pycuda::complex<double> W_Plus31, W_Plus32, W_Plus33, W_Plus34;
pycuda::complex<double> W_Plus41, W_Plus42, W_Plus43, W_Plus44;
W_Plus11 = U11 *W11[indexTotal] + U41 *W14[indexTotal];
W_Plus21 = U22 *W12[indexTotal] + U32 * W13[indexTotal] ;
W_Plus31 = U23 *W12[indexTotal] + U33 * W13[indexTotal] ;
W_Plus41 = U14 *W11[indexTotal] + U44 *W14[indexTotal];
W11[indexTotal] = W_Plus11;
W12[indexTotal] = W_Plus21;
W13[indexTotal] = W_Plus31;
W14[indexTotal] = W_Plus41;
//.........................
W_Plus12 = U11 *W21[indexTotal] + U41 *W24[indexTotal];
W_Plus22 = U22 *W22[indexTotal] + U32 * W23[indexTotal] ;
W_Plus32 = U23 *W22[indexTotal] + U33 * W23[indexTotal] ;
W_Plus42 = U14 *W21[indexTotal] + U44 *W24[indexTotal];
W21[indexTotal] = W_Plus12;
W22[indexTotal] = W_Plus22;
W23[indexTotal] = W_Plus32;
W24[indexTotal] = W_Plus42;
//........................
W_Plus13 = U11 *W31[indexTotal] + U41 *W34[indexTotal];
W_Plus23 = U22 *W32[indexTotal] + U32 * W33[indexTotal] ;
W_Plus33 = U23 *W32[indexTotal] + U33 * W33[indexTotal] ;
W_Plus43 = U14 *W31[indexTotal] + U44 *W34[indexTotal];
W31[indexTotal] = W_Plus13;
W32[indexTotal] = W_Plus23;
W33[indexTotal] = W_Plus33;
W34[indexTotal] = W_Plus43;
//........................
W_Plus14 = U11 *W41[indexTotal] + U41 *W44[indexTotal];
W_Plus24 = U22 *W42[indexTotal] + U32 *W43[indexTotal] ;
W_Plus34 = U23 *W42[indexTotal] + U33 *W43[indexTotal] ;
W_Plus44 = U14 *W41[indexTotal] + U44 *W44[indexTotal];
W41[indexTotal] = W_Plus14;
W42[indexTotal] = W_Plus24;
W43[indexTotal] = W_Plus34;
W44[indexTotal] = W_Plus44;
}
"""
#------------------------------------------------------------------------------------
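# Added note (a sketch, not part of the original file): in the two momentum-space
# kernels above, shell = sqrt( (mass_half*c)^2 + (p +/- lambda/2)^2 ) is the energy
# shell, and the U entries assemble the exact free-evolution exponential
#   exp( -i*dt*c*( alpha*(p + lambda/2) + beta*mass_half*c ) )    (P_plus_Lambda)
# and its adjoint at p - lambda/2 (P_minus_Lambda), written as
#   cos(dt*c*shell) - i*sin(dt*c*shell) * H / (c*shell).
# P_plus_Lambda multiplies the Wigner matrix from the left and P_minus_Lambda from
# the right, matching the two shifted arguments of W.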
DiracPropagator_X_minus_Theta_source_Base = """
//
// source code for the Dirac propagator half-step evaluated at the shifted argument x - theta/2
// (includes scalar and vector potential interactions)
//
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s // Constants
__device__ double Potential0(double t, double x)
{
return %s ;
}
__device__ double Potential1(double t, double x)
{
return %s ;
}
__device__ double Potential2(double t, double x)
{
return %s ;
}
__device__ double Potential3(double t, double x)
{
return %s ;
}
__device__ double Mass(double t, double x)
{
return %s ;
}
__device__ double VectorPotentialSquareSum(double t, double x)
{
return pow(Potential1(t,x), 2.) + pow(Potential2(t,x), 2.) + pow(Potential3(t,x), 2.);
}
//............................................................................................................
__global__ void DiracPropagation4_Kernel(
pycuda::complex<double> *W11, pycuda::complex<double> *W12, pycuda::complex<double> *W13, pycuda::complex<double> *W14,
pycuda::complex<double> *W21, pycuda::complex<double> *W22, pycuda::complex<double> *W23, pycuda::complex<double> *W24,
pycuda::complex<double> *W31, pycuda::complex<double> *W32, pycuda::complex<double> *W33, pycuda::complex<double> *W34,
pycuda::complex<double> *W41, pycuda::complex<double> *W42, pycuda::complex<double> *W43, pycuda::complex<double> *W44,
double t , pycuda::complex<double> *B_GP_minus_GPU, pycuda::complex<double> *B_GP_plus_GPU, double aGPitaevskii )
{
const int X_gridDIM = blockDim.x * gridDim.z;
const int P_gridDIM = gridDim.x;
const int indexTotal = threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM * blockIdx.x ;
const int i = (blockIdx.x + P_gridDIM/2) %% P_gridDIM ;
const int j = (threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM/2) %% X_gridDIM ;
double x = dx*( j - 0.5*X_gridDIM );
double theta = dtheta*( i - 0.5*P_gridDIM );
double xtheta = x - 0.5*theta ;
double F;
double mass = Mass(t,xtheta);
F = sqrt( pow( mass*c*c*dt ,2.) + VectorPotentialSquareSum(t,xtheta)*dt*dt );
pycuda::complex<double> I = pycuda::complex<double>(0.,1.);
pycuda::complex<double> U11 = pycuda::complex<double>( cos(F) , -mass*c*c*dt*sin(F)/F );
pycuda::complex<double> U33 = pycuda::complex<double>( cos(F) , mass*c*c*dt*sin(F)/F );
pycuda::complex<double> U13,U14;
pycuda::complex<double> U22,U23,U24;
pycuda::complex<double> U31,U32 ;
pycuda::complex<double> U41,U42 ,U44;
double phaseGP = aGPitaevskii * pycuda::real<double>( B_GP_minus_GPU[indexTotal] );
pycuda::complex<double> expV = exp( -dt*D_Theta*theta*theta/2. - I*dt*( Potential0(t,xtheta) + phaseGP ) );
U22 = U11; U44 = U33;
U13 = I*dt*Potential3(t,xtheta)*sin(F)/F;
U14 = dt*(I*Potential1(t,xtheta) + Potential2(t,xtheta) )*sin(F)/F;
U23 = dt*(I*Potential1(t,xtheta) - Potential2(t,xtheta) )*sin(F)/F;
U24 = -U13;
U31 = U13; U32 = U14;
U41 = U23;
U42 = U24;
pycuda::complex<double> PsiPlus1, PsiPlus2, PsiPlus3, PsiPlus4;
//..........................................................................................................
PsiPlus1=expV*( U11*W11[indexTotal] + U13*W31[indexTotal] + U14*W41[indexTotal] );
PsiPlus2=expV*( U22*W21[indexTotal] + U23*W31[indexTotal] + U24*W41[indexTotal] );
PsiPlus3=expV*( U31*W11[indexTotal] + U32*W21[indexTotal] + U33*W31[indexTotal] );
PsiPlus4=expV*( U41*W11[indexTotal] + U42*W21[indexTotal] + U44*W41[indexTotal] );
W11[indexTotal] = PsiPlus1;
W21[indexTotal] = PsiPlus2;
W31[indexTotal] = PsiPlus3;
W41[indexTotal] = PsiPlus4;
//..........................................................................................................
PsiPlus1=expV*( U11*W12[indexTotal] + U13*W32[indexTotal] + U14*W42[indexTotal] );
PsiPlus2=expV*( U22*W22[indexTotal] + U23*W32[indexTotal] + U24*W42[indexTotal] );
PsiPlus3=expV*( U31*W12[indexTotal] + U32*W22[indexTotal] + U33*W32[indexTotal] );
PsiPlus4=expV*( U41*W12[indexTotal] + U42*W22[indexTotal] + U44*W42[indexTotal] );
W12[indexTotal] = PsiPlus1;
W22[indexTotal] = PsiPlus2;
W32[indexTotal] = PsiPlus3;
W42[indexTotal] = PsiPlus4;
//..........................................................................................................
PsiPlus1=expV*( U11*W13[indexTotal] + U13*W33[indexTotal] + U14*W43[indexTotal] );
PsiPlus2=expV*( U22*W23[indexTotal] + U23*W33[indexTotal] + U24*W43[indexTotal] );
PsiPlus3=expV*( U31*W13[indexTotal] + U32*W23[indexTotal] + U33*W33[indexTotal] );
PsiPlus4=expV*( U41*W13[indexTotal] + U42*W23[indexTotal] + U44*W43[indexTotal] );
W13[indexTotal] = PsiPlus1;
W23[indexTotal] = PsiPlus2;
W33[indexTotal] = PsiPlus3;
W43[indexTotal] = PsiPlus4;
//..........................................................................................................
PsiPlus1=expV*( U11*W14[indexTotal] + U13*W34[indexTotal] + U14*W44[indexTotal] );
PsiPlus2=expV*( U22*W24[indexTotal] + U23*W34[indexTotal] + U24*W44[indexTotal] );
PsiPlus3=expV*( U31*W14[indexTotal] + U32*W24[indexTotal] + U33*W34[indexTotal] );
PsiPlus4=expV*( U41*W14[indexTotal] + U42*W24[indexTotal] + U44*W44[indexTotal] );
W14[indexTotal] = PsiPlus1;
W24[indexTotal] = PsiPlus2;
W34[indexTotal] = PsiPlus3;
W44[indexTotal] = PsiPlus4;
}
"""
#------------------------------------------------------------------------------------
DiracPropagator_X_plus_Theta_source_Base = """
//
// source code for the Dirac propagator half-step evaluated at the shifted argument x + theta/2
// (includes scalar and vector potential interactions)
//
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s // Constants
__device__ double Potential0(double t, double x)
{
return %s ;
}
__device__ double Potential1(double t, double x)
{
return %s ;
}
__device__ double Potential2(double t, double x)
{
return %s ;
}
__device__ double Potential3(double t, double x)
{
return %s ;
}
__device__ double Mass(double t, double x)
{
return %s ;
}
__device__ double VectorPotentialSquareSum(double t, double x)
{
return pow(Potential1(t,x), 2.) + pow(Potential2(t,x), 2.) + pow(Potential3(t,x), 2.);
}
//............................................................................................................
__global__ void DiracPropagation4_Kernel(
pycuda::complex<double> *W11, pycuda::complex<double> *W12, pycuda::complex<double> *W13, pycuda::complex<double> *W14,
pycuda::complex<double> *W21, pycuda::complex<double> *W22, pycuda::complex<double> *W23, pycuda::complex<double> *W24,
pycuda::complex<double> *W31, pycuda::complex<double> *W32, pycuda::complex<double> *W33, pycuda::complex<double> *W34,
pycuda::complex<double> *W41, pycuda::complex<double> *W42, pycuda::complex<double> *W43, pycuda::complex<double> *W44,
double t , pycuda::complex<double> *B_GP_minus_GPU, pycuda::complex<double> *B_GP_plus_GPU , double aGPitaevskii)
{
const int X_gridDIM = blockDim.x * gridDim.z;
const int P_gridDIM = gridDim.x;
const int indexTotal = threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM * blockIdx.x ;
const int i = (blockIdx.x + P_gridDIM/2) %% P_gridDIM ;
const int j = (threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM/2) %% X_gridDIM ;
double x = dx*( j - 0.5*X_gridDIM );
double theta = dtheta*( i - 0.5*P_gridDIM );
double xtheta = x + 0.5*theta ;
double F;
double mass = Mass(t,xtheta);
F = sqrt( pow( mass*c*c*dt , 2.) + VectorPotentialSquareSum(t,xtheta)*dt*dt );
pycuda::complex<double> I = pycuda::complex<double>(0.,1.);
pycuda::complex<double> U11 = pycuda::complex<double>( cos(F) , mass*c*c*dt*sin(F)/F );
pycuda::complex<double> U33 = pycuda::complex<double>( cos(F) , -mass*c*c*dt*sin(F)/F );
pycuda::complex<double> U13,U14;
pycuda::complex<double> U22,U23,U24;
pycuda::complex<double> U31,U32 ;
pycuda::complex<double> U41,U42 ,U44;
double phaseGP = aGPitaevskii * pycuda::real<double>( B_GP_plus_GPU[indexTotal] );
pycuda::complex<double> expV = exp( -dt*D_Theta*theta*theta/2. + I*dt*( Potential0(t,xtheta) + phaseGP ) );
U22 = U11; U44 = U33;
U13 = -I*dt*Potential3(t,xtheta)*sin(F)/F;
U14 = -dt*(I*Potential1(t,xtheta) + Potential2(t,xtheta) )*sin(F)/F;
U23 = -dt*(I*Potential1(t,xtheta) - Potential2(t,xtheta) )*sin(F)/F;
U24 = -U13;
U31 = U13; U32 = U14;
U41 = U23;
U42 = U24;
pycuda::complex<double> W_Plus11, W_Plus12, W_Plus13, W_Plus14;
pycuda::complex<double> W_Plus21, W_Plus22, W_Plus23, W_Plus24;
pycuda::complex<double> W_Plus31, W_Plus32, W_Plus33, W_Plus34;
pycuda::complex<double> W_Plus41, W_Plus42, W_Plus43, W_Plus44;
//..........................................................................................................
W_Plus11 = expV*( W11[indexTotal]*U11 + W13[indexTotal]*U31 + W14[indexTotal]*U41 );
W_Plus12 = expV*( W12[indexTotal]*U22 + W13[indexTotal]*U32 + W14[indexTotal]*U42 );
W_Plus13 = expV*( W11[indexTotal]*U13 + W12[indexTotal]*U23 + W13[indexTotal]*U33 );
W_Plus14 = expV*( W11[indexTotal]*U14 + W12[indexTotal]*U24 + W14[indexTotal]*U44 );
W11[indexTotal] = W_Plus11 ;
W12[indexTotal] = W_Plus12 ;
W13[indexTotal] = W_Plus13 ;
W14[indexTotal] = W_Plus14 ;
//..........................................................................................................
W_Plus21 = expV*( W21[indexTotal]*U11 + W23[indexTotal]*U31 + W24[indexTotal]*U41 );
W_Plus22 = expV*( W22[indexTotal]*U22 + W23[indexTotal]*U32 + W24[indexTotal]*U42 );
W_Plus23 = expV*( W21[indexTotal]*U13 + W22[indexTotal]*U23 + W23[indexTotal]*U33 );
W_Plus24 = expV*( W21[indexTotal]*U14 + W22[indexTotal]*U24 + W24[indexTotal]*U44 );
W21[indexTotal] = W_Plus21 ;
W22[indexTotal] = W_Plus22 ;
W23[indexTotal] = W_Plus23 ;
W24[indexTotal] = W_Plus24 ;
//..........................................................................................................
W_Plus31 = expV*( W31[indexTotal]*U11 + W33[indexTotal]*U31 + W34[indexTotal]*U41 );
W_Plus32 = expV*( W32[indexTotal]*U22 + W33[indexTotal]*U32 + W34[indexTotal]*U42 );
W_Plus33 = expV*( W31[indexTotal]*U13 + W32[indexTotal]*U23 + W33[indexTotal]*U33 );
W_Plus34 = expV*( W31[indexTotal]*U14 + W32[indexTotal]*U24 + W34[indexTotal]*U44 );
W31[indexTotal] = W_Plus31 ;
W32[indexTotal] = W_Plus32 ;
W33[indexTotal] = W_Plus33 ;
W34[indexTotal] = W_Plus34 ;
//..........................................................................................................
W_Plus41 = expV*( W41[indexTotal]*U11 + W43[indexTotal]*U31 + W44[indexTotal]*U41 );
W_Plus42 = expV*( W42[indexTotal]*U22 + W43[indexTotal]*U32 + W44[indexTotal]*U42 );
W_Plus43 = expV*( W41[indexTotal]*U13 + W42[indexTotal]*U23 + W43[indexTotal]*U33 );
W_Plus44 = expV*( W41[indexTotal]*U14 + W42[indexTotal]*U24 + W44[indexTotal]*U44 );
W41[indexTotal] = W_Plus41 ;
W42[indexTotal] = W_Plus42 ;
W43[indexTotal] = W_Plus43 ;
W44[indexTotal] = W_Plus44 ;
}
"""
#.........................................................................................................
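# Added note (a sketch, not part of the original file): the two position-space
# kernels above form one half of the split-operator step.  Each multiplies the
# Wigner matrix by the phase exp( -/+ i*dt*( Potential0(t, x -/+ theta/2) + phaseGP ) )
# together with the exact rotation generated by the mass and vector-potential
# terms at the shifted argument; the real factor exp( -dt*D_Theta*theta^2/2 )
# applies Gaussian damping in theta (a diffusion/decoherence term).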
BaseCUDAsource_FilterGPU = """
//
// source code for filtering particles/antiparticles
//
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s; // Constants
//............................................................................................................
__global__ void Filter_Kernel(
pycuda::complex<double> *_Psi11, pycuda::complex<double> *_Psi12, pycuda::complex<double> *_Psi13,
pycuda::complex<double> *_Psi14, pycuda::complex<double> *_Psi21,
pycuda::complex<double> *_Psi22, pycuda::complex<double> *_Psi23, pycuda::complex<double> *_Psi24,
pycuda::complex<double> *_Psi31, pycuda::complex<double> *_Psi32, pycuda::complex<double> *_Psi33,
pycuda::complex<double> *_Psi34, pycuda::complex<double> *_Psi41, pycuda::complex<double> *_Psi42,
pycuda::complex<double> *_Psi43, pycuda::complex<double> *_Psi44, pycuda::complex<double> *psi11,
pycuda::complex<double> *psi12, pycuda::complex<double> *psi13, pycuda::complex<double> *psi14,
pycuda::complex<double> *psi21, pycuda::complex<double> *psi22, pycuda::complex<double> *psi23,
pycuda::complex<double> *psi24, pycuda::complex<double> *psi31, pycuda::complex<double> *psi32,
pycuda::complex<double> *psi33, pycuda::complex<double> *psi34, pycuda::complex<double> *psi41,
pycuda::complex<double> *psi42, pycuda::complex<double> *psi43, pycuda::complex<double> *psi44,
int sign )
{
const int X_gridDIM = blockDim.x * gridDim.z;
const int P_gridDIM = gridDim.x;
const int indexTotal = threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM * blockIdx.x ;
const int i = (blockIdx.x + P_gridDIM/2) %% P_gridDIM ;
const int j = (threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM/2) %% X_gridDIM ;
double p = dp*( i - 0.5*P_gridDIM );
double Lambda = dlambda*( j - 0.5*X_gridDIM );
double bb,sqrtp,aa;
double cc,dd,sqrtpL;
double aaL,bbL;
double ccL,ddL;
sqrtp = 2.*sqrt( Bare_mass*Bare_mass*c*c*c*c + c*c*(p+Lambda/2.)*(p+Lambda/2.) );
aa = 0.5 + sign*(Bare_mass*c*c/sqrtp);
bb = c*(sign*(p+Lambda/2.)/sqrtp);
cc = c*(sign*(p+Lambda/2.)/sqrtp);
dd = 0.5 - sign*(Bare_mass*c*c/sqrtp);
sqrtpL = 2.*sqrt( Bare_mass*Bare_mass*c*c*c*c + c*c*(p-Lambda/2.)*(p-Lambda/2.) );
aaL = 0.5 + sign*(Bare_mass*c*c/sqrtpL);
bbL = c*(sign*(p-Lambda/2.)/sqrtpL);
ccL = c*(sign*(p-Lambda/2.)/sqrtpL);
ddL = 0.5 - (sign*Bare_mass*c*c/sqrtpL);
pycuda::complex<double> Projector00, ProjectorL00;
pycuda::complex<double> Projector03, ProjectorL03;
pycuda::complex<double> Projector11, ProjectorL11;
pycuda::complex<double> Projector21, ProjectorL21;
pycuda::complex<double> Projector30, ProjectorL30;
pycuda::complex<double> Projector12, ProjectorL12;
pycuda::complex<double> Projector22, ProjectorL22;
pycuda::complex<double> Projector33, ProjectorL33;
pycuda::complex<double> Projector01, ProjectorL01;
pycuda::complex<double> Projector02, ProjectorL02;
pycuda::complex<double> Projector10, ProjectorL10;
pycuda::complex<double> Projector13, ProjectorL13;
pycuda::complex<double> Projector20, ProjectorL20;
pycuda::complex<double> Projector23, ProjectorL23;
pycuda::complex<double> Projector31, ProjectorL31;
pycuda::complex<double> Projector32, ProjectorL32;
Projector00 = pycuda::complex<double>(aa,0.);
ProjectorL00 = pycuda::complex<double>(aaL,0.);
Projector03 = pycuda::complex<double>(bb,0.);
ProjectorL03 = pycuda::complex<double>(bbL,0.);
Projector11 = pycuda::complex<double>(aa,0.);
ProjectorL11 = pycuda::complex<double>(aaL,0.);
Projector21 = pycuda::complex<double>(bb,0.);
ProjectorL21 = pycuda::complex<double>(bbL,0.);
Projector30 = pycuda::complex<double>(cc,0.);
ProjectorL30 = pycuda::complex<double>(ccL,0.);
Projector12 = pycuda::complex<double>(cc,0.);
ProjectorL12 = pycuda::complex<double>(ccL,0.);
Projector22 = pycuda::complex<double>(dd,0.);
ProjectorL22 = pycuda::complex<double>(ddL,0.);
Projector33 = pycuda::complex<double>(dd,0.);
ProjectorL33 = pycuda::complex<double>(ddL,0.);
Projector01 = pycuda::complex<double>(0.,0.);
ProjectorL01 = pycuda::complex<double>(0.,0.);
Projector02 = pycuda::complex<double>(0.,0.);
ProjectorL02 = pycuda::complex<double>(0.,0.);
Projector10 = pycuda::complex<double>(0.,0.);
ProjectorL10 = pycuda::complex<double>(0.,0.);
Projector13 = pycuda::complex<double>(0.,0.);
ProjectorL13 = pycuda::complex<double>(0.,0.);
Projector20 = pycuda::complex<double>(0.,0.);
ProjectorL20 = pycuda::complex<double>(0.,0.);
Projector23 = pycuda::complex<double>(0.,0.);
ProjectorL23 = pycuda::complex<double>(0.,0.);
Projector31 = pycuda::complex<double>(0.,0.);
ProjectorL31 = pycuda::complex<double>(0.,0.);
Projector32 = pycuda::complex<double>(0.,0.);
ProjectorL32 = pycuda::complex<double>(0.,0.);
_Psi11[indexTotal] = (Projector00*psi11[indexTotal] + Projector01*psi21[indexTotal] +\
Projector02*psi31[indexTotal] + Projector03*psi41[indexTotal])*ProjectorL00 +\
(Projector00*psi12[indexTotal] + Projector01*psi22[indexTotal] +\
Projector02*psi32[indexTotal] + Projector03*psi42[indexTotal])*ProjectorL10 +\
(Projector00*psi13[indexTotal] + Projector01*psi23[indexTotal] +\
Projector02*psi33[indexTotal] + Projector03*psi43[indexTotal])*ProjectorL20 +\
(Projector00*psi14[indexTotal] + Projector01*psi24[indexTotal] +\
Projector02*psi34[indexTotal] + Projector03*psi44[indexTotal])*ProjectorL30;
_Psi12[indexTotal] = (Projector00*psi11[indexTotal] + Projector01*psi21[indexTotal] +\
Projector02*psi31[indexTotal] + Projector03*psi41[indexTotal])*ProjectorL01 +\
(Projector00*psi12[indexTotal] + Projector01*psi22[indexTotal] +\
Projector02*psi32[indexTotal] + Projector03*psi42[indexTotal])*ProjectorL11 +\
(Projector00*psi13[indexTotal] + Projector01*psi23[indexTotal] +\
Projector02*psi33[indexTotal] + Projector03*psi43[indexTotal])*ProjectorL21 +\
(Projector00*psi14[indexTotal] + Projector01*psi24[indexTotal] +\
Projector02*psi34[indexTotal] + Projector03*psi44[indexTotal])*ProjectorL31;
_Psi13[indexTotal] = (Projector00*psi11[indexTotal] + Projector01*psi21[indexTotal] +\
Projector02*psi31[indexTotal] + Projector03*psi41[indexTotal])*ProjectorL02 +\
(Projector00*psi12[indexTotal] + Projector01*psi22[indexTotal] +\
Projector02*psi32[indexTotal] + Projector03*psi42[indexTotal])*ProjectorL12 +\
(Projector00*psi13[indexTotal] + Projector01*psi23[indexTotal] +\
Projector02*psi33[indexTotal] + Projector03*psi43[indexTotal])*ProjectorL22 +\
(Projector00*psi14[indexTotal] + Projector01*psi24[indexTotal] +\
Projector02*psi34[indexTotal] + Projector03*psi44[indexTotal])*ProjectorL32;
_Psi14[indexTotal] = (Projector00*psi11[indexTotal] + Projector01*psi21[indexTotal] +\
Projector02*psi31[indexTotal] + Projector03*psi41[indexTotal])*ProjectorL03 +\
(Projector00*psi12[indexTotal] + Projector01*psi22[indexTotal] +\
Projector02*psi32[indexTotal] + Projector03*psi42[indexTotal])*ProjectorL13 +\
(Projector00*psi13[indexTotal] + Projector01*psi23[indexTotal] +\
Projector02*psi33[indexTotal] + Projector03*psi43[indexTotal])*ProjectorL23 +\
(Projector00*psi14[indexTotal] + Projector01*psi24[indexTotal] +\
Projector02*psi34[indexTotal] + Projector03*psi44[indexTotal])*ProjectorL33;
_Psi21[indexTotal] = (Projector10*psi11[indexTotal] + Projector11*psi21[indexTotal] +\
Projector12*psi31[indexTotal] + Projector13*psi41[indexTotal])*ProjectorL00 +\
(Projector10*psi12[indexTotal] + Projector11*psi22[indexTotal] +\
Projector12*psi32[indexTotal] + Projector13*psi42[indexTotal])*ProjectorL10 +\
(Projector10*psi13[indexTotal] + Projector11*psi23[indexTotal] +\
Projector12*psi33[indexTotal] + Projector13*psi43[indexTotal])*ProjectorL20 +\
(Projector10*psi14[indexTotal] + Projector11*psi24[indexTotal] +\
Projector12*psi34[indexTotal] + Projector13*psi44[indexTotal])*ProjectorL30;
_Psi22[indexTotal] = (Projector10*psi11[indexTotal] + Projector11*psi21[indexTotal] +\
Projector12*psi31[indexTotal] + Projector13*psi41[indexTotal])*ProjectorL01 +\
(Projector10*psi12[indexTotal] + Projector11*psi22[indexTotal] +\
Projector12*psi32[indexTotal] + Projector13*psi42[indexTotal])*ProjectorL11 +\
(Projector10*psi13[indexTotal] + Projector11*psi23[indexTotal] +\
Projector12*psi33[indexTotal] + Projector13*psi43[indexTotal])*ProjectorL21 +\
(Projector10*psi14[indexTotal] + Projector11*psi24[indexTotal] +\
Projector12*psi34[indexTotal] + Projector13*psi44[indexTotal])*ProjectorL31;
_Psi23[indexTotal] = (Projector10*psi11[indexTotal] + Projector11*psi21[indexTotal] +\
Projector12*psi31[indexTotal] + Projector13*psi41[indexTotal])*ProjectorL02 +\
(Projector10*psi12[indexTotal] + Projector11*psi22[indexTotal] +\
Projector12*psi32[indexTotal] + Projector13*psi42[indexTotal])*ProjectorL12 +\
(Projector10*psi13[indexTotal] + Projector11*psi23[indexTotal] +\
Projector12*psi33[indexTotal] + Projector13*psi43[indexTotal])*ProjectorL22 +\
(Projector10*psi14[indexTotal] + Projector11*psi24[indexTotal] +\
Projector12*psi34[indexTotal] + Projector13*psi44[indexTotal])*ProjectorL32;
_Psi24[indexTotal] = (Projector10*psi11[indexTotal] + Projector11*psi21[indexTotal] +\
Projector12*psi31[indexTotal] + Projector13*psi41[indexTotal])*ProjectorL03 +\
(Projector10*psi12[indexTotal] + Projector11*psi22[indexTotal] +\
Projector12*psi32[indexTotal] + Projector13*psi42[indexTotal])*ProjectorL13 +\
(Projector10*psi13[indexTotal] + Projector11*psi23[indexTotal] +\
Projector12*psi33[indexTotal] + Projector13*psi43[indexTotal])*ProjectorL23 +\
(Projector10*psi14[indexTotal] + Projector11*psi24[indexTotal] +\
Projector12*psi34[indexTotal] + Projector13*psi44[indexTotal])*ProjectorL33;
_Psi31[indexTotal] = (Projector20*psi11[indexTotal] + Projector21*psi21[indexTotal] +\
Projector22*psi31[indexTotal] + Projector23*psi41[indexTotal])*ProjectorL00 +\
(Projector20*psi12[indexTotal] + Projector21*psi22[indexTotal] +\
Projector22*psi32[indexTotal] + Projector23*psi42[indexTotal])*ProjectorL10 +\
(Projector20*psi13[indexTotal] + Projector21*psi23[indexTotal] +\
Projector22*psi33[indexTotal] + Projector23*psi43[indexTotal])*ProjectorL20 +\
(Projector20*psi14[indexTotal] + Projector21*psi24[indexTotal] +\
Projector22*psi34[indexTotal] + Projector23*psi44[indexTotal])*ProjectorL30;
_Psi32[indexTotal] = (Projector20*psi11[indexTotal] + Projector21*psi21[indexTotal] +\
Projector22*psi31[indexTotal] + Projector23*psi41[indexTotal])*ProjectorL01 +\
(Projector20*psi12[indexTotal] + Projector21*psi22[indexTotal] +\
Projector22*psi32[indexTotal] + Projector23*psi42[indexTotal])*ProjectorL11 +\
(Projector20*psi13[indexTotal] + Projector21*psi23[indexTotal] +\
Projector22*psi33[indexTotal] + Projector23*psi43[indexTotal])*ProjectorL21 +\
(Projector20*psi14[indexTotal] + Projector21*psi24[indexTotal] +\
Projector22*psi34[indexTotal] + Projector23*psi44[indexTotal])*ProjectorL31;
_Psi33[indexTotal] = (Projector20*psi11[indexTotal] + Projector21*psi21[indexTotal] +\
Projector22*psi31[indexTotal] + Projector23*psi41[indexTotal])*ProjectorL02 +\
(Projector20*psi12[indexTotal] + Projector21*psi22[indexTotal] +\
Projector22*psi32[indexTotal] + Projector23*psi42[indexTotal])*ProjectorL12 +\
(Projector20*psi13[indexTotal] + Projector21*psi23[indexTotal] +\
Projector22*psi33[indexTotal] + Projector23*psi43[indexTotal])*ProjectorL22 +\
(Projector20*psi14[indexTotal] + Projector21*psi24[indexTotal] +\
Projector22*psi34[indexTotal] + Projector23*psi44[indexTotal])*ProjectorL32;
_Psi34[indexTotal] = (Projector20*psi11[indexTotal] + Projector21*psi21[indexTotal] +\
Projector22*psi31[indexTotal] + Projector23*psi41[indexTotal])*ProjectorL03 +\
(Projector20*psi12[indexTotal] + Projector21*psi22[indexTotal] +\
Projector22*psi32[indexTotal] + Projector23*psi42[indexTotal])*ProjectorL13 +\
(Projector20*psi13[indexTotal] + Projector21*psi23[indexTotal] +\
Projector22*psi33[indexTotal] + Projector23*psi43[indexTotal])*ProjectorL23 +\
(Projector20*psi14[indexTotal] + Projector21*psi24[indexTotal] +\
Projector22*psi34[indexTotal] + Projector23*psi44[indexTotal])*ProjectorL33;
_Psi41[indexTotal] = (Projector30*psi11[indexTotal] + Projector31*psi21[indexTotal] +\
Projector32*psi31[indexTotal] + Projector33*psi41[indexTotal])*ProjectorL00 +\
(Projector30*psi12[indexTotal] + Projector31*psi22[indexTotal] +\
Projector32*psi32[indexTotal] + Projector33*psi42[indexTotal])*ProjectorL10 +\
(Projector30*psi13[indexTotal] + Projector31*psi23[indexTotal] +\
Projector32*psi33[indexTotal] + Projector33*psi43[indexTotal])*ProjectorL20 +\
(Projector30*psi14[indexTotal] + Projector31*psi24[indexTotal] +\
Projector32*psi34[indexTotal] + Projector33*psi44[indexTotal])*ProjectorL30;
_Psi42[indexTotal] = (Projector30*psi11[indexTotal] + Projector31*psi21[indexTotal] +\
Projector32*psi31[indexTotal] + Projector33*psi41[indexTotal])*ProjectorL01 +\
(Projector30*psi12[indexTotal] + Projector31*psi22[indexTotal] +\
Projector32*psi32[indexTotal] + Projector33*psi42[indexTotal])*ProjectorL11 +\
(Projector30*psi13[indexTotal] + Projector31*psi23[indexTotal] +\
Projector32*psi33[indexTotal] + Projector33*psi43[indexTotal])*ProjectorL21 +\
(Projector30*psi14[indexTotal] + Projector31*psi24[indexTotal] +\
Projector32*psi34[indexTotal] + Projector33*psi44[indexTotal])*ProjectorL31;
_Psi43[indexTotal] = (Projector30*psi11[indexTotal] + Projector31*psi21[indexTotal] +\
Projector32*psi31[indexTotal] + Projector33*psi41[indexTotal])*ProjectorL02 +\
(Projector30*psi12[indexTotal] + Projector31*psi22[indexTotal] +\
Projector32*psi32[indexTotal] + Projector33*psi42[indexTotal])*ProjectorL12 +\
(Projector30*psi13[indexTotal] + Projector31*psi23[indexTotal] +\
Projector32*psi33[indexTotal] + Projector33*psi43[indexTotal])*ProjectorL22 +\
(Projector30*psi14[indexTotal] + Projector31*psi24[indexTotal] +\
Projector32*psi34[indexTotal] + Projector33*psi44[indexTotal])*ProjectorL32;
_Psi44[indexTotal] = (Projector30*psi11[indexTotal] + Projector31*psi21[indexTotal] +\
Projector32*psi31[indexTotal] + Projector33*psi41[indexTotal])*ProjectorL03 +\
(Projector30*psi12[indexTotal] + Projector31*psi22[indexTotal] +\
Projector32*psi32[indexTotal] + Projector33*psi42[indexTotal])*ProjectorL13 +\
(Projector30*psi13[indexTotal] + Projector31*psi23[indexTotal] +\
Projector32*psi33[indexTotal] + Projector33*psi43[indexTotal])*ProjectorL23 +\
(Projector30*psi14[indexTotal] + Projector31*psi24[indexTotal] +\
Projector32*psi34[indexTotal] + Projector33*psi44[indexTotal])*ProjectorL33;
}
"""
#..........................................................................................................
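# Added note (a sketch, not part of the original file): the aa/bb/cc/dd entries in
# the Filter kernel above build the 1D energy projectors
#   Lambda_sign(q) = 1/2 * ( 1 + sign*( c*alpha*q + beta*Bare_mass*c^2 )/E(q) ),
# with E(q) = sqrt( Bare_mass^2*c^4 + c^2*q^2 ).  The projector is applied at
# q = p + lambda/2 from the left and at q = p - lambda/2 from the right, so that
# sign = +1 keeps the particle subspace and sign = -1 the antiparticle subspace.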
Potential_0_Average_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s; // Constants
__device__ double Potential0(double t, double x)
{
return %s ;
}
//............................................................................................................
__global__ void Kernel( pycuda::complex<double>* preExpectationValue,
pycuda::complex<double>* W11, pycuda::complex<double>* W22, pycuda::complex<double>* W33, pycuda::complex<double>* W44 ,
double t)
{
const int X_gridDIM = blockDim.x * gridDim.z;
const int P_gridDIM = gridDim.x;
const int indexTotal = threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM * blockIdx.x ;
const int i = (blockIdx.x + P_gridDIM/2) %% P_gridDIM ;
const int j = (threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM/2) %% X_gridDIM ;
double x = dx*( j - 0.5*X_gridDIM );
double p = dp*( i - 0.5*P_gridDIM );
preExpectationValue[indexTotal] = Potential0( t, x)*(W11[indexTotal] + W22[indexTotal] + W33[indexTotal] + W44[indexTotal]);
}
"""
Potential_1_Average_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s; // Constants
__device__ double Potential1(double t, double x)
{
return %s ;
}
//............................................................................................................
__global__ void Kernel( pycuda::complex<double>* preExpectationValue,
pycuda::complex<double>* W11, pycuda::complex<double>* W22, pycuda::complex<double>* W33, pycuda::complex<double>* W44 ,
double t )
{
const int X_gridDIM = blockDim.x * gridDim.z;
const int P_gridDIM = gridDim.x;
const int indexTotal = threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM * blockIdx.x ;
const int i = (blockIdx.x + P_gridDIM/2) %% P_gridDIM ;
const int j = (threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM/2) %% X_gridDIM ;
double x = dx*( j - 0.5*X_gridDIM );
double p = dp*( i - 0.5*P_gridDIM );
preExpectationValue[indexTotal] = Potential1( t, x)*(W11[indexTotal] + W22[indexTotal] + W33[indexTotal] + W44[indexTotal]);
}
"""
Potential_2_Average_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s; // Constants
__device__ double Potential2(double t, double x)
{
return %s ;
}
//............................................................................................................
__global__ void Kernel( pycuda::complex<double>* preExpectationValue,
pycuda::complex<double>* W11, pycuda::complex<double>* W22, pycuda::complex<double>* W33, pycuda::complex<double>* W44 ,
double t)
{
const int X_gridDIM = blockDim.x * gridDim.z;
const int P_gridDIM = gridDim.x;
const int indexTotal = threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM * blockIdx.x ;
const int i = (blockIdx.x + P_gridDIM/2) %% P_gridDIM ;
const int j = (threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM/2) %% X_gridDIM ;
double x = dx*( j - 0.5*X_gridDIM );
double p = dp*( i - 0.5*P_gridDIM );
preExpectationValue[indexTotal] = Potential2( t, x)*(W11[indexTotal] + W22[indexTotal] + W33[indexTotal] + W44[indexTotal]);
}
"""
Potential_3_Average_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s; // Constants
__device__ double Potential3(double t, double x)
{
return %s ;
}
//............................................................................................................
__global__ void Kernel( pycuda::complex<double>* preExpectationValue,
pycuda::complex<double>* W11, pycuda::complex<double>* W22, pycuda::complex<double>* W33, pycuda::complex<double>* W44 ,
double t)
{
const int X_gridDIM = blockDim.x * gridDim.z;
const int P_gridDIM = gridDim.x;
const int indexTotal = threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM * blockIdx.x ;
const int i = (blockIdx.x + P_gridDIM/2) %% P_gridDIM ;
const int j = (threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM/2) %% X_gridDIM ;
double x = dx*( j - 0.5*X_gridDIM );
double p = dp*( i - 0.5*P_gridDIM );
preExpectationValue[indexTotal] = Potential3( t, x)*(W11[indexTotal] + W22[indexTotal] + W33[indexTotal] + W44[indexTotal]);
}
"""
#..........................................................................
D_1_Potential_0_Average_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s; // Constants
__device__ double D_1_Potential_0(double t, double x)
{
return %s ;
}
//............................................................................................................
__global__ void Kernel( pycuda::complex<double>* out,
pycuda::complex<double>* W11, pycuda::complex<double>* W22, pycuda::complex<double>* W33, pycuda::complex<double>* W44,
double t)
{
const int X_gridDIM = blockDim.x * gridDim.z;
//const int P_gridDIM = gridDim.x;
const int indexTotal = threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM * blockIdx.x ;
//const int i = (blockIdx.x + P_gridDIM/2) %% P_gridDIM ;
const int j = (threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM/2) %% X_gridDIM ;
double x = dx*( j - 0.5*X_gridDIM );
//double p = dp*( i - 0.5*P_gridDIM );
out[indexTotal] = D_1_Potential_0( t, x)*(W11[indexTotal] + W22[indexTotal] + W33[indexTotal] + W44[indexTotal]);
}
"""
X1_D_1_Potential_0_Average_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s; // Constants
__device__ double D_1_Potential_0(double t, double x)
{
return %s ;
}
//............................................................................................................
__global__ void Kernel( pycuda::complex<double>* out,
pycuda::complex<double>* W11, pycuda::complex<double>* W22, pycuda::complex<double>* W33, pycuda::complex<double>* W44,
double t)
{
const int X_gridDIM = blockDim.x * gridDim.z;
//const int P_gridDIM = gridDim.x;
const int indexTotal = threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM * blockIdx.x ;
//const int i = (blockIdx.x + P_gridDIM/2) %% P_gridDIM ;
const int j = (threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM/2) %% X_gridDIM ;
double x = dx*( j - 0.5*X_gridDIM );
//double p = dp*( i - 0.5*P_gridDIM );
out[indexTotal] = x*D_1_Potential_0( t, x)*(W11[indexTotal] + W22[indexTotal] + W33[indexTotal] + W44[indexTotal]);
}
"""
P1_D_1_Potential_0_Average_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s; // Constants
__device__ double D_1_Potential_0(double t, double x)
{
return %s ;
}
//............................................................................................................
__global__ void Kernel( pycuda::complex<double>* out,
pycuda::complex<double>* W11, pycuda::complex<double>* W22, pycuda::complex<double>* W33, pycuda::complex<double>* W44,
double t)
{
const int X_gridDIM = blockDim.x * gridDim.z;
const int P_gridDIM = gridDim.x;
const int indexTotal = threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM * blockIdx.x ;
const int i = (blockIdx.x + P_gridDIM/2) %% P_gridDIM ;
const int j = (threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM/2) %% X_gridDIM ;
double x = dx*( j - 0.5*X_gridDIM );
double p = dp*( i - 0.5*P_gridDIM );
out[indexTotal] = p*D_1_Potential_0( t, x)*(W11[indexTotal] + W22[indexTotal] + W33[indexTotal] + W44[indexTotal]);
}
"""
X_Average_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s; // Constants
__global__ void Kernel( pycuda::complex<double> *out,
pycuda::complex<double> *W11, pycuda::complex<double> *W22, pycuda::complex<double> *W33, pycuda::complex<double> *W44)
{
const int X_gridDIM = blockDim.x * gridDim.z;
const int P_gridDIM = gridDim.x;
const int indexTotal = threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM * blockIdx.x ;
const int i = (blockIdx.x + P_gridDIM/2) %% P_gridDIM ;
const int j = (threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM/2) %% X_gridDIM ;
double x = dx*( j - 0.5*X_gridDIM );
//double theta = dtheta*( i - 0.5*P_gridDIM );
pycuda::complex<double> _out;
_out = W11[indexTotal]*x;
_out += W22[indexTotal]*x;
_out += W33[indexTotal]*x;
_out += W44[indexTotal]*x;
out[indexTotal] = _out;
}
"""
P_Average_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s; // Constants
__global__ void Kernel( pycuda::complex<double> *out,
pycuda::complex<double> *W11, pycuda::complex<double> *W22, pycuda::complex<double> *W33, pycuda::complex<double> *W44)
{
const int X_gridDIM = blockDim.x * gridDim.z;
const int P_gridDIM = gridDim.x;
const int indexTotal = threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM * blockIdx.x ;
const int i = (blockIdx.x + P_gridDIM/2) %% P_gridDIM ;
const int j = (threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM/2) %% X_gridDIM ;
//double x = dx*( j - 0.5*X_gridDIM );
double p = dp*( i - 0.5*P_gridDIM );
pycuda::complex<double> _out;
_out = W11[indexTotal]*p;
_out += W22[indexTotal]*p;
_out += W33[indexTotal]*p;
_out += W44[indexTotal]*p;
out[indexTotal] = _out;
}
"""
XP_Average_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s; // Constants
__global__ void Kernel( pycuda::complex<double> *out,
pycuda::complex<double> *W11, pycuda::complex<double> *W22, pycuda::complex<double> *W33, pycuda::complex<double> *W44)
{
const int X_gridDIM = blockDim.x * gridDim.z;
const int P_gridDIM = gridDim.x;
const int indexTotal = threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM * blockIdx.x ;
const int i = (blockIdx.x + P_gridDIM/2) %% P_gridDIM ;
const int j = (threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM/2) %% X_gridDIM ;
double x = dx*( j - 0.5*X_gridDIM );
double p = dp*( i - 0.5*P_gridDIM );
pycuda::complex<double> _out;
_out = W11[indexTotal]*x*p;
_out += W22[indexTotal]*x*p;
_out += W33[indexTotal]*x*p;
_out += W44[indexTotal]*x*p;
out[indexTotal] = _out;
}
"""
XX_Average_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s; // Constants
__global__ void Kernel( pycuda::complex<double> *out,
pycuda::complex<double> *W11, pycuda::complex<double> *W22, pycuda::complex<double> *W33, pycuda::complex<double> *W44)
{
const int X_gridDIM = blockDim.x * gridDim.z;
const int P_gridDIM = gridDim.x;
const int indexTotal = threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM * blockIdx.x ;
//const int i = (blockIdx.x + P_gridDIM/2) %% P_gridDIM ;
const int j = (threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM/2) %% X_gridDIM ;
double x = dx*( j - 0.5*X_gridDIM );
//double theta = dtheta*( i - 0.5*P_gridDIM );
pycuda::complex<double> _out;
_out = W11[indexTotal]*x*x;
_out += W22[indexTotal]*x*x;
_out += W33[indexTotal]*x*x;
_out += W44[indexTotal]*x*x;
out[indexTotal] = _out;
}
"""
PP_Average_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s; // Constants
__global__ void Kernel( pycuda::complex<double> *out,
pycuda::complex<double> *W11, pycuda::complex<double> *W22, pycuda::complex<double> *W33, pycuda::complex<double> *W44)
{
const int X_gridDIM = blockDim.x * gridDim.z;
const int P_gridDIM = gridDim.x;
const int indexTotal = threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM * blockIdx.x ;
const int i = (blockIdx.x + P_gridDIM/2) %% P_gridDIM ;
//const int j = (threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM/2) %% X_gridDIM ;
//double x = dx*( j - 0.5*X_gridDIM );
double p = dp*( i - 0.5*P_gridDIM );
pycuda::complex<double> _out;
_out = W11[indexTotal]*p*p;
_out += W22[indexTotal]*p*p;
_out += W33[indexTotal]*p*p;
_out += W44[indexTotal]*p*p;
out[indexTotal] = _out;
}
"""
Alpha_1_Average_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s; // Constants
__global__ void Kernel( pycuda::complex<double> *out,
pycuda::complex<double> *W14, pycuda::complex<double> *W23, pycuda::complex<double> *W32, pycuda::complex<double> *W41)
{
const int X_gridDIM = blockDim.x * gridDim.z;
const int P_gridDIM = gridDim.x;
const int indexTotal = threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM * blockIdx.x ;
//const int i = (blockIdx.x + P_gridDIM/2) %% P_gridDIM ;
//const int j = (threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM/2) %% X_gridDIM ;
//double x = dx*( j - 0.5*X_gridDIM );
//double theta = dtheta*( i - 0.5*P_gridDIM );
pycuda::complex<double> _out;
_out = W14[indexTotal];
_out += W23[indexTotal];
_out += W32[indexTotal];
_out += W41[indexTotal];
out[indexTotal] = _out;
}
"""
Alpha_2_Average_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s; // Constants
__global__ void Kernel( pycuda::complex<double> *out,
pycuda::complex<double> *W14, pycuda::complex<double> *W23, pycuda::complex<double> *W32, pycuda::complex<double> *W41)
{
pycuda::complex<double> I = pycuda::complex<double>(0.,1.);
const int X_gridDIM = blockDim.x * gridDim.z;
const int P_gridDIM = gridDim.x;
const int indexTotal = threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM * blockIdx.x ;
//const int i = (blockIdx.x + P_gridDIM/2) %% P_gridDIM ;
//const int j = (threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM/2) %% X_gridDIM ;
//double x = dx*( j - 0.5*X_gridDIM );
//double theta = dtheta*( i - 0.5*P_gridDIM );
pycuda::complex<double> _out;
_out = -I*W14[indexTotal];
_out += I*W23[indexTotal];
_out += -I*W32[indexTotal];
_out += I*W41[indexTotal];
out[indexTotal] = _out;
}
"""
P1_Alpha_1_Average_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s; // Constants
__global__ void Kernel( pycuda::complex<double> *out,
pycuda::complex<double> *W14, pycuda::complex<double> *W23, pycuda::complex<double> *W32, pycuda::complex<double> *W41)
{
const int X_gridDIM = blockDim.x * gridDim.z;
const int P_gridDIM = gridDim.x;
const int indexTotal = threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM * blockIdx.x ;
const int i = (blockIdx.x + P_gridDIM/2) %% P_gridDIM ;
//const int j = (threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM/2) %% X_gridDIM ;
//double x = dx*( j - 0.5*X_gridDIM );
double p = dp*( i - 0.5*P_gridDIM );
pycuda::complex<double> _out;
_out = W14[indexTotal];
_out += W23[indexTotal];
_out += W32[indexTotal];
_out += W41[indexTotal];
out[indexTotal] = _out*p;
}
"""
X1_Alpha_1_Average_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s; // Constants
__global__ void Kernel( pycuda::complex<double> *out,
pycuda::complex<double> *W14, pycuda::complex<double> *W23, pycuda::complex<double> *W32, pycuda::complex<double> *W41)
{
const int X_gridDIM = blockDim.x * gridDim.z;
const int P_gridDIM = gridDim.x;
const int indexTotal = threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM * blockIdx.x ;
//const int i = (blockIdx.x + P_gridDIM/2) %% P_gridDIM ;
const int j = (threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM/2) %% X_gridDIM ;
double x = dx*( j - 0.5*X_gridDIM );
//double p = dp*( i - 0.5*P_gridDIM );
pycuda::complex<double> _out;
_out = W14[indexTotal];
_out += W23[indexTotal];
_out += W32[indexTotal];
_out += W41[indexTotal];
out[indexTotal] = _out*x;
}
"""
#..........................................................................................................
# Non-linear phase space
gpu_sum_axis0_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s
__global__ void Kernel( pycuda::complex<double> *Probability_x ,
pycuda::complex<double> *W11, pycuda::complex<double> *W22, pycuda::complex<double> *W33, pycuda::complex<double> *W44,
int P_DIM)
{
int X_DIM = blockDim.x*gridDim.x;
const int index_x = threadIdx.x + blockDim.x*blockIdx.x ;
pycuda::complex<double> sum=0.;
for(int i=0; i<P_DIM; i++ ){
sum += W11[ index_x + i*X_DIM ];
sum += W22[ index_x + i*X_DIM ];
sum += W33[ index_x + i*X_DIM ];
sum += W44[ index_x + i*X_DIM ];
}
Probability_x[ index_x ] = pycuda::real(sum);
}
"""
roll_FirstRowCopy_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s
__global__ void Kernel( pycuda::complex<double> *W, pycuda::complex<double> *Probability_X , int P_DIM)
{
int X_DIM = blockDim.x*gridDim.x;
const int index_x = threadIdx.x + blockDim.x*blockIdx.x ;
pycuda::complex<double> firstRow = Probability_X[index_x];
for(int i=0; i<P_DIM; i++ ) W[ index_x + i*X_DIM ] = firstRow;
}
"""
#.................................................................
theta_fp_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s
__device__ double f( double p)
{
return %s;
}
__global__ void Kernel( pycuda::complex<double> *B )
{
const int X_gridDIM = blockDim.x * gridDim.z;
const int P_gridDIM = gridDim.x;
const int indexTotal = threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM * blockIdx.x ;
const int i = (blockIdx.x + P_gridDIM/2) %% P_gridDIM ;
const int j = (threadIdx.x + blockIdx.z*blockDim.x + X_gridDIM/2) %% X_gridDIM ;
double p = dp*( i - 0.5*P_gridDIM );
if( p >= 0. )
B[ indexTotal ] *= f(p);
else
B[ indexTotal ] *= -f(p);
}
"""
#................................................................
class GPU_WignerDirac2D_4x4:
"""
	Propagator in the X-Theta representation.
	This version propagates all 16 components of the 4x4 Wigner matrix.
	The algorithm advances in time at double the rate of the ordinary Dirac propagator: dt = 2 dx / c.
"""
def __init__(self, X_gridDIM, P_gridDIM, X_amplitude, P_amplitude,
mass, c, dt,timeSteps, skipFrames = 1,
frameSaveMode='Density', antiParticleNorm = True, antiParticleStepFiltering=False,
			computeEnergy = False):
self.mass = mass
self.c = c
self.dt = dt
self.timeSteps = timeSteps
self.frameSaveMode = frameSaveMode
self.skipFrames = skipFrames
self.antiParticleNorm = antiParticleNorm
self.antiParticleStepFiltering = antiParticleStepFiltering
self.computeEnergy = computeEnergy
#self.dampingModel = dampingModel
self.X_gridDIM = X_gridDIM
self.P_gridDIM = P_gridDIM
self.X_amplitude = X_amplitude
self.P_amplitude = P_amplitude
self.dX = 2.*X_amplitude/float(X_gridDIM)
self.dP = 2.*P_amplitude/float(P_gridDIM)
self.dTheta = 2.*np.pi/(2.*P_amplitude)
self.Theta_amplitude = self.dTheta*P_gridDIM/2.
self.dLambda = 2.*np.pi/(2.*X_amplitude)
self.Lambda_amplitude = self.dLambda*X_gridDIM/2.
self.X_range = np.linspace(-self.X_amplitude , self.X_amplitude -self.dX , self.X_gridDIM )
self.Lambda_range = np.linspace(-self.Lambda_amplitude , self.Lambda_amplitude-self.dLambda ,self.X_gridDIM)
self.Theta_range = np.linspace(-self.Theta_amplitude , self.Theta_amplitude - self.dTheta , self.P_gridDIM)
self.P_range = np.linspace(-self.P_amplitude , self.P_amplitude-self.dP , self.P_gridDIM)
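		# Added sketch (not part of the original file): theta and lambda are the
		# Fourier conjugates of p and x, hence the grid relations above,
		#   dTheta = 2*pi/(2*P_amplitude)   and   dLambda = 2*pi/(2*X_amplitude).
		# For example, X_amplitude = 10 and X_gridDIM = 512 give dLambda = pi/10
		# and Lambda_amplitude = (pi/10)*512/2 = 25.6*pi.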
self.X = fftpack.fftshift(self.X_range)[np.newaxis,:]
self.Theta = fftpack.fftshift(self.Theta_range)[:,np.newaxis]
self.Lambda = fftpack.fftshift(self.Lambda_range)[np.newaxis,:]
self.P = fftpack.fftshift(self.P_range)[:,np.newaxis]
#........................... Strings .........................................
self.CUDA_constants = '__constant__ double Bare_mass = %f; '%(self.mass)
self.CUDA_constants += '__constant__ double mass_half=%f;'%( 0.0001 )
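        # NOTE: mass_half is hard-coded to 0.0001 and is not derived from
        # self.mass; kernels that use mass_half ignore the constructor's
        # mass argument.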
self.CUDA_constants += '__constant__ double c=%f; '%self.c
self.CUDA_constants += '__constant__ double dt=%f; '%self.dt
self.CUDA_constants += '__constant__ double dx=%f; '%self.dX
self.CUDA_constants += '__constant__ double dlambda=%f; '%self.dLambda
self.CUDA_constants += '__constant__ double dp=%f; '%self.dP
self.CUDA_constants += '__constant__ double dtheta=%f; '%self.dTheta
try:
self.CUDA_constants += '__constant__ double D_Theta = %f;'%(self.D_Theta )
self.CUDA_constants += '__constant__ double D_Lambda = %f;'%(self.D_Lambda)
        except AttributeError:
pass
try:
self.CUDA_constants += '__constant__ double lambdaBar = %f;'%(self.lambdaBar)
        except AttributeError:
pass
#............................... Initializing CUDA ...........................
self.D_1_Potential_0_String = sympy.ccode( sympy.N( sympy.diff(self.Potential_0_String,'x') ) )
self.D_1_Potential_0_String = str( self.D_1_Potential_0_String ) + ' + 0.*x'
self.D_1_Potential_1_String = sympy.ccode( sympy.N( sympy.diff(self.Potential_1_String,'x') ) )
self.D_1_Potential_1_String = str( self.D_1_Potential_1_String ) + ' + 0.*x'
self.D_1_Potential_2_String = sympy.ccode( sympy.N( sympy.diff(self.Potential_2_String,'x') ) )
self.D_1_Potential_2_String = str( self.D_1_Potential_2_String ) + ' + 0.*x'
self.D_1_Potential_3_String = sympy.ccode( sympy.N( sympy.diff(self.Potential_3_String,'x') ) )
self.D_1_Potential_3_String = str( self.D_1_Potential_3_String ) + ' + 0.*x'
print ' D_1_Potential_0 = ', self.D_1_Potential_0_String
self.Make_CUFFTPlan()
self.Allocate_GPUVariables()
self.Compiling_CUDA_functions()
self.phase_LambdaTheta_GPU = gpuarray.to_gpu(
np.ascontiguousarray( np.exp( 0.5*1j*self.Lambda*self.Theta ) , dtype=np.complex128) )
#-------------------------------------------------------------------------------------------------------------------
# Gaussian PARTICLE spinors
#-------------------------------------------------------------------------------------------------------------------
def Make_CUFFTPlan(self):
self.plan_Z2Z_1D_Axes0 = cuda_fft.Plan_Z2Z_2D_Axis0( (self.P_gridDIM,self.X_gridDIM) )
self.plan_Z2Z_1D_Axes1 = cuda_fft.Plan_Z2Z_2D_Axis1( (self.P_gridDIM,self.X_gridDIM) )
self.plan_Z2Z_1D = cuda_fft.Plan_Z2Z( (self.X_gridDIM,) , batch=1 )
def Allocate_GPUVariables(self):
self.X_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.X + 0.*self.P, dtype = np.complex128) )
self.P_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.P + 0.*self.X, dtype = np.complex128) )
self.Theta_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.Theta + 0.*self.X, dtype = np.complex128) )
self.XX_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.X**2 + 0.*self.P, dtype=np.complex128) )
self.PP_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.P**2 + 0.*self.X, dtype=np.complex128) )
self.XP_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.P*self.X ,dtype=np.complex128) )
self.Potential0 = self.PotentialFunction_0( 0., self.X + 0j*self.P )
self.Potential0_GPU = gpuarray.to_gpu( self.Potential0 )
self.D_1_Potential0_GPU = gpuarray.to_gpu( self.D_1_PotentialFunction_0( 0., self.X + 0j*self.P) )
        #self.D_1_Potential1_GPU  = gpuarray.to_gpu( self.D_1_PotentialFunction_1( self.X + 0j*self.P) )
#self.D_1_Potential2_GPU = gpuarray.to_gpu( self.D_1_PotentialFunction_2( self.X + 0j*self.P) )
#self.D_1_Potential3_GPU = gpuarray.to_gpu( self.D_1_PotentialFunction_3( self.X + 0j*self.P) )
self.X1_D_1_Potential0_GPU = gpuarray.to_gpu( self.X*self.D_1_PotentialFunction_0( 0., self.X + 0j*self.P) )
self.P1_D_1_Potential0_GPU = gpuarray.to_gpu( self.P*self.D_1_PotentialFunction_0( 0., self.X + 0j*self.P) )
def Compiling_CUDA_functions(self):
#print CUDAsource_P_plus_Lambda%(self.CUDA_constants)
self.DiracPropagator_P_plus_Lambda = \
SourceModule(CUDAsource_P_plus_Lambda%(self.CUDA_constants),arch="sm_20").get_function( "Kernel" )
self.DiracPropagator_P_minus_Lambda = \
SourceModule(CUDAsource_P_minus_Lambda%(self.CUDA_constants),arch="sm_20").get_function( "Kernel" )
DiracPropagator_X_minus_Theta_source = DiracPropagator_X_minus_Theta_source_Base%(
self.CUDA_constants,self.Potential_0_String,self.Potential_1_String,
self.Potential_2_String,self.Potential_3_String,self.Mass_String)
self.DiracPropagator_X_minus_Theta = \
SourceModule( DiracPropagator_X_minus_Theta_source ,arch="sm_20").get_function( "DiracPropagation4_Kernel" )
DiracPropagator_X_plus_Theta_source = DiracPropagator_X_plus_Theta_source_Base%(
self.CUDA_constants,self.Potential_0_String,self.Potential_1_String,
self.Potential_2_String,self.Potential_3_String,self.Mass_String)
self.DiracPropagator_X_plus_Theta = \
SourceModule( DiracPropagator_X_plus_Theta_source ,arch="sm_20").get_function( "DiracPropagation4_Kernel" )
self.gpu_array_copy_Function = SourceModule(gpu_array_copy_source).get_function( "Kernel" )
self.FilterElectrons_Function = \
SourceModule(BaseCUDAsource_FilterGPU%(self.CUDA_constants),arch="sm_20").get_function("Filter_Kernel" )
self.AbsorbBoundary_x_Function = SourceModule(CUDAsource_AbsorbBoundary_x).get_function("Kernel")
self.pickup_negatives_Function = SourceModule(pickup_negatives_source).get_function("pickup_negatives_Kernel")
self.transmission_Function = SourceModule(
transmission_source%(self.CUDA_constants)).get_function("transmission_Kernel")
try :
self.DiracPropagator_DampingODM = \
SourceModule( DiracPropagator_DampingODM_source%(self.CUDA_constants),
arch="sm_20").get_function("DampingODM_Kernel")
except:
pass
self.Potential_0_Average_Function = \
SourceModule( Potential_0_Average_source%(
self.CUDA_constants,self.Potential_0_String),arch="sm_20").get_function("Kernel" )
self.Potential_1_Average_Function = \
SourceModule( Potential_1_Average_source%(
self.CUDA_constants,self.Potential_1_String),arch="sm_20").get_function("Kernel" )
self.Potential_2_Average_Function = \
SourceModule( Potential_2_Average_source%(
self.CUDA_constants,self.Potential_2_String),arch="sm_20").get_function("Kernel" )
self.Potential_3_Average_Function = \
SourceModule( Potential_3_Average_source%(
self.CUDA_constants,self.Potential_3_String),arch="sm_20").get_function("Kernel" )
#self.D_1_Potential_0_ExpectationValue_Function = \
#SourceModule( D_1_Potential_0_Expectation_source%(
# self.CUDA_constants),arch="sm_20").get_function("Kernel" )
self.X_Average_Function = \
SourceModule( X_Average_source%(self.CUDA_constants),arch="sm_20").get_function( "Kernel" )
self.P_Average_Function = \
SourceModule( P_Average_source%(self.CUDA_constants),arch="sm_20").get_function( "Kernel" )
self.XP_Average_Function = \
SourceModule( XP_Average_source%(self.CUDA_constants),arch="sm_20").get_function( "Kernel" )
self.XX_Average_Function = \
SourceModule( XX_Average_source%(self.CUDA_constants),arch="sm_20").get_function( "Kernel" )
self.Alpha_1_Average_Function = \
SourceModule( Alpha_1_Average_source%(self.CUDA_constants),arch="sm_20").get_function( "Kernel" )
self.P1_Alpha_1_Average_Function = \
SourceModule( P1_Alpha_1_Average_source%(self.CUDA_constants),arch="sm_20").get_function( "Kernel" )
self.X1_Alpha_1_Average_Function = \
SourceModule( X1_Alpha_1_Average_source%(self.CUDA_constants),arch="sm_20").get_function( "Kernel" )
self.PP_Average_Function = \
SourceModule( PP_Average_source%(self.CUDA_constants),arch="sm_20").get_function( "Kernel" )
self.D_1_Potential_0_Average_Function = \
SourceModule( D_1_Potential_0_Average_source%(
self.CUDA_constants,self.D_1_Potential_0_String),arch="sm_20").get_function( "Kernel" )
self.X1_D_1_Potential_0_Average_Function = \
SourceModule( X1_D_1_Potential_0_Average_source%(
self.CUDA_constants,self.D_1_Potential_0_String),arch="sm_20").get_function( "Kernel" )
self.P1_D_1_Potential_0_Average_Function = \
SourceModule( P1_D_1_Potential_0_Average_source%(
self.CUDA_constants,self.D_1_Potential_0_String),arch="sm_20").get_function( "Kernel" )
#
self.roll_FirstRowCopy_Function = SourceModule( roll_FirstRowCopy_source%self.CUDA_constants, arch="sm_20").get_function( "Kernel" )
self.gpu_sum_axis0_Function = SourceModule( gpu_sum_axis0_source%self.CUDA_constants, arch="sm_20").get_function( "Kernel" )
try:
self.theta_fp_Damping_Function = SourceModule(\
theta_fp_source%(self.CUDA_constants,self.fp_Damping_String),
arch="sm_20").get_function("Kernel")
except AttributeError:
pass
#......................................................................................
def Potential_0_Average(self, temp_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU ,t):
self.Potential_0_Average_Function( temp_GPU,
W11_GPU, W22_GPU, W33_GPU, W44_GPU, t , block=self.blockCUDA, grid=self.gridCUDA )
return self.dX*self.dP * gpuarray.sum(temp_GPU).get()
def Potential_1_Average(self, temp_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU , t):
self.Potential_1_Average_Function( temp_GPU,
W11_GPU, W22_GPU, W33_GPU, W44_GPU, t, block=self.blockCUDA, grid=self.gridCUDA )
return self.dX*self.dP * gpuarray.sum(temp_GPU).get()
def Potential_2_Average(self, temp_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU ,t):
self.Potential_2_Average_Function( temp_GPU,
W11_GPU, W22_GPU, W33_GPU, W44_GPU, t, block=self.blockCUDA, grid=self.gridCUDA )
return self.dX*self.dP * gpuarray.sum(temp_GPU).get()
def Potential_3_Average(self, temp_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU, t):
self.Potential_3_Average_Function( temp_GPU,
W11_GPU, W22_GPU, W33_GPU, W44_GPU, t, block=self.blockCUDA, grid=self.gridCUDA )
return self.dX*self.dP * gpuarray.sum(temp_GPU).get()
#def D_1_Potential_0_Average(self, W11_GPU, W22_GPU, W33_GPU, W44_GPU ,t, temp_GPU):
# self.D_1_Potential_0_Average_Function(
# W11_GPU, W22_GPU, W33_GPU, W44_GPU, t, temp_GPU, block=self.blockCUDA, grid=self.gridCUDA )
# return gpuarray.sum(temp_GPU).get()
#......................................................................................
def PotentialFunction_0(self, t, x ):
pow = np.power
atan = np.arctan
sqrt = np.sqrt
cosh = np.cosh
return eval ( self.Potential_0_String, np.__dict__, locals() )
def PotentialFunction_1(self, t, x ):
pow = np.power
atan = np.arctan
sqrt = np.sqrt
cosh = np.cosh
return eval ( self.Potential_1_String, np.__dict__, locals() )
def PotentialFunction_2(self, t, x ):
pow = np.power
atan = np.arctan
sqrt = np.sqrt
cosh = np.cosh
return eval ( self.Potential_2_String, np.__dict__, locals() )
def PotentialFunction_3(self, t, x ):
pow = np.power
atan = np.arctan
sqrt = np.sqrt
cosh = np.cosh
return eval ( self.Potential_3_String, np.__dict__, locals() )
#-------
def D_1_PotentialFunction_0(self, t, x ):
pow = np.power
atan = np.arctan
sqrt = np.sqrt
cosh = np.cosh
return eval ( self.D_1_Potential_0_String , np.__dict__, locals() )
def D_1_PotentialFunction_1(self, t, x ):
pow = np.power
atan = np.arctan
sqrt = np.sqrt
cosh = np.cosh
return eval ( self.D_1_Potential_1_String , np.__dict__, locals() )
def D_1_PotentialFunction_2(self, t, x ):
pow = np.power
atan = np.arctan
sqrt = np.sqrt
cosh = np.cosh
return eval ( self.D_1_Potential_2_String , np.__dict__, locals() )
def D_1_PotentialFunction_3(self, t, x ):
pow = np.power
atan = np.arctan
sqrt = np.sqrt
cosh = np.cosh
return eval ( self.D_1_Potential_3_String , np.__dict__, locals() )
#............................
def GaussianSpinor_ParticleUp(self,x,px,s,X) :
"""
        x, px: Gaussian center
s: Gaussian standard deviation in x
X: variable
"""
rho = np.exp(1j*X*px) * np.exp( -0.5*( (X - x)/s )**2 ) + 0j
p0 = np.sqrt( px*px + self.mass*self.mass*self.c*self.c )
Psi1 = rho*( p0 + self.mass*self.c )
Psi2 = X*0j
Psi3 = X*0j
Psi4 = rho*px
return np.array([Psi1, Psi2, Psi3, Psi4 ])
def GaussianSpinor_ParticleDown(self,x,px,s,X):
"""
        x, px: Gaussian center
s: Gaussian standard deviation in x
X: variable
"""
rho = np.exp(1j*X*px) * np.exp( -0.5*( (X - x)/s )**2 ) + 0j
p0 = np.sqrt( px*px + self.mass*self.mass*self.c*self.c )
Psi1 = 1j*X*0j
Psi2 = 1j*rho*( p0 + self.mass*self.c )
Psi3 = 1j*rho*px
Psi4 = 1j*X*0j
return np.array([Psi1, Psi2, Psi3, Psi4 ])
#
def GaussianSpinor_AntiParticleDown(self,x,px,s,X) :
"""
        x, px: Gaussian center
s: Gaussian standard deviation in x
X: variable
"""
rho = np.exp(1j*X*px) * np.exp( -0.5*( (X - x)/s )**2 ) + 0j
p0 = np.sqrt( px*px + self.mass*self.mass*self.c*self.c )
Psi1 = X*0j
Psi2 = rho*px
Psi3 = rho*( -p0 - self.mass*self.c )
Psi4 = X*0j
return -1j*np.array([Psi1, Psi2, Psi3, Psi4 ])
def GaussianSpinor_AntiParticleUp(self,x,px,s,X) :
"""
        x, px: Gaussian center
s: Gaussian standard deviation in x
X: variable
"""
rho = np.exp(1j*X*px) * np.exp( -0.5*( (X - x)/s )**2 ) + 0j
p0 = np.sqrt( px*px + self.mass*self.mass*self.c*self.c )
Psi1 = rho*px
Psi2 = X*0j
Psi3 = X*0j
Psi4 = rho*( -p0 - self.mass*self.c )
return -1j*np.array([Psi1, Psi2, Psi3, Psi4 ])
#
def SpinorNorm(self,Psi):
norm = np.sum( np.abs(Psi[0])**2 )*self.dX*self.dP
norm += np.sum( np.abs(Psi[1])**2 )*self.dX*self.dP
norm += np.sum( np.abs(Psi[2])**2 )*self.dX*self.dP
norm += np.sum( np.abs(Psi[3])**2 )*self.dX*self.dP
return norm
#...................................................................................
def _ConstructMajoranaSpinor(self, Psi_real ):
"""
        returns a spinor in the standard representation from a spinor in the Majorana representation
        Deprecated
"""
PsiMajorana = np.empty_like( Psi_real + 0j )
PsiMajorana[0] = ( -Psi_real[0] + 1j*Psi_real[3] )/np.sqrt(2)
PsiMajorana[1] = ( -Psi_real[1] - 1j*Psi_real[2] )/np.sqrt(2)
PsiMajorana[2] = ( -1j*Psi_real[1] - Psi_real[2] )/np.sqrt(2)
PsiMajorana[3] = ( 1j*Psi_real[0] -Psi_real[3] )/np.sqrt(2)
return PsiMajorana
#...................................................................................
def MajoranaSpinorPlus(self, Psi ):
PsiMajorana = np.empty_like( Psi + 0j )
PsiMajorana[0] = 0.5*( Psi[0] - Psi[3].conj() )
PsiMajorana[1] = 0.5*( Psi[1] + Psi[2].conj() )
PsiMajorana[2] = 0.5*( Psi[2] + Psi[1].conj() )
PsiMajorana[3] = 0.5*( Psi[3] - Psi[0].conj() )
return PsiMajorana
def MajoranaSpinorMinus(self, Psi ):
PsiMajorana = np.empty_like( Psi + 0j )
PsiMajorana[0] = 0.5*( Psi[0] + Psi[3].conj() )
PsiMajorana[1] = 0.5*( Psi[1] - Psi[2].conj() )
PsiMajorana[2] = 0.5*( Psi[2] - Psi[1].conj() )
PsiMajorana[3] = 0.5*( Psi[3] + Psi[0].conj() )
return PsiMajorana
#...................................................................................
def WignerDirac_MatrixProduct(self, outC , inA , inB):
sumM = np.zeros([ self.P_gridDIM , self.X_gridDIM], dtype = np.complex128 )
for i in range(4) :
for j in range(4):
                sumM *= 0
for k in range(4):
sumM += inA[i,k] * inB[k,j]
outC[i,j][:,:] = sumM
def DiracElectronProjector(self,p,sign):
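        # Hedged reading: entrywise form of the energy-sign projector
        #   Lambda_sign(p) = ( 1 + sign * H0(p)/E(p) ) / 2,
        # with H0 = c*alpha_1*p + beta*m*c^2 and E = sqrt((m c^2)^2 + (c p)^2);
        # note that sqrtp below equals 2*E.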
mass = self.mass
c = self.c
sqrtp = 2*np.sqrt( (mass*c**2)**2 + (c*p)**2 )
aa = 0.5 + sign*mass*c*c/sqrtp
bb = c*sign*p/sqrtp
cc = c*sign*p/sqrtp
dd = 0.5 - sign*mass*c*c/sqrtp
z = np.zeros([ self.P_gridDIM , self.X_gridDIM])
return np.array( [ [aa , z , z , bb ],
[z , aa , cc , z ],
[z , bb , dd , z ],
[cc , z , z , dd] ])
def FilterElectrons(self, W ,sign):
ElectronProjectorL = self.DiracElectronProjector( self.P + 0.5*self.Lambda , sign)
ElectronProjectorR = self.DiracElectronProjector( self.P - 0.5*self.Lambda , sign)
for i in range(4):
for j in range(4):
W[i,j][:,:] = self.Fourier_X_To_Lambda(W[i,j]) #fftpack.fft( W[i,j], axis=1 )
W_ = np.empty_like(W)
self.WignerDirac_MatrixProduct( W_ , ElectronProjectorL , W )
self.WignerDirac_MatrixProduct( W , W_ , ElectronProjectorR )
for i in range(4):
for j in range(4):
W[i,j][:,:] = self.Fourier_Lambda_To_X(W[i,j]) #fftpack.ifft( W[i,j], axis=1 )
#................
def FilterElectrons_GPU(self,
_W11_GPU, _W12_GPU, _W13_GPU, _W14_GPU,
_W21_GPU, _W22_GPU, _W23_GPU, _W24_GPU,
_W31_GPU, _W32_GPU, _W33_GPU, _W34_GPU,
_W41_GPU, _W42_GPU, _W43_GPU, _W44_GPU,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU,
sign):
self.FilterElectrons_Function(
_W11_GPU, _W12_GPU, _W13_GPU, _W14_GPU,
_W21_GPU, _W22_GPU, _W23_GPU, _W24_GPU,
_W31_GPU, _W32_GPU, _W33_GPU, _W34_GPU,
_W41_GPU, _W42_GPU, _W43_GPU, _W44_GPU,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU,
sign, block=self.blockCUDA, grid=self.gridCUDA )
#..............
def AntiParticlePopulation(self,
_W11_GPU, _W12_GPU, _W13_GPU, _W14_GPU,
_W21_GPU, _W22_GPU, _W23_GPU, _W24_GPU,
_W31_GPU, _W32_GPU, _W33_GPU, _W34_GPU,
_W41_GPU, _W42_GPU, _W43_GPU, _W44_GPU,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU):
sign = np.int32(-1)
self.FilterElectrons_GPU(
_W11_GPU, _W12_GPU, _W13_GPU, _W14_GPU,
_W21_GPU, _W22_GPU, _W23_GPU, _W24_GPU,
_W31_GPU, _W32_GPU, _W33_GPU, _W34_GPU,
_W41_GPU, _W42_GPU, _W43_GPU, _W44_GPU,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU,
sign)
self.Fourier_Lambda_To_X_GPU( _W11_GPU )
self.Fourier_Lambda_To_X_GPU( _W22_GPU )
self.Fourier_Lambda_To_X_GPU( _W33_GPU )
self.Fourier_Lambda_To_X_GPU( _W44_GPU )
return self.Wigner_4x4_Norm_GPU( _W11_GPU , _W22_GPU , _W33_GPU, _W44_GPU)
#................
def ParticlePopulation(self,
_W11_GPU, _W12_GPU, _W13_GPU, _W14_GPU,
_W21_GPU, _W22_GPU, _W23_GPU, _W24_GPU,
_W31_GPU, _W32_GPU, _W33_GPU, _W34_GPU,
_W41_GPU, _W42_GPU, _W43_GPU, _W44_GPU,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU):
sign = np.int32(1)
self.FilterElectrons_GPU(
_W11_GPU, _W12_GPU, _W13_GPU, _W14_GPU,
_W21_GPU, _W22_GPU, _W23_GPU, _W24_GPU,
_W31_GPU, _W32_GPU, _W33_GPU, _W34_GPU,
_W41_GPU, _W42_GPU, _W43_GPU, _W44_GPU,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU,
sign)
self.Fourier_Lambda_To_X_GPU( _W11_GPU )
self.Fourier_Lambda_To_X_GPU( _W22_GPU )
self.Fourier_Lambda_To_X_GPU( _W33_GPU )
self.Fourier_Lambda_To_X_GPU( _W44_GPU )
return self.Wigner_4x4_Norm_GPU( _W11_GPU , _W22_GPU , _W33_GPU, _W44_GPU)
#................
def FilterAntiParticles(self,
_W11_GPU, _W12_GPU, _W13_GPU, _W14_GPU,
_W21_GPU, _W22_GPU, _W23_GPU, _W24_GPU,
_W31_GPU, _W32_GPU, _W33_GPU, _W34_GPU,
_W41_GPU, _W42_GPU, _W43_GPU, _W44_GPU,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU):
"""
"""
sign_positive = np.int32(1)
"""self.Fourier_4X4_X_To_Lambda_GPU(
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU )"""
self.FilterElectrons_GPU(
_W11_GPU, _W12_GPU, _W13_GPU, _W14_GPU,
_W21_GPU, _W22_GPU, _W23_GPU, _W24_GPU,
_W31_GPU, _W32_GPU, _W33_GPU, _W34_GPU,
_W41_GPU, _W42_GPU, _W43_GPU, _W44_GPU,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU,
sign_positive)
#..........................................
def Fourier_X_To_Lambda(self,W):
return fftpack.fft( W , axis=1 )
def Fourier_Lambda_To_X(self,W):
return fftpack.ifft( W , axis=1 )
def Fourier_Theta_To_P(self,W):
return fftpack.ifft( W , axis=0 )
def Fourier_P_To_Theta(self,W):
return fftpack.fft( W , axis=0 )
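    # FFT conventions (as read from the code): x -> lambda and p -> theta
    # are forward transforms, lambda -> x and theta -> p are inverse
    # transforms; the GPU variants below rescale cuFFT's unnormalized
    # inverse transform by 1/N explicitly.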
def Fourier_4X4_Theta_To_P(self,W):
for i in range(4):
for j in range(4):
W[i,j][:,:] = self.Fourier_Theta_To_P( W[i,j] )
def Fourier_4X4_P_To_Theta(self,W):
for i in range(4):
for j in range(4):
W[i,j][:,:] = self.Fourier_P_To_Theta( W[i,j] )
#............................................................................
def Fourier_P_To_Theta_GPU(self, W_out_GPU ):
cuda_fft.fft_Z2Z( W_out_GPU , W_out_GPU , self.plan_Z2Z_1D_Axes0 )
def Fourier_Theta_To_P_GPU(self, W_out_GPU ):
cuda_fft.ifft_Z2Z( W_out_GPU , W_out_GPU , self.plan_Z2Z_1D_Axes0 )
        W_out_GPU *= 1./float(self.P_gridDIM)  # cuFFT inverse is unnormalized: rescale by 1/N
def Fourier_X_To_Lambda_GPU(self,W_out_GPU):
cuda_fft.fft_Z2Z( W_out_GPU, W_out_GPU , self.plan_Z2Z_1D_Axes1 )
def Fourier_Lambda_To_X_GPU(self,W_out_GPU):
cuda_fft.ifft_Z2Z( W_out_GPU, W_out_GPU , self.plan_Z2Z_1D_Axes1 )
W_out_GPU *= 1./float(self.X_gridDIM)
def Fourier_4X4_P_To_Theta_GPU(self, W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU ):
self.Fourier_P_To_Theta_GPU( W11_GPU )
self.Fourier_P_To_Theta_GPU( W12_GPU )
self.Fourier_P_To_Theta_GPU( W13_GPU )
self.Fourier_P_To_Theta_GPU( W14_GPU )
self.Fourier_P_To_Theta_GPU( W21_GPU )
self.Fourier_P_To_Theta_GPU( W22_GPU )
self.Fourier_P_To_Theta_GPU( W23_GPU )
self.Fourier_P_To_Theta_GPU( W24_GPU )
self.Fourier_P_To_Theta_GPU( W31_GPU )
self.Fourier_P_To_Theta_GPU( W32_GPU )
self.Fourier_P_To_Theta_GPU( W33_GPU )
self.Fourier_P_To_Theta_GPU( W34_GPU )
self.Fourier_P_To_Theta_GPU( W41_GPU )
self.Fourier_P_To_Theta_GPU( W42_GPU )
self.Fourier_P_To_Theta_GPU( W43_GPU )
self.Fourier_P_To_Theta_GPU( W44_GPU )
def Fourier_4X4_Theta_To_P_GPU(self, W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU ):
self.Fourier_Theta_To_P_GPU( W11_GPU )
self.Fourier_Theta_To_P_GPU( W12_GPU )
self.Fourier_Theta_To_P_GPU( W13_GPU )
self.Fourier_Theta_To_P_GPU( W14_GPU )
self.Fourier_Theta_To_P_GPU( W21_GPU )
self.Fourier_Theta_To_P_GPU( W22_GPU )
self.Fourier_Theta_To_P_GPU( W23_GPU )
self.Fourier_Theta_To_P_GPU( W24_GPU )
self.Fourier_Theta_To_P_GPU( W31_GPU )
self.Fourier_Theta_To_P_GPU( W32_GPU )
self.Fourier_Theta_To_P_GPU( W33_GPU )
self.Fourier_Theta_To_P_GPU( W34_GPU )
self.Fourier_Theta_To_P_GPU( W41_GPU )
self.Fourier_Theta_To_P_GPU( W42_GPU )
self.Fourier_Theta_To_P_GPU( W43_GPU )
self.Fourier_Theta_To_P_GPU( W44_GPU )
def Fourier_4X4_X_To_Lambda_GPU(self,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU ):
self.Fourier_X_To_Lambda_GPU( W11_GPU )
self.Fourier_X_To_Lambda_GPU( W12_GPU )
self.Fourier_X_To_Lambda_GPU( W13_GPU )
self.Fourier_X_To_Lambda_GPU( W14_GPU )
self.Fourier_X_To_Lambda_GPU( W21_GPU )
self.Fourier_X_To_Lambda_GPU( W22_GPU )
self.Fourier_X_To_Lambda_GPU( W23_GPU )
self.Fourier_X_To_Lambda_GPU( W24_GPU )
self.Fourier_X_To_Lambda_GPU( W31_GPU )
self.Fourier_X_To_Lambda_GPU( W32_GPU )
self.Fourier_X_To_Lambda_GPU( W33_GPU )
self.Fourier_X_To_Lambda_GPU( W34_GPU )
self.Fourier_X_To_Lambda_GPU( W41_GPU )
self.Fourier_X_To_Lambda_GPU( W42_GPU )
self.Fourier_X_To_Lambda_GPU( W43_GPU )
self.Fourier_X_To_Lambda_GPU( W44_GPU )
def Fourier_4X4_Lambda_To_X_GPU(self,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU ):
self.Fourier_Lambda_To_X_GPU( W11_GPU )
self.Fourier_Lambda_To_X_GPU( W12_GPU )
self.Fourier_Lambda_To_X_GPU( W13_GPU )
self.Fourier_Lambda_To_X_GPU( W14_GPU )
self.Fourier_Lambda_To_X_GPU( W21_GPU )
self.Fourier_Lambda_To_X_GPU( W22_GPU )
self.Fourier_Lambda_To_X_GPU( W23_GPU )
self.Fourier_Lambda_To_X_GPU( W24_GPU )
self.Fourier_Lambda_To_X_GPU( W31_GPU )
self.Fourier_Lambda_To_X_GPU( W32_GPU )
self.Fourier_Lambda_To_X_GPU( W33_GPU )
self.Fourier_Lambda_To_X_GPU( W34_GPU )
self.Fourier_Lambda_To_X_GPU( W41_GPU )
self.Fourier_Lambda_To_X_GPU( W42_GPU )
self.Fourier_Lambda_To_X_GPU( W43_GPU )
self.Fourier_Lambda_To_X_GPU( W44_GPU )
def Wigner_4x4_Norm(self,W):
W0 = np.sum( W[0,0] )
W0 += np.sum( W[1,1] )
W0 += np.sum( W[2,2] )
W0 += np.sum( W[3,3] )
return W0.real * self.dX * self.dP
def Wigner_4X4__SpinTrace(self,W):
W0 = W[0,0].copy()
W0 += W[1,1]
W0 += W[2,2]
W0 += W[3,3]
return W0
#...................................................................................
def MakeGrossPitaevskiiTerms(self, B_minus_GPU, B_plus_GPU, Prob_X_GPU ):
"""
Makes the non-linear terms that characterize the Gross-Pitaevskii equation
"""
P_gridDIM_32 = np.int32(self.P_gridDIM)
cuda_fft.fft_Z2Z( Prob_X_GPU, Prob_X_GPU, self.plan_Z2Z_1D )
self.roll_FirstRowCopy_Function( B_minus_GPU, Prob_X_GPU, P_gridDIM_32,
block=self.blockCUDA, grid=(self.X_gridDIM/512,1) )
self.roll_FirstRowCopy_Function( B_plus_GPU, Prob_X_GPU, P_gridDIM_32,
block=self.blockCUDA, grid=(self.X_gridDIM/512,1) )
B_minus_GPU /= self.phase_LambdaTheta_GPU
B_plus_GPU *= self.phase_LambdaTheta_GPU
self.Fourier_Lambda_To_X_GPU( B_minus_GPU )
self.Fourier_Lambda_To_X_GPU( B_plus_GPU )
#...................................................................................
def WignerDiracPurity(self, W_4x4):
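        # Hedged note: evaluates 2*pi * Integral Tr(W W) dx dp, the
        # phase-space purity Tr(rho^2) in hbar = 1 units (equal to 1 for a
        # pure state); the spin trace pairs W[i,j] with W[j,i].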
sum2 = np.sum( W_4x4[0,0]*W_4x4[0,0] )
sum2 += np.sum( W_4x4[0,1]*W_4x4[1,0] )
sum2 += np.sum( W_4x4[0,2]*W_4x4[2,0] )
sum2 += np.sum( W_4x4[0,3]*W_4x4[3,0] )
sum2 += np.sum( W_4x4[1,0]*W_4x4[0,1] )
sum2 += np.sum( W_4x4[1,1]*W_4x4[1,1] )
sum2 += np.sum( W_4x4[1,2]*W_4x4[2,1] )
sum2 += np.sum( W_4x4[1,3]*W_4x4[3,1] )
sum2 += np.sum( W_4x4[2,0]*W_4x4[0,2] )
sum2 += np.sum( W_4x4[2,1]*W_4x4[1,2] )
sum2 += np.sum( W_4x4[2,2]*W_4x4[2,2] )
sum2 += np.sum( W_4x4[2,3]*W_4x4[3,2] )
        sum2 += np.sum( W_4x4[3,0]*W_4x4[0,3] )
sum2 += np.sum( W_4x4[3,1]*W_4x4[1,3] )
sum2 += np.sum( W_4x4[3,2]*W_4x4[2,3] )
sum2 += np.sum( W_4x4[3,3]*W_4x4[3,3] )
return 2*np.pi*sum2.real*self.dX*self.dP
def Wigner_4x4_Norm_GPU(self,W11,W22,W33,W44):
W0 = gpuarray.sum( W11 ).get()
W0 += gpuarray.sum( W22 ).get()
W0 += gpuarray.sum( W33 ).get()
W0 += gpuarray.sum( W44 ).get()
return W0.real * self.dX * self.dP
def Wigner_4X4_Normalize(self,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU ):
norm = self.Wigner_4x4_Norm_GPU(W11_GPU, W22_GPU, W33_GPU, W44_GPU)
W11_GPU /= norm
W12_GPU /= norm
W13_GPU /= norm
W14_GPU /= norm
W21_GPU /= norm
W22_GPU /= norm
W23_GPU /= norm
W24_GPU /= norm
W31_GPU /= norm
W32_GPU /= norm
W33_GPU /= norm
W34_GPU /= norm
W41_GPU /= norm
W42_GPU /= norm
W43_GPU /= norm
W44_GPU /= norm
def DiracEnergy(self, temp_GPU,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU, t):
dXdP = self.dX*self.dP
c= self.c
mass = self.mass
energy = c*dXdP*gpuarray.dot(W14_GPU,self.P_GPU ).get()
energy += c*dXdP*gpuarray.dot(W23_GPU,self.P_GPU ).get()
energy += c*dXdP*gpuarray.dot(W32_GPU,self.P_GPU ).get()
energy += c*dXdP*gpuarray.dot(W41_GPU,self.P_GPU ).get()
energy += c**2*mass*dXdP*gpuarray.sum(W11_GPU).get()
energy += c**2*mass*dXdP*gpuarray.sum(W22_GPU).get()
energy -= c**2*mass*dXdP*gpuarray.sum(W33_GPU).get()
energy -= c**2*mass*dXdP*gpuarray.sum(W44_GPU).get()
energy += self.Potential_0_Average( temp_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU ,t )
energy += self.Potential_1_Average( temp_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU ,t )
energy += self.Potential_2_Average( temp_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU ,t )
energy += self.Potential_3_Average( temp_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU ,t )
return energy
#...................................................................
def Copy_gpuarray_row(self, matrix_GPU , n):
'''
Return the row n of a gpuarray matrix as a gpu array itself
'''
ncols = matrix_GPU.shape[1]
floatSize = matrix_GPU.dtype.itemsize
matrix_row = gpuarray.empty( ncols , matrix_GPU.dtype)
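        # memcpy_dtod copies ncols elements starting at byte offset
        # floatSize*ncols*n, i.e. the n-th row of the row-major matrix.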
cuda.memcpy_dtod(matrix_row.ptr , matrix_GPU.ptr + floatSize*ncols*n , floatSize*ncols)
return matrix_row
def save_FrameItem(self,f,Psi_GPU,t):
PsiTemp = Psi_GPU.get()
f.create_dataset(str(t), data = np.frombuffer(PsiTemp))
def save_Density(self,f11,t_index,W11_GPU,W22_GPU,W33_GPU,W44_GPU):
print ' progress ', 100*t_index / (self.timeSteps+1), '%'
W0 = W11_GPU.get()
W0 += W22_GPU.get()
W0 += W33_GPU.get()
W0 += W44_GPU.get()
W0 = W0.real.astype(np.float64)
#print ' normalization = ', np.sum( W0 )*self.dX * self.dP
f11.create_dataset( str(t_index), data = W0 )
def save_WignerFunction(self,f11, t,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU):
W_= self.fftshift( W11_GPU.get() );
f11.create_dataset('W_real_11/'+str(t), data = np.real(W_) )
f11.create_dataset('W_imag_11/'+str(t), data = np.imag(W_) )
W_= self.fftshift( W12_GPU.get() );
f11.create_dataset('W_real_12/'+str(t), data = np.real(W_) )
f11.create_dataset('W_imag_12/'+str(t), data = np.imag(W_) )
W_= self.fftshift( W13_GPU.get() );
f11.create_dataset('W_real_13/'+str(t), data = np.real(W_) )
f11.create_dataset('W_imag_13/'+str(t), data = np.imag(W_) )
W_= self.fftshift( W14_GPU.get() );
f11.create_dataset('W_real_14/'+str(t), data = np.real(W_) )
f11.create_dataset('W_imag_14/'+str(t), data = np.imag(W_) )
#
W_= self.fftshift( W21_GPU.get() );
f11.create_dataset('W_real_21/'+str(t), data = np.real(W_) )
f11.create_dataset('W_imag_21/'+str(t), data = np.imag(W_) )
        W_= self.fftshift( W22_GPU.get() );
        f11.create_dataset('W_real_22/'+str(t), data = np.real(W_) )
        f11.create_dataset('W_imag_22/'+str(t), data = np.imag(W_) )
        W_= self.fftshift( W23_GPU.get() );
        f11.create_dataset('W_real_23/'+str(t), data = np.real(W_) )
        f11.create_dataset('W_imag_23/'+str(t), data = np.imag(W_) )
        W_= self.fftshift( W24_GPU.get() );
        f11.create_dataset('W_real_24/'+str(t), data = np.real(W_) )
        f11.create_dataset('W_imag_24/'+str(t), data = np.imag(W_) )
#
W_= self.fftshift( W31_GPU.get() );
f11.create_dataset('W_real_31/'+str(t), data = np.real(W_) )
f11.create_dataset('W_imag_31/'+str(t), data = np.imag(W_) )
W_= self.fftshift( W32_GPU.get() );
f11.create_dataset('W_real_32/'+str(t), data = np.real(W_) )
f11.create_dataset('W_imag_32/'+str(t), data = np.imag(W_) )
W_= self.fftshift( W33_GPU.get() );
f11.create_dataset('W_real_33/'+str(t), data = np.real(W_) )
f11.create_dataset('W_imag_33/'+str(t), data = np.imag(W_) )
W_= self.fftshift( W34_GPU.get() );
f11.create_dataset('W_real_34/'+str(t), data = np.real(W_) )
f11.create_dataset('W_imag_34/'+str(t), data = np.imag(W_) )
#
W_= self.fftshift( W41_GPU.get() );
f11.create_dataset('W_real_41/'+str(t), data = np.real(W_) )
f11.create_dataset('W_imag_41/'+str(t), data = np.imag(W_) )
W_= self.fftshift( W42_GPU.get() );
f11.create_dataset('W_real_42/'+str(t), data = np.real(W_) )
f11.create_dataset('W_imag_42/'+str(t), data = np.imag(W_) )
W_= self.fftshift( W43_GPU.get() );
f11.create_dataset('W_real_43/'+str(t), data = np.real(W_) )
f11.create_dataset('W_imag_43/'+str(t), data = np.imag(W_) )
W_= self.fftshift( W44_GPU.get() );
f11.create_dataset('W_real_44/'+str(t), data = np.real(W_) )
f11.create_dataset('W_imag_44/'+str(t), data = np.imag(W_) )
#--------------------------------------------------------------------
def Load_Density(self,fileName,n):
f11 = h5py.File( fileName ,'r')
rho = f11[str(n)][...]
f11.close()
return rho
def Load_WignerFunction(self,fileName,n):
f11 = h5py.File( fileName ,'r')
#W = FILE['/'+str(n)][...]
W11 = f11['W_real_11/'+str(n)][...] + 1j*f11['W_imag_11/'+str(n)][...]
W12 = f11['W_real_12/'+str(n)][...] + 1j*f11['W_imag_12/'+str(n)][...]
W13 = f11['W_real_13/'+str(n)][...] + 1j*f11['W_imag_13/'+str(n)][...]
W14 = f11['W_real_14/'+str(n)][...] + 1j*f11['W_imag_14/'+str(n)][...]
W21 = f11['W_real_21/'+str(n)][...] + 1j*f11['W_imag_21/'+str(n)][...]
W22 = f11['W_real_22/'+str(n)][...] + 1j*f11['W_imag_22/'+str(n)][...]
W23 = f11['W_real_23/'+str(n)][...] + 1j*f11['W_imag_23/'+str(n)][...]
W24 = f11['W_real_24/'+str(n)][...] + 1j*f11['W_imag_24/'+str(n)][...]
W31 = f11['W_real_31/'+str(n)][...] + 1j*f11['W_imag_31/'+str(n)][...]
W32 = f11['W_real_32/'+str(n)][...] + 1j*f11['W_imag_32/'+str(n)][...]
W33 = f11['W_real_33/'+str(n)][...] + 1j*f11['W_imag_33/'+str(n)][...]
W34 = f11['W_real_34/'+str(n)][...] + 1j*f11['W_imag_34/'+str(n)][...]
W41 = f11['W_real_41/'+str(n)][...] + 1j*f11['W_imag_41/'+str(n)][...]
W42 = f11['W_real_42/'+str(n)][...] + 1j*f11['W_imag_42/'+str(n)][...]
W43 = f11['W_real_43/'+str(n)][...] + 1j*f11['W_imag_43/'+str(n)][...]
W44 = f11['W_real_44/'+str(n)][...] + 1j*f11['W_imag_44/'+str(n)][...]
f11.close()
return np.array( [[W11,W12,W13,W14],[W21,W22,W23,W24],[W31,W32,W33,W34],[W41,W42,W43,W44]] )
#....................................................................
    # Caldeira-Leggett damping
#....................................................................
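    # Hedged note: in the high-temperature Caldeira-Leggett model the
    # friction part of the dissipator is L[W] ~ 2*gamma * d/dp ( p W );
    # the methods below realize the d/dp factor spectrally, multiplying by
    # Theta after a p -> theta transform.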
def Theta_fp_Damping(self, LW_GPU, W_GPU):
self.gpu_array_copy_Function( LW_GPU , W_GPU , block=self.blockCUDA , grid=self.gridCUDA )
self.theta_fp_Damping_Function( LW_GPU , block=self.blockCUDA , grid=self.gridCUDA )
# x p -> theta p
self.Fourier_P_To_Theta_GPU( LW_GPU )
LW_GPU *= self.Theta_GPU
self.Fourier_Theta_To_P_GPU( LW_GPU )
def CaldeiraDissipatorOrder3(self, LW_GPU, LW_temp_GPU, W_GPU, dampingFunction):
# dampingFunction is a function of momentum
        # Horner-style third-order expansion of exp(2j*dt*gamma*L):
        #   W <- W + a*L( W + (a/2)*L( W + (a/3)*L(W) ) ),   a = 2j*dt*gamma
        # (each intermediate stage is rebuilt from W, matching the
        #  second-order scheme in CaldeiraDissipatorOrder2 below)
        LW_GPU *= 0j
        LW_GPU += W_GPU
        dampingFunction( LW_temp_GPU , W_GPU )
        LW_GPU += 2./3. * 1j * self.dt *self.gammaDamping * LW_temp_GPU
        dampingFunction( LW_temp_GPU , LW_GPU )
        LW_GPU *= 0j
        LW_GPU += W_GPU
        LW_GPU += 2./2. * 1j * self.dt *self.gammaDamping * LW_temp_GPU
        dampingFunction( LW_temp_GPU , LW_GPU )
        W_GPU += 2. * 1j * self.dt *self.gammaDamping * LW_temp_GPU
def CaldeiraDissipatorOrder3_4x4(self, LW1_GPU, LW2_GPU, dampingFunction,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU):
self.CaldeiraDissipatorOrder3( LW1_GPU, LW2_GPU, W11_GPU ,dampingFunction)
self.CaldeiraDissipatorOrder3( LW1_GPU, LW2_GPU, W12_GPU ,dampingFunction)
self.CaldeiraDissipatorOrder3( LW1_GPU, LW2_GPU, W13_GPU ,dampingFunction)
self.CaldeiraDissipatorOrder3( LW1_GPU, LW2_GPU, W14_GPU ,dampingFunction)
self.CaldeiraDissipatorOrder3( LW1_GPU, LW2_GPU, W21_GPU ,dampingFunction)
self.CaldeiraDissipatorOrder3( LW1_GPU, LW2_GPU, W22_GPU ,dampingFunction)
self.CaldeiraDissipatorOrder3( LW1_GPU, LW2_GPU, W23_GPU ,dampingFunction)
self.CaldeiraDissipatorOrder3( LW1_GPU, LW2_GPU, W24_GPU ,dampingFunction)
self.CaldeiraDissipatorOrder3( LW1_GPU, LW2_GPU, W31_GPU ,dampingFunction)
self.CaldeiraDissipatorOrder3( LW1_GPU, LW2_GPU, W32_GPU ,dampingFunction)
self.CaldeiraDissipatorOrder3( LW1_GPU, LW2_GPU, W33_GPU ,dampingFunction)
self.CaldeiraDissipatorOrder3( LW1_GPU, LW2_GPU, W34_GPU ,dampingFunction)
self.CaldeiraDissipatorOrder3( LW1_GPU, LW2_GPU, W41_GPU ,dampingFunction)
self.CaldeiraDissipatorOrder3( LW1_GPU, LW2_GPU, W42_GPU ,dampingFunction)
self.CaldeiraDissipatorOrder3( LW1_GPU, LW2_GPU, W43_GPU ,dampingFunction)
self.CaldeiraDissipatorOrder3( LW1_GPU, LW2_GPU, W44_GPU ,dampingFunction)
def Product_ThetaP_GPU(self, W_GPU):
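        # Hedged note: multiplies W by p, transforms p -> theta, multiplies
        # by Theta, and transforms back; a spectral proxy (up to the FFT
        # sign convention) for the operator d/dp ( p W ) used in the
        # Caldeira-Leggett generator above.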
W_GPU *= self.P_GPU
self.Fourier_P_To_Theta_GPU(W_GPU)
W_GPU *= self.Theta_GPU
self.Fourier_Theta_To_P_GPU(W_GPU)
def CaldeiraDissipatorOrder2(self, LW1_GPU, LW2_GPU, W_GPU):
self.gpu_array_copy_Function( LW1_GPU , W_GPU , block=self.blockCUDA , grid=self.gridCUDA )
self.gpu_array_copy_Function( LW2_GPU , W_GPU , block=self.blockCUDA , grid=self.gridCUDA )
self.Product_ThetaP_GPU( LW1_GPU )
LW2_GPU += 1j * self.dt *self.gammaDamping * LW1_GPU
self.Product_ThetaP_GPU( LW2_GPU )
W_GPU += 2. * 1j * self.dt *self.gammaDamping * LW2_GPU
def CaldeiraDissipatorOrder2_4x4(self, LW1_GPU, LW2_GPU,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU):
self.CaldeiraDissipatorOrder2( LW1_GPU, LW2_GPU, W11_GPU )
self.CaldeiraDissipatorOrder2( LW1_GPU, LW2_GPU, W12_GPU )
self.CaldeiraDissipatorOrder2( LW1_GPU, LW2_GPU, W13_GPU )
self.CaldeiraDissipatorOrder2( LW1_GPU, LW2_GPU, W14_GPU )
self.CaldeiraDissipatorOrder2( LW1_GPU, LW2_GPU, W21_GPU )
self.CaldeiraDissipatorOrder2( LW1_GPU, LW2_GPU, W22_GPU )
self.CaldeiraDissipatorOrder2( LW1_GPU, LW2_GPU, W23_GPU )
self.CaldeiraDissipatorOrder2( LW1_GPU, LW2_GPU, W24_GPU )
self.CaldeiraDissipatorOrder2( LW1_GPU, LW2_GPU, W31_GPU )
self.CaldeiraDissipatorOrder2( LW1_GPU, LW2_GPU, W32_GPU )
self.CaldeiraDissipatorOrder2( LW1_GPU, LW2_GPU, W33_GPU )
self.CaldeiraDissipatorOrder2( LW1_GPU, LW2_GPU, W34_GPU )
self.CaldeiraDissipatorOrder2( LW1_GPU, LW2_GPU, W41_GPU )
self.CaldeiraDissipatorOrder2( LW1_GPU, LW2_GPU, W42_GPU )
self.CaldeiraDissipatorOrder2( LW1_GPU, LW2_GPU, W43_GPU )
self.CaldeiraDissipatorOrder2( LW1_GPU, LW2_GPU, W44_GPU )
#....................... Ehrenfest functions ................................
def X_Average(self, temp_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU ):
self.X_Average_Function( temp_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU,
block=self.blockCUDA, grid=self.gridCUDA )
return self.dX*self.dP*( gpuarray.sum( temp_GPU ).get() )
def P_Average(self, temp_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU ):
self.P_Average_Function( temp_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU,
block=self.blockCUDA, grid=self.gridCUDA )
return self.dX*self.dP*( gpuarray.sum( temp_GPU ).get() )
def Alpha_1_Average(self, temp_GPU,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU ):
self.Alpha_1_Average_Function( temp_GPU, W14_GPU, W23_GPU, W32_GPU, W41_GPU,
block=self.blockCUDA, grid=self.gridCUDA )
return self.dX*self.dP*( gpuarray.sum( temp_GPU ).get() )
def XP_Average(self, temp_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU ):
self.XP_Average_Function( temp_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU,
block=self.blockCUDA, grid=self.gridCUDA )
return self.dX*self.dP*( gpuarray.sum( temp_GPU ).get() )
def XX_Average(self, temp_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU ):
self.XX_Average_Function( temp_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU,
block=self.blockCUDA, grid=self.gridCUDA )
return self.dX*self.dP*( gpuarray.sum( temp_GPU ).get() )
def PP_Average(self, temp_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU ):
self.PP_Average_Function( temp_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU,
block=self.blockCUDA, grid=self.gridCUDA )
return self.dX*self.dP*( gpuarray.sum( temp_GPU ).get() )
def D_1_Potential_0_Average(self, temp_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU, t ):
self.D_1_Potential_0_Average_Function( temp_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU, t,
block=self.blockCUDA, grid=self.gridCUDA )
return self.dX*self.dP*( gpuarray.sum( temp_GPU ).get() )
def D_1_Potential_1_Average(self,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU ):
x = self.dX*self.dP * gpuarray.dot( W14_GPU, self.D_1_Potential1_GPU ).get()
x += self.dX*self.dP * gpuarray.dot( W23_GPU, self.D_1_Potential1_GPU ).get()
x += self.dX*self.dP * gpuarray.dot( W32_GPU, self.D_1_Potential1_GPU ).get()
x += self.dX*self.dP * gpuarray.dot( W41_GPU, self.D_1_Potential1_GPU ).get()
return x
def D_1_Potential_2_Average(self,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU ):
x = 1j*self.dX*self.dP * gpuarray.dot( W14_GPU, self.D_1_Potential2_GPU ).get()
x += -1j*self.dX*self.dP * gpuarray.dot( W23_GPU, self.D_1_Potential2_GPU ).get()
x += 1j*self.dX*self.dP * gpuarray.dot( W32_GPU, self.D_1_Potential2_GPU ).get()
x += -1j*self.dX*self.dP * gpuarray.dot( W41_GPU, self.D_1_Potential2_GPU ).get()
return x
def D_1_Potential_3_Average(self,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU ):
x = self.dX*self.dP * gpuarray.dot( W13_GPU, self.D_1_Potential3_GPU ).get()
x += -self.dX*self.dP * gpuarray.dot( W24_GPU, self.D_1_Potential3_GPU ).get()
x += self.dX*self.dP * gpuarray.dot( W31_GPU, self.D_1_Potential3_GPU ).get()
x += -self.dX*self.dP * gpuarray.dot( W42_GPU, self.D_1_Potential3_GPU ).get()
return x
def X1_D_1_Potential_0_Average(self, temp_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU, t ):
self.X1_D_1_Potential_0_Average_Function( temp_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU, t,
block=self.blockCUDA, grid=self.gridCUDA )
return self.dX*self.dP*( gpuarray.sum( temp_GPU ).get() )
def P1_D_1_Potential_0_Average(self, temp_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU, t ):
self.P1_D_1_Potential_0_Average_Function( temp_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU, t,
block=self.blockCUDA, grid=self.gridCUDA )
return self.dX*self.dP*( gpuarray.sum( temp_GPU ).get() )
def P1_Alpha_1_Average(self, temp_GPU,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU):
self.P1_Alpha_1_Average_Function( temp_GPU, W14_GPU, W23_GPU, W32_GPU, W41_GPU,
block=self.blockCUDA, grid=self.gridCUDA )
return self.dX*self.dP*( gpuarray.sum( temp_GPU ).get() )
def X1_Alpha_1_Average(self, temp_GPU,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU):
self.X1_Alpha_1_Average_Function( temp_GPU, W14_GPU, W23_GPU, W32_GPU, W41_GPU,
block=self.blockCUDA, grid=self.gridCUDA )
return self.dX*self.dP*( gpuarray.sum( temp_GPU ).get() )
def swap(self,A,B):
temp = A
A = B
B = temp
return A,B
def PlotWigner(self, W, color_min, color_max, xlim, ylim):
W0 = self.Wigner_4X4__SpinTrace( W ).real
x_min = -self.X_amplitude
x_max = self.X_amplitude - self.dX
p_min = -self.P_amplitude
p_max = self.P_amplitude - self.dP
global_max = color_max # Maximum value used to select the color range
global_min = color_min #
#print 'min = ', np.min( W0 ), ' max = ', np.max( W0 )
#print 'normalization = ', np.sum( W0 )*self.dX*self.dP
zero_position = abs( global_min) / (abs( global_max) + abs(global_min))
wigner_cdict = {'red' : ((0., 0., 0.),
(zero_position, 1., 1.),
(1., 1., 1.)),
'green' : ((0., 0., 0.),
(zero_position, 1., 1.),
(1., 0., 0.)),
'blue' : ((0., 1., 1.),
(zero_position, 1., 1.),
(1., 0., 0.)) }
wigner_cmap = matplotlib.colors.LinearSegmentedColormap('wigner_colormap', wigner_cdict, 512)
fig, ax = plt.subplots(figsize=(20, 7))
cax = ax.imshow( W0 ,origin='lower',interpolation='nearest',\
extent=[x_min, x_max, p_min, p_max], vmin= global_min, vmax=global_max, cmap=wigner_cmap)
ax.set_xlabel('x')
ax.set_ylabel('p')
ax.set_xlim( xlim )
ax.set_ylim( ylim )
ax.set_aspect(1)
ax.grid('on')
return fig
############################################################################
#
# Run
#
############################################################################
def Run(self):
try :
import os
os.remove (self.fileName)
except OSError:
pass
print '----------------------------------------------'
print ' Relativistic Wigner-Dirac Propagator: x-Px '
print '----------------------------------------------'
print ' dt = ', self.dt
print ' dx = ', self.dX
print ' dp = ', self.dP
print ' dLambda = ', self.dLambda
print ' '
        # the CUDA block size is fixed at 512 threads along x, so X_gridDIM is assumed to be a multiple of 512
f11 = h5py.File(self.fileName)
timeRangeIndex = range(1, self.timeSteps+1)
f11['x_gridDIM'] = self.X_gridDIM
f11['x_amplitude'] = self.X_amplitude
f11['p_amplitude'] = self.P_amplitude
f11['dx'] = self.dX
f11['dtheta'] = self.dTheta
f11['dp'] = self.dP
f11['p_gridDIM'] = self.P_gridDIM
f11['mass'] = self.mass
f11['dt'] = self.dt
f11['c'] = self.c
f11['x_range'] = self.X_range
f11['potential_0_String'] = self.Potential_0_String
f11['potential_1_String'] = self.Potential_1_String
f11['potential_2_String'] = self.Potential_2_String
f11['potential_3_String'] = self.Potential_3_String
W11_GPU = gpuarray.to_gpu( np.ascontiguousarray(self.W_init[0,0],dtype = np.complex128) )
W12_GPU = gpuarray.to_gpu( np.ascontiguousarray(self.W_init[0,1],dtype = np.complex128) )
W13_GPU = gpuarray.to_gpu( np.ascontiguousarray(self.W_init[0,2],dtype = np.complex128) )
W14_GPU = gpuarray.to_gpu( np.ascontiguousarray(self.W_init[0,3],dtype = np.complex128) )
W21_GPU = gpuarray.to_gpu( np.ascontiguousarray(self.W_init[1,0],dtype = np.complex128) )
W22_GPU = gpuarray.to_gpu( np.ascontiguousarray(self.W_init[1,1],dtype = np.complex128) )
W23_GPU = gpuarray.to_gpu( np.ascontiguousarray(self.W_init[1,2],dtype = np.complex128) )
W24_GPU = gpuarray.to_gpu( np.ascontiguousarray(self.W_init[1,3],dtype = np.complex128) )
W31_GPU = gpuarray.to_gpu( np.ascontiguousarray(self.W_init[2,0],dtype = np.complex128) )
W32_GPU = gpuarray.to_gpu( np.ascontiguousarray(self.W_init[2,1],dtype = np.complex128) )
W33_GPU = gpuarray.to_gpu( np.ascontiguousarray(self.W_init[2,2],dtype = np.complex128) )
W34_GPU = gpuarray.to_gpu( np.ascontiguousarray(self.W_init[2,3],dtype = np.complex128) )
W41_GPU = gpuarray.to_gpu( np.ascontiguousarray(self.W_init[3,0],dtype = np.complex128) )
W42_GPU = gpuarray.to_gpu( np.ascontiguousarray(self.W_init[3,1],dtype = np.complex128) )
W43_GPU = gpuarray.to_gpu( np.ascontiguousarray(self.W_init[3,2],dtype = np.complex128) )
W44_GPU = gpuarray.to_gpu( np.ascontiguousarray(self.W_init[3,3],dtype = np.complex128) )
_W11_GPU = gpuarray.zeros_like(W11_GPU)
_W12_GPU = gpuarray.zeros_like(W11_GPU)
_W13_GPU = gpuarray.zeros_like(W11_GPU)
_W14_GPU = gpuarray.zeros_like(W11_GPU)
_W21_GPU = gpuarray.zeros_like(W11_GPU)
_W22_GPU = gpuarray.zeros_like(W11_GPU)
_W23_GPU = gpuarray.zeros_like(W11_GPU)
_W24_GPU = gpuarray.zeros_like(W11_GPU)
_W31_GPU = gpuarray.zeros_like(W11_GPU)
_W32_GPU = gpuarray.zeros_like(W11_GPU)
_W33_GPU = gpuarray.zeros_like(W11_GPU)
_W34_GPU = gpuarray.zeros_like(W11_GPU)
_W41_GPU = gpuarray.zeros_like(W11_GPU)
_W42_GPU = gpuarray.zeros_like(W11_GPU)
_W43_GPU = gpuarray.zeros_like(W11_GPU)
_W44_GPU = gpuarray.zeros_like(W11_GPU)
#################################################
print ' GPU memory Total ', pycuda.driver.mem_get_info()[1]/float(2**30) , 'GB'
print ' GPU memory Free ', pycuda.driver.mem_get_info()[0]/float(2**30) , 'GB'
timeSavedIndexRange = [0]
if self.frameSaveMode == 'Wigner_4X4':
self.save_WignerFunction( f11, 0,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU)
if self.frameSaveMode=='Density':
self.save_Density(
f11,0,W11_GPU,W22_GPU,W33_GPU,W44_GPU)
#..............................................................................
################################################################################################################
#
# MAIN LOOP
#
################################################################################################################
self.blockCUDA = (512,1,1)
self.gridCUDA = (self.P_gridDIM, 1 ,self.X_gridDIM/512 )
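        # Hedged note: each block runs 512 threads along x; the grid spans
        # P_gridDIM blocks in its first dimension and X_gridDIM/512 in its
        # third, so one thread handles exactly one (p, x) phase-space cell.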
initial_time = time.time()
B_GP_minus_GPU = gpuarray.empty_like( W11_GPU )
B_GP_plus_GPU = gpuarray.empty_like( W22_GPU )
Prob_X_GPU = gpuarray.empty( (self.X_gridDIM) , dtype = np.complex128 )
X_Average = []
P_Average = []
XP_Average = []
XX_Average = []
PP_Average = []
antiParticle_population = []
particle_population = []
Dirac_energy = []
Alpha_1_Average = []
D_1_Potential_0_Average = []
X1_D_1_Potential_0_Average = []
P1_D_1_Potential_0_Average = []
P1_Alpha_1_Average = []
X1_Alpha_1_Average = []
negativity = []
transmission = []
timeRange = np.array([0.])
print ' cuda grid = (', self.blockCUDA , ' , ' , self.gridCUDA, ')'
P_gridDIM_32 = np.int32(self.P_gridDIM)
aGPitaevskii_GPU = np.float64( self.grossPitaevskiiCoefficient )
gammaDamping_GPU = np.float64( self.gammaDamping )
for t_index in timeRangeIndex:
timeRange = np.append( timeRange , self.dt * t_index )
t_GPU = np.float64( self.dt * t_index )
#...............................................................................
if self.grossPitaevskiiCoefficient != 0. :
self.gpu_sum_axis0_Function(
Prob_X_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU,
P_gridDIM_32, block=(512,1,1), grid=(self.X_gridDIM/512,1) )
self.MakeGrossPitaevskiiTerms( B_GP_minus_GPU, B_GP_plus_GPU, Prob_X_GPU )
#............... Ehrenfest ....................................................
X_Average.append( self.X_Average( _W11_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU ) )
P_Average.append( self.P_Average( _W11_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU ) )
XP_Average.append( self.XP_Average( _W11_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU ) )
XX_Average.append( self.XX_Average( _W11_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU ) )
PP_Average.append( self.PP_Average( _W11_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU ) )
Alpha_1_Average.append(
self.Alpha_1_Average(_W11_GPU, W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU ) )
D_1_Potential_0_Average.append(
self.D_1_Potential_0_Average(_W11_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU,t_GPU ) )
#self.D_1_Potential_0_ExpectationValue(
# W11_GPU, W22_GPU, W33_GPU, W44_GPU, t_GPU, _W11_GPU)
X1_D_1_Potential_0_Average.append(
self.X1_D_1_Potential_0_Average(_W11_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU,t_GPU ) )
P1_D_1_Potential_0_Average.append(
self.P1_D_1_Potential_0_Average(_W11_GPU, W11_GPU, W22_GPU, W33_GPU, W44_GPU,t_GPU ) )
P1_Alpha_1_Average.append(
self.P1_Alpha_1_Average(_W11_GPU,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU ) )
X1_Alpha_1_Average.append(
self.X1_Alpha_1_Average(_W11_GPU,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU ) )
self.pickup_negatives_Function(_W11_GPU,
W11_GPU, W22_GPU, W33_GPU, W44_GPU,
block=self.blockCUDA, grid=self.gridCUDA)
negativity.append(
self.dX*self.dP*np.real(gpuarray.sum(_W11_GPU).get()))
self.transmission_Function(_W11_GPU,
W11_GPU, W22_GPU, W33_GPU, W44_GPU,
block=self.blockCUDA, grid=self.gridCUDA)
transmission.append(
self.dX*self.dP*np.real(gpuarray.sum(_W11_GPU).get()))
#................ Energy ................................................
if self.computeEnergy == True:
energy = self.DiracEnergy(_W11_GPU,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU, t_GPU)
Dirac_energy.append( energy )
# x p -> lambda p .........................................
self.Fourier_4X4_X_To_Lambda_GPU(
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU )
#
#............ Antiparticle production .....................
if self.antiParticleNorm == True:
antiParticleNorm = self.AntiParticlePopulation(
_W11_GPU, _W12_GPU, _W13_GPU, _W14_GPU,
_W21_GPU, _W22_GPU, _W23_GPU, _W24_GPU,
_W31_GPU, _W32_GPU, _W33_GPU, _W34_GPU,
_W41_GPU, _W42_GPU, _W43_GPU, _W44_GPU,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU)
antiParticle_population.append( antiParticleNorm )
"""particleNorm = self.ParticlePopulation(
_W11_GPU, _W12_GPU, _W13_GPU, _W14_GPU,
_W21_GPU, _W22_GPU, _W23_GPU, _W24_GPU,
_W31_GPU, _W32_GPU, _W33_GPU, _W34_GPU,
_W41_GPU, _W42_GPU, _W43_GPU, _W44_GPU,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU)
particle_population.append( particleNorm )"""
#############################################################
#
# propagation Lambda p
#
#############################################################
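            # Hedged reading of the split (from the kernel names): in the
            # (lambda, p) representation the free Dirac flow factorizes into
            # two one-sided kernels acting with momenta p + lambda/2 and
            # p - lambda/2; applying both advances the kinetic part of the
            # Wigner function by one full step dt.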
self.DiracPropagator_P_plus_Lambda(
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU,
block=self.blockCUDA, grid=self.gridCUDA )
#
self.DiracPropagator_P_minus_Lambda(
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU,
block=self.blockCUDA, grid=self.gridCUDA )
###########################################################################
#
# propagation potential
#
###########################################################################
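        # Hedged reading: the potential/mass part acts locally in the
        # (x, theta) representation at the shifted points x - theta/2 and
        # x + theta/2, hence the lambda -> x and p -> theta transforms below
        # followed by the two one-sided X_minus_Theta / X_plus_Theta kernels.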
# lambda p -> x p
self.Fourier_4X4_Lambda_To_X_GPU(
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU )
# x p -> x theta
self.Fourier_4X4_P_To_Theta_GPU(
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU )
self.DiracPropagator_X_minus_Theta(
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU, t_GPU,
B_GP_minus_GPU, B_GP_plus_GPU, aGPitaevskii_GPU,
block=self.blockCUDA, grid=self.gridCUDA )
self.DiracPropagator_X_plus_Theta(
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU, t_GPU,
B_GP_minus_GPU, B_GP_plus_GPU, aGPitaevskii_GPU,
block=self.blockCUDA, grid=self.gridCUDA )
##########################################################
#
# Damping
#
##########################################################
if self.gammaDamping > 0 :
if self.dampingModel == 'ODM':
self.DiracPropagator_DampingODM(
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU,
gammaDamping_GPU,
block=self.blockCUDA, grid=self.gridCUDA )
# x theta -> x p
self.Fourier_4X4_Theta_To_P_GPU(
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU )
if self.gammaDamping > 0 :
if self.dampingModel == 'CaldeiraLegget':
self.CaldeiraDissipatorOrder2_4x4(
_W11_GPU, _W12_GPU,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU )
#.......... Antiparticle filtering.........................
            if self.antiParticleStepFiltering == True:
self.Fourier_4X4_X_To_Lambda_GPU(
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU )
self.FilterAntiParticles(
_W11_GPU, _W12_GPU, _W13_GPU, _W14_GPU,
_W21_GPU, _W22_GPU, _W23_GPU, _W24_GPU,
_W31_GPU, _W32_GPU, _W33_GPU, _W34_GPU,
_W41_GPU, _W42_GPU, _W43_GPU, _W44_GPU,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU )
W11_GPU,_W11_GPU = self.swap(W11_GPU,_W11_GPU)
W12_GPU,_W12_GPU = self.swap(W12_GPU,_W12_GPU)
W13_GPU,_W13_GPU = self.swap(W13_GPU,_W13_GPU)
W14_GPU,_W14_GPU = self.swap(W14_GPU,_W14_GPU)
W21_GPU,_W21_GPU = self.swap(W21_GPU,_W21_GPU)
W22_GPU,_W22_GPU = self.swap(W22_GPU,_W22_GPU)
W23_GPU,_W23_GPU = self.swap(W23_GPU,_W23_GPU)
W24_GPU,_W24_GPU = self.swap(W24_GPU,_W24_GPU)
W31_GPU,_W31_GPU = self.swap(W31_GPU,_W31_GPU)
W32_GPU,_W32_GPU = self.swap(W32_GPU,_W32_GPU)
W33_GPU,_W33_GPU = self.swap(W33_GPU,_W33_GPU)
W34_GPU,_W34_GPU = self.swap(W34_GPU,_W34_GPU)
W41_GPU,_W41_GPU = self.swap(W41_GPU,_W41_GPU)
W42_GPU,_W42_GPU = self.swap(W42_GPU,_W42_GPU)
W43_GPU,_W43_GPU = self.swap(W43_GPU,_W43_GPU)
W44_GPU,_W44_GPU = self.swap(W44_GPU,_W44_GPU)
self.Fourier_4X4_Lambda_To_X_GPU(
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU )
w_absorb = np.float64(24.)
self.AbsorbBoundary_x_Function( w_absorb,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU
,block=self.blockCUDA, grid=self.gridCUDA )
#............................................................
self.Wigner_4X4_Normalize(
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU )
#................ Saving .....................................
if t_index % self.skipFrames == 0:
timeSavedIndexRange.append(t_index)
if self.frameSaveMode == 'Wigner_4X4':
self.save_WignerFunction( f11, t_index,
W11_GPU, W12_GPU, W13_GPU, W14_GPU,
W21_GPU, W22_GPU, W23_GPU, W24_GPU,
W31_GPU, W32_GPU, W33_GPU, W34_GPU,
W41_GPU, W42_GPU, W43_GPU, W44_GPU)
if self.frameSaveMode=='Density':
self.save_Density(
f11,t_index,W11_GPU,W22_GPU,W33_GPU,W44_GPU)
#.............................................................
final_time = time.time()
print ' computation time = ', final_time - initial_time, ' seconds'
#............................................................................
self.timeRange = timeRange
f11['timeRange'] = timeRange
self.antiParticle_population = np.array(antiParticle_population)
f11['antiParticle_population'] = self.antiParticle_population
#self.particle_population = np.array(particle_population)
#f11['particle_population'] = self.particle_population
self.Dirac_energy = np.array(Dirac_energy).real
f11['Dirac_energy'] = np.array(Dirac_energy)
self.timeSavedIndexRange = np.array(timeSavedIndexRange)
f11['timeSavedIndexRange'] = np.array(self.timeSavedIndexRange)
self.X_Average = np.array( X_Average ).real
f11['X_Average'] = self.X_Average
self.P_Average = np.array( P_Average ).real
f11['P_Average'] = self.P_Average
self.XP_Average = np.array( XP_Average ).real
f11['XP_Average'] = self.XP_Average
self.XX_Average = np.array( XX_Average ).real
f11['XX_Average'] = self.XX_Average
self.PP_Average = np.array( PP_Average ).real
f11['PP_Average'] = self.PP_Average
self.Alpha_1_Average = np.array( Alpha_1_Average ).real
f11['Alpha_1_Average'] = self.Alpha_1_Average
self.P1_Alpha_1_Average = np.array( P1_Alpha_1_Average ).real
f11['P1_Alpha_1_Average'] = self.P1_Alpha_1_Average
self.X1_Alpha_1_Average = np.array( X1_Alpha_1_Average ).real
f11['X1_Alpha_1_Average'] = self.X1_Alpha_1_Average
self.D_1_Potential_0_Average = np.array( D_1_Potential_0_Average ).real
f11['D_1_Potential_0_Average'] = self.D_1_Potential_0_Average
self.X1_D_1_Potential_0_Average = np.array( X1_D_1_Potential_0_Average ).real
f11['X1_D_1_Potential_0_Average'] = self.X1_D_1_Potential_0_Average
self.P1_D_1_Potential_0_Average = np.array( P1_D_1_Potential_0_Average ).real
f11['P1_D_1_Potential_0_Average'] = self.P1_D_1_Potential_0_Average
self.negativity = np.array(negativity).real
f11['Negativity'] = self.negativity
self.transmission = np.array(transmission).real
f11['transmission'] = self.transmission
#.............................................................................
f11.close()
W11 = W11_GPU.get()
W12 = W12_GPU.get()
W13 = W13_GPU.get()
W14 = W14_GPU.get()
W21 = W21_GPU.get()
W22 = W22_GPU.get()
W23 = W23_GPU.get()
W24 = W24_GPU.get()
W31 = W31_GPU.get()
W32 = W32_GPU.get()
W33 = W33_GPU.get()
W34 = W34_GPU.get()
W41 = W41_GPU.get()
W42 = W42_GPU.get()
W43 = W43_GPU.get()
W44 = W44_GPU.get()
W_end = np.array([ [W11,W12,W13,W14], [W21,W22,W23,W24], [W31,W32,W33,W34], [W41,W42,W43,W44] ])
self.plan_Z2Z_1D_Axes0.__del__()
self.plan_Z2Z_1D_Axes1.__del__()
self.plan_Z2Z_1D.__del__()
self.W_end = W_end
|
cabrer7/PyWignerCUDA
|
GPU_WignerDirac2D_MassPotential_4x4.py
|
Python
|
mit
| 128,968
|
[
"DIRAC",
"Gaussian",
"Psi4"
] |
45e7c2f7ccc0d805a4fde539182ed8d0c46e8b6a5fa20a389fc40011762c23e7
|
#!/usr/bin/python
from __future__ import print_function
import espressomd
from espressomd import thermostat
from espressomd import analyze
from espressomd import integrate
from espressomd import electrostatics
from espressomd import minimize_energy
from espressomd.interactions import *
import numpy as np
from threading import Thread
from math import *
from espressomd.visualization_opengl import *
from espressomd.shapes import *
print('8Ball BILLARD - An Espresso Visualizer Demo\nControls:\nNumpad 4/6: Adjust Angle\nNumpad 2/8: Adjust Impulse\nNumpad 5: Shoot')
#ESPRESSO
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
table_dim = [2.24,1.12]
system.box_l = [table_dim[0], 3, table_dim[1]]
visualizer = openGLLive(system,
ext_force_arrows = True,
ext_force_arrows_type_scale = [0.02],
ext_force_arrows_type_radii = [0.01],
background_color = [0.5,0.4,0.5],
drag_enabled = False,
particle_type_materials = ['medium', 'bright', 'bright', 'medium'],
particle_type_colors = [[1,1,1],[0.5,0.1,0.1],[0.1,0.2,0.4],[0.2,0.2,0.2]],
constraint_type_materials = ['dark'],
constraint_type_colors = [[0.1,0.424,0.011], [0.1,0.1,0.1]],
camera_position = [ 1.12, 2.8, 0.56],
window_size = [1000,600],
draw_axis = False,
light_pos = [table_dim[0]*0.5, 1.0, table_dim[1]*0.5],
light_colors = [[0.8, 0.8, 0.8], [0.9, 0.9, 0.9], [1.0, 1.0, 1.0]],
light_brightness = 1.0)
stopped = True
angle = np.pi*0.5
impulse = 10.0
def decreaseAngle():
global angle,impulse
if stopped:
angle += 0.01
system.part[0].ext_force = impulse*np.array([sin(angle),0,cos(angle)])
def increaseAngle():
global angle,impulse
if stopped:
angle -= 0.01
system.part[0].ext_force = impulse*np.array([sin(angle),0,cos(angle)])
def decreaseImpulse():
global impulse,angle
if stopped:
impulse -= 0.5
system.part[0].ext_force = impulse*np.array([sin(angle),0,cos(angle)])
def increaseImpulse():
global impulse,angle
if stopped:
impulse += 0.5
system.part[0].ext_force = impulse*np.array([sin(angle),0,cos(angle)])
def fire():
global stopped
if stopped:
stopped = False
system.part[0].v = system.part[0].v + impulse * np.array([sin(angle),0,cos(angle)])
system.part[0].fix = [0,1,0]
system.part[0].ext_force = [0,0,0]
visualizer.keyboardManager.register_button(KeyboardButtonEvent('4',KeyboardFireEvent.Hold,decreaseAngle))
visualizer.keyboardManager.register_button(KeyboardButtonEvent('6',KeyboardFireEvent.Hold,increaseAngle))
visualizer.keyboardManager.register_button(KeyboardButtonEvent('2',KeyboardFireEvent.Hold,decreaseImpulse))
visualizer.keyboardManager.register_button(KeyboardButtonEvent('8',KeyboardFireEvent.Hold,increaseImpulse))
visualizer.keyboardManager.register_button(KeyboardButtonEvent('5',KeyboardFireEvent.Pressed,fire))
def main():
global stopped
system.time_step = 0.00008
system.cell_system.skin = 0.4
table_h = 0.5
ball_diam = 0.0572
hole_dist = 0.02
hole_rad = 0.08
hole_score_rad = 0.1
hole_pos = [[hole_dist, table_h, hole_dist],
[hole_dist, table_h, table_dim[1] - hole_dist],
[table_dim[0] - hole_dist, table_h, hole_dist],
[table_dim[0] - hole_dist, table_h, table_dim[1] - hole_dist],
[table_dim[0]*0.5, table_h, table_dim[1] - hole_dist],
[table_dim[0]*0.5, table_h, hole_dist]]
types = {'cue_ball': 0,'striped_ball':1, 'solid_ball':2,'black_ball':3, 'table':4,'wall':5,'hole':6}
system.constraints.add(shape=Wall(dist=table_h,normal=[0.0,1.0,0.0]),particle_type=types['table'],penetrable=1)
system.constraints.add(shape=Wall(dist=0.01,normal=[1.0,0.0,0.0]),particle_type=types['wall'],penetrable=1)
system.constraints.add(shape=Wall(dist=-(table_dim[0]-0.01),normal=[-1.0,0.0,0.0]),particle_type=types['wall'],penetrable=1)
system.constraints.add(shape=Wall(dist=0.01,normal=[0.0,0.0,1.0]),particle_type=types['wall'],penetrable=1)
system.constraints.add(shape=Wall(dist=-(table_dim[1]-0.01),normal=[0.0,0.0,-1.0]),particle_type=types['wall'],penetrable=1)
for h in hole_pos:
system.constraints.add(shape=Cylinder(center=(np.array(h)-np.array([0,table_h*0.5,0])).tolist(), axis=[0,1,0],radius = hole_rad, length = 1.02*table_h, direction = 1),particle_type=types['hole'], penetrable=1)
lj_eps = np.array([1])
lj_sig = np.array([ball_diam])
lj_cut = lj_sig*2.0**(1.0/6.0)
lj_cap = 20
mass = np.array([0.17])
num_types=len(lj_sig)
#LENNARD JONES
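    # Lorentz-Berthelot ('LB') mixing rules: geometric mean for epsilon, arithmetic mean for sigma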
def mix_eps(eps1,eps2,rule='LB'):
return sqrt(eps1*eps2)
def mix_sig(sig1,sig2,rule='LB'):
return 0.5*(sig1+sig2)
for t1 in range(4):
for t2 in range(6):
system.non_bonded_inter[t1, t2].lennard_jones.set_params(
epsilon=mix_eps(lj_eps[0],lj_eps[0]),
sigma=mix_sig(lj_sig[0],lj_sig[0]),
cutoff=mix_sig(lj_cut[0],lj_cut[0]),
shift="auto")
ball_y = table_h+ball_diam*1.5
#PARTICLES
ball_start_pos = [table_dim[0]*0.25, ball_y, table_dim[1]*0.5]
system.part.add(id=0, pos=ball_start_pos ,type=types['cue_ball'],mass=mass[0])
spawnpos = []
spawnpos.append(ball_start_pos)
ball = system.part[0]
d = lj_sig[0]*1.15
a1 = np.array([d*sqrt(3)/2.0,0, -0.5*d])
a2 = np.array([d*sqrt(3)/2.0,0, 0.5*d])
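    # a1/a2 span the triangular rack lattice; row i of the rack holds i+1 balls (15 in total)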
sp = [system.box_l[0]*0.7, ball_y, system.box_l[2]*0.5+lj_sig[0]*0.5]
pid = 1
order = [
types['solid_ball'],
types['striped_ball'],types['solid_ball'],
types['solid_ball'],types['black_ball'],types['striped_ball'],
types['striped_ball'], types['solid_ball'],types['striped_ball'], types['solid_ball'],
types['solid_ball'], types['striped_ball'], types['striped_ball'],types['solid_ball'], types['striped_ball']]
for i in range(5):
for j in range(i+1):
N=i+1
t = order[pid-1]
pos = sp + a1*(N-j) + a2*j
system.part.add(id = pid, pos=pos, mass=mass[0],type =t, fix = [0,1,0])
spawnpos.append(pos)
pid += 1
ball.ext_force = impulse*np.array([sin(angle),0,cos(angle)])
ball.fix = [1,1,1]
system.thermostat.set_langevin(kT=0, gamma=0.8)
#ELECTROSTATICS
# p3m = electrostatics.P3M(prefactor=50, accuracy=1e-2)
# system.actors.add(p3m)
cleared_balls= [0,0]
while True:
system.integrator.run(1)
vsum = 0
for p in system.part:
vsum += np.linalg.norm(p.v)
for h in hole_pos:
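                # 2-D distance between the ball and the hole center in the table (x-z) plane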
d=((p.pos_folded[0]-h[0])**2 + (p.pos_folded[2]-h[2])**2)**0.5
if (d < hole_score_rad):
if p.id == 0:
p.pos = ball_start_pos
p.v = [0,0,0]
elif p.id == 5:
                        for p2 in system.part:
                            p2.pos = spawnpos[p2.id]
                            p2.v = [0,0,0]
                            p2.fix = [0,1,0]
                        ball.fix = [1,1,1]
                        ball.ext_force = impulse*np.array([sin(angle),0,cos(angle)])
                        stopped = True
else:
t = p.type-1
cleared_balls[t] += 1
if t == 0:
z=table_dim[1]-lj_sig[0]*0.6
else:
z=lj_sig[0]*0.6
p.pos = [cleared_balls[t] * lj_sig[0] * 1.5 , 1.1, z]
p.fix = [1,1,1]
p.v = [0,0,0]
if not stopped and vsum < 0.3:
stopped = True
ball.fix = [1,1,1]
for p in system.part:
p.v = [0,0,0]
ball.ext_force = impulse*np.array([sin(angle),0,cos(angle)])
visualizer.update()
t = Thread(target=main)
t.daemon = True
t.start()
visualizer.start()
|
KonradBreitsprecher/espresso
|
samples/billard.py
|
Python
|
gpl-3.0
| 8,188
|
[
"ESPResSo"
] |
ce20b8a426afd5f63986744bfc23e7a544e3d1ec53f727cd309a0a951f6332a1
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from .mcscf_solver import mcscf_solver
|
lothian/psi4
|
psi4/driver/procrouting/mcscf/__init__.py
|
Python
|
lgpl-3.0
| 953
|
[
"Psi4"
] |
961cc75089d45a948efee149a2b4c6567d00fac5b450397b65e68c6928ab8b4c
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from mantid.simpleapi import *
from vesuvio.backgrounds import PolynomialBackground
from vesuvio.fitting import FittingOptions, parse_fit_options
from vesuvio.profiles import GaussianMassProfile, GramCharlierMassProfile
class VesuvioFittingTest(unittest.TestCase):
def test_function_str_with_no_given_params_looks_as_expected(self):
fit_opts = self._create_test_fitting_opts()
expected = \
"composite=ComptonScatteringCountRate,NumDeriv=1,IntensityConstraints=\"Matrix(1|2)1.000000|-4.000000\";"\
"name=GramCharlierComptonProfile,Mass=1.007900,HermiteCoeffs=1 0 0,Width=5.000000;"\
"name=GaussianComptonProfile,Mass=16.000000,Width=10.000000"
self.assertEqual(expected, fit_opts.create_function_str())
# add background
fit_opts.background = PolynomialBackground(order=2)
expected += ";name=Polynomial,n=2"
self.assertEqual(expected, fit_opts.create_function_str())
def test_function_str_with_given_params_looks_as_expected(self):
fit_opts = self._create_test_fitting_opts()
param_vals = {"f0.Width": 7.5, "f0.FSECoeff": 0.1, "f0.C_0": 0.25,
"f0.C_2": 0.5, "f0.C_4": 0.75}
param_vals.update({"f1.Width": 11.0, "f1.Intensity": 4.5})
expected = \
"composite=CompositeFunction,NumDeriv=1;"\
"name=GramCharlierComptonProfile,Mass=1.007900,HermiteCoeffs=1 0 0,Width=7.500000,FSECoeff=0.100000,C_0=0.250000;"\
"name=GaussianComptonProfile,Mass=16.000000,Width=11.000000,Intensity=4.500000"
self.assertEqual(expected, fit_opts.create_function_str(param_vals))
fit_opts.background = PolynomialBackground(order=2)
param_vals.update({"f2.A0": 2.0, "f2.A1": 3.0, "f2.A2": 4.0})
expected += ";name=Polynomial,n=2,A0=2.000000,A1=3.000000,A2=4.000000"
self.assertEqual(expected, fit_opts.create_function_str(param_vals))
def test_constraint_str_gives_expected_value_when_width_has_constraint(self):
fit_opts = self._create_test_fitting_opts()
expected = "2.000000 < f0.Width < 7.000000,f0.C_0 > 0.0,f1.Intensity > 0.0"
self.assertEqual(expected, fit_opts.create_constraints_str())
# Fix the width and the constraint should be just the intensity
fit_opts.mass_profiles[0].width = 5.0
expected = "f0.C_0 > 0.0,f1.Intensity > 0.0"
self.assertEqual(expected, fit_opts.create_constraints_str())
def test_ties_str_gives_expected_value(self):
fit_opts = self._create_test_fitting_opts()
expected = "f0.Mass=1.007900,f1.Mass=16.000000,f1.Width=10.000000"
self.assertEqual(expected, fit_opts.create_ties_str())
# Fix the width and FSECoeff
fit_opts.mass_profiles[0].width = 5.0
fit_opts.mass_profiles[0].k_free = 0
expected = "f0.Mass=1.007900,f0.Width=5.000000,f0.FSECoeff=f0.Width*sqrt(2)/12,f1.Mass=16.000000,f1.Width=10.000000"
self.assertEqual(expected, fit_opts.create_ties_str())
def test_parse_fit_options(self):
mass_values = [1.0079, 16]
profile_strs = "function=Gaussian,width=[2,5,7];function=Gaussian,width=10.000000"
background_str = "function=Polynomial,order=2"
constraints_str = "[1,-4]"
fit_opts = parse_fit_options(mass_values, profile_strs, background_str, constraints_str)
        self.assertEqual(2, len(fit_opts.mass_profiles))
        self.assertTrue(isinstance(fit_opts.background, PolynomialBackground))
        self.assertEqual(1, len(fit_opts.intensity_constraints))
        self.assertEqual(2, len(fit_opts.intensity_constraints[0]))
def _create_test_fitting_opts(self):
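        # Two-mass test setup: a Gram-Charlier profile at mass 1.0079 (hydrogen) and a Gaussian at mass 16 (oxygen)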
gramc = GramCharlierMassProfile([2, 5, 7], 1.0079, [1, 0, 0], 1, 1)
gauss = GaussianMassProfile(10, 16)
constraints = list([1, -4])
return FittingOptions([gramc, gauss], intensity_constraints=constraints)
if __name__ == '__main__':
unittest.main()
|
mganeva/mantid
|
scripts/test/VesuvioFittingTest.py
|
Python
|
gpl-3.0
| 4,280
|
[
"Gaussian"
] |
ab75243848030047b03e118c3b622e2dc35fee74dc3d4aa796bcf346327b6ebe
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DistcpSettings(Model):
"""Distcp settings.
:param resource_manager_endpoint: Specifies the Yarn ResourceManager
endpoint. Type: string (or Expression with resultType string).
:type resource_manager_endpoint: object
:param temp_script_path: Specifies an existing folder path which will be
used to store temp Distcp command script. The script file is generated by
ADF and will be removed after Copy job finished. Type: string (or
Expression with resultType string).
:type temp_script_path: object
:param distcp_options: Specifies the Distcp options. Type: string (or
Expression with resultType string).
:type distcp_options: object
"""
_validation = {
'resource_manager_endpoint': {'required': True},
'temp_script_path': {'required': True},
}
_attribute_map = {
'resource_manager_endpoint': {'key': 'resourceManagerEndpoint', 'type': 'object'},
'temp_script_path': {'key': 'tempScriptPath', 'type': 'object'},
'distcp_options': {'key': 'distcpOptions', 'type': 'object'},
}
def __init__(self, resource_manager_endpoint, temp_script_path, distcp_options=None):
self.resource_manager_endpoint = resource_manager_endpoint
self.temp_script_path = temp_script_path
self.distcp_options = distcp_options
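    # Example usage (hypothetical values):
    #   settings = DistcpSettings(resource_manager_endpoint='myRM:8032',
    #                             temp_script_path='/tmp/distcp',
    #                             distcp_options='-m 10')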
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-datafactory/azure/mgmt/datafactory/models/distcp_settings.py
|
Python
|
mit
| 1,861
|
[
"ADF"
] |
a15b3c28cfc5901beb1f7aab2eda1bd40cef15a2858bb2b675f8a4a89847f3fe
|
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append('../src')
import Steppers as Step
import Fluxes as Flux
from PyRsw import Simulation
from constants import minute, hour, day
def test():
sim = Simulation()
# Geometry and Model Equations
sim.geomy = 'walls'
sim.stepper = Step.AB3
sim.method = 'Spectral'
sim.dynamics = 'Nonlinear'
sim.flux_method = Flux.spectral_sw
# Specify paramters
sim.Ly = 4000e3
sim.Ny = 128
sim.cfl = 0.5
sim.Hs = [100.]
sim.rho = [1025.]
sim.end_time = 5.*minute
# Plotting parameters
sim.animate = 'None'
sim.output = False
sim.diagnose = False
# Initialize the grid and zero solutions
sim.initialize()
for ii in range(sim.Nz):
sim.soln.h[:,:,ii] = sim.Hs[ii]
# Gaussian initial conditions
x0 = 1.*sim.Lx/2.
W = 200.e3
amp = 1.
sim.soln.h[:,:,0] += amp*np.exp(-(sim.Y)**2/(W**2))
sim.run()
|
PyRsw/PyRsw
|
testing/test_does_1d_run/test_walls.py
|
Python
|
mit
| 1,087
|
[
"Gaussian"
] |
278dffb36737b63857b1e3863635281633b4d5df11b0068fd3731aee95a58786
|
"""Matlab(tm) compatibility functions.
This will hopefully become a complete set of the basic functions available in
matlab. The syntax is kept as close to the matlab syntax as possible. One
fundamental change is that the first index in matlab varies the fastest (as in
FORTRAN). That means that it will usually perform reductions over columns,
whereas with this object the most natural reductions are over rows. It's perfectly
possible to make this work the way it does in matlab if that's desired.
"""
#import Matrix --- cannot use Matrix module here because Linear Algebra imports it
# and Matrix depends on Linear Algebra.
from numarray.numeric import *
import types  # used by roots() below
# Elementary Matrices
# zeros is from matrixmodule in C
# ones is from Numeric.py
def rand(*args):
"""rand(d1,...,dn) returns a matrix of the given dimensions
which is initialized to random numbers from a uniform distribution
in the range [0,1).
"""
import numarray.random_array as ra
return ra.random(args)
def randn(*args):
"""u = randn(d0,d1,...,dn) returns zero-mean, unit-variance Gaussian
random numbers in an array of size (d0,d1,...,dn)."""
import numarray.random_array as ra
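    # Box-Muller transform: two independent uniform samples give a standard normal deviate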
x1 = ra.random(args)
x2 = ra.random(args)
return sqrt(-2*log(x1))*cos(2*pi*x2)
def eye(N, M=None, k=0, typecode=None):
"""eye(N, M=N, k=0, typecode=None) returns a N-by-M matrix where the
k-th diagonal is all ones, and everything else is zeros.
"""
if M is None: M = N
if type(M) == type('d'):
typecode = M
M = N
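    # i-j == -k selects the k-th diagonal of the index-difference matrix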
m = equal(subtract.outer(arange(N), arange(M)),-k)
return asarray(m,typecode=typecode)
def tri(N, M=None, k=0, typecode=None):
"""tri(N, M=N, k=0, typecode=None) returns a N-by-M matrix where all
the diagonals starting from lower left corner up to the k-th are all ones.
"""
if M is None: M = N
if type(M) == type('d'):
typecode = M
M = N
m = greater_equal(subtract.outer(arange(N), arange(M)),-k)
return m.astype(typecode)
# Matrix manipulation
def diag(v, k=0):
"""diag(v,k=0) returns the k-th diagonal if v is a matrix or
returns a matrix with v as the k-th diagonal if v is a vector.
"""
v = asarray(v)
s = v.shape
if len(s)==1:
n = s[0]+abs(k)
if k > 0:
v = concatenate((zeros(k, v.typecode()),v))
elif k < 0:
v = concatenate((v,zeros(-k, v.typecode())))
return eye(n, k=k)*v
elif len(s)==2:
v = add.reduce(eye(s[0], s[1], k=k)*v)
if k > 0: return v[k:]
elif k < 0: return v[:k]
else: return v
else:
raise ValueError, "Input must be 1- or 2-D."
def fliplr(m):
"""fliplr(m) returns a 2-D matrix m with the rows preserved and
columns flipped in the left/right direction. Only works with 2-D
numarray.
"""
m = asarray(m)
if len(m.shape) != 2:
raise ValueError, "Input must be 2-D."
return m[:, ::-1]
def flipud(m):
"""flipud(m) returns a 2-D matrix with the columns preserved and
rows flipped in the up/down direction. Only works with 2-D numarray.
"""
m = asarray(m)
if len(m.shape) != 2:
raise ValueError, "Input must be 2-D."
return m[::-1]
# reshape(x, m, n) is not used, instead use reshape(x, (m, n))
def rot90(m, k=1):
"""rot90(m,k=1) returns the matrix found by rotating m by k*90 degrees
in the counterclockwise direction.
"""
m = asarray(m)
if len(m.shape) != 2:
raise ValueError, "Input must be 2-D."
k = k % 4
if k == 0: return m
elif k == 1: return transpose(fliplr(m))
elif k == 2: return fliplr(flipud(m))
elif k == 3: return fliplr(transpose(m))
def tril(m, k=0):
"""tril(m,k=0) returns the elements on and below the k-th diagonal of
m. k=0 is the main diagonal, k > 0 is above and k < 0 is below the main
diagonal.
"""
m = asarray(m)
out = tri(m.shape[0], m.shape[1], k=k, typecode=m.typecode())*m
return out
def triu(m, k=0):
"""triu(m,k=0) returns the elements on and above the k-th diagonal of
m. k=0 is the main diagonal, k > 0 is above and k < 0 is below the main
diagonal.
"""
m = asarray(m)
out = (1-tri(m.shape[0], m.shape[1], k-1, m.typecode()))*m
return out
# Data analysis
# Basic operations
def max(m,axis=0):
"""max(m,axis=0) returns the maximum of m along dimension axis.
"""
m = asarray(m)
return maximum.reduce(m,axis)
def min(m,axis=0):
"""min(m,axis=0) returns the minimum of m along dimension axis.
"""
m = asarray(m)
return minimum.reduce(m,axis)
# Actually from Basis, but it fits in so naturally here...
def ptp(m,axis=0):
"""ptp(m,axis=0) returns the maximum - minimum along the the given dimension
"""
m = asarray(m)
return max(m,axis)-min(m,axis)
def mean(m,axis=0):
"""mean(m,axis=0) returns the mean of m along the given dimension.
If m is of integer type, returns a floating point answer.
"""
m = asarray(m)
return add.reduce(m,axis)/float(m.shape[axis])
# sort is done in C but is done row-wise rather than column-wise
def msort(m):
"""msort(m) returns a sort along the first dimension of m as in MATLAB.
"""
m = asarray(m)
return transpose(sort(transpose(m)))
def median(m):
"""median(m) returns a median of m along the first dimension of m.
"""
sorted = msort(m)
if sorted.shape[0] % 2 == 1:
return sorted[int(sorted.shape[0]/2)]
else:
index=sorted.shape[0]/2
return (sorted[index-1]+sorted[index])/2.0
def std(m,axis=0):
"""std(m,axis=0) returns the standard deviation along the given
dimension of m. The result is unbiased with division by N-1.
If m is of integer type returns a floating point answer.
"""
x = asarray(m)
n = float(x.shape[axis])
mx = asarray(mean(x,axis))
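    # re-insert the reduced axis with length 1 so the mean broadcasts against x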
if axis < 0:
axis = len(x.shape) + axis
mx.shape = mx.shape[:axis] + (1,) + mx.shape[axis:]
x = x - mx
return sqrt(add.reduce(x*x,axis)/(n-1.0))
def cumsum(m,axis=0):
"""cumsum(m,axis=0) returns the cumulative sum of the elements along the
given dimension of m.
"""
m = asarray(m)
return add.accumulate(m,axis)
def prod(m,axis=0):
"""prod(m,axis=0) returns the product of the elements along the given
dimension of m.
"""
m = asarray(m)
return multiply.reduce(m,axis)
def cumprod(m,axis=0):
"""cumprod(m) returns the cumulative product of the elments along the
given dimension of m.
"""
m = asarray(m)
return multiply.accumulate(m,axis)
def trapz(y, x=None, axis=-1):
"""trapz(y,x=None,axis=-1) integrates y along the given dimension of
the data array using the trapezoidal rule.
"""
y = asarray(y)
if x is None:
d = 1.0
else:
d = diff(x,axis=axis)
nd = len(y.shape)
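    # slice1/slice2 select adjacent samples along 'axis'; their average times the spacing is the trapezoid area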
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1,None)
slice2[axis] = slice(None,-1)
return add.reduce(d * (y[slice1]+y[slice2])/2.0,axis)
def diff(x, n=1,axis=-1):
"""diff(x,n=1,axis=-1) calculates the n'th difference along the axis specified.
Note that the result is one shorter in the axis'th dimension.
Returns x if n == 0. Raises ValueError if n < 0.
"""
x = asarray(x)
nd = len(x.shape)
if nd == 0:
nd = 1
if n < 0:
raise ValueError, 'MLab.diff, order argument negative.'
elif n == 0:
return x
elif n == 1:
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1,None)
slice2[axis] = slice(None,-1)
return x[slice1]-x[slice2]
else:
        return diff(diff(x,1,axis), n-1, axis)
def cov(m,y=None, rowvar=0, bias=0):
"""Estimate the covariance matrix.
If m is a vector, return the variance. For matrices where each row
is an observation, and each column a variable, return the covariance
matrix. Note that in this case diag(cov(m)) is a vector of
variances for each column.
cov(m) is the same as cov(m, m)
Normalization is by (N-1) where N is the number of observations
(unbiased estimate). If bias is 1 then normalization is by N.
If rowvar is zero, then each row is a variable with
observations in the columns.
"""
if y is None:
y = m
else:
y = y
if rowvar:
m = transpose(m)
y = transpose(y)
if (m.shape[0] == 1):
m = transpose(m)
if (y.shape[0] == 1):
y = transpose(y)
N = m.shape[0]
if (y.shape[0] != N):
raise ValueError, "x and y must have the same number of observations."
m = m - mean(m,axis=0)
y = y - mean(y,axis=0)
if bias:
fact = N*1.0
else:
fact = N-1.0
#
val = squeeze(dot(transpose(m),conjugate(y)) / fact)
return val
def corrcoef(x, y=None):
"""The correlation coefficients
"""
c = cov(x, y)
d = diag(c)
return c/sqrt(multiply.outer(d,d))
# Added functions supplied by Travis Oliphant
import LinearAlgebra2 as LinearAlgebra
def squeeze(a):
"squeeze(a) returns a with any ones from the shape of a removed"
a = asarray(a)
b = asarray(a.shape)
return reshape (a, tuple (compress (not_equal (b, 1), b)))
def kaiser(M,beta):
"""kaiser(M, beta) returns a Kaiser window of length M with shape parameter
beta. It depends on the cephes module for the modified bessel function i0.
"""
import cephes
n = arange(0,M)
alpha = (M-1)/2.0
return cephes.i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/cephes.i0(beta)
def blackman(M):
"""blackman(M) returns the M-point Blackman window.
"""
n = arange(0,M)
return 0.42-0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
def bartlett(M):
"""bartlett(M) returns the M-point Bartlett window.
"""
n = arange(0,M)
return where(less_equal(n,(M-1)/2.0),2.0*n/(M-1),2.0-2.0*n/(M-1))
def hanning(M):
"""hanning(M) returns the M-point Hanning window.
"""
n = arange(0,M)
return 0.5-0.5*cos(2.0*pi*n/(M-1))
def hamming(M):
"""hamming(M) returns the M-point Hamming window.
"""
n = arange(0,M)
return 0.54-0.46*cos(2.0*pi*n/(M-1))
def sinc(x):
"""sinc(x) returns sin(pi*x)/(pi*x) at all points of array x.
"""
y = pi* where(x == 0, 1.0e-20, x)
return sin(y)/y
def eig(v):
"""[x,v] = eig(m) returns the eigenvalues of m in x and the corresponding
eigenvectors in the rows of v.
"""
return LinearAlgebra.eigenvectors(v)
def svd(v):
"""[u,x,v] = svd(m) return the singular value decomposition of m.
"""
return LinearAlgebra.singular_value_decomposition(v)
def angle(z):
"""phi = angle(z) return the angle of complex argument z."""
z = asarray(z)
if z.typecode() in ['D','F']:
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
return arctan2(zimag,zreal)
def roots(p):
""" return the roots of the polynomial coefficients in p.
The values in the rank-1 array p are coefficients of a polynomial.
If the length of p is n+1 then the polynomial is
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
"""
if type(p) in [types.IntType, types.FloatType, types.ComplexType]:
p = asarray([p])
else:
p = asarray(p)
n = len(p)
if len(p.shape) != 1:
raise ValueError, "Input must be a rank-1 array."
# Strip zeros at front of array
ind = 0
while (p[ind] == 0):
ind = ind + 1
p = asarray(p[ind:])
N = len(p)
root = zeros((N-1,),'D')
# Strip zeros at end of array which correspond to zero-valued roots
ind = len(p)
while (p[ind-1]==0):
ind = ind - 1
p = asarray(p[:ind])
N = len(p)
if N > 1:
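        # the eigenvalues of the polynomial's companion matrix are its roots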
A = diag(ones((N-2,),p.typecode()),-1)
A[0,:] = -p[1:] / p[0]
root[:N-1] = eig(A)[0]
if ((root.typecode() == 'F' and allclose(root.imag, 0, rtol=1e-7)) or
(root.typecode() == 'D' and allclose(root.imag, 0, rtol=1e-14))):
root = root.real
return root
|
fxia22/ASM_xf
|
PythonD/site_python/numarray/linear_algebra/mlab.py
|
Python
|
gpl-2.0
| 12,223
|
[
"Gaussian"
] |
7a178115849f30de2b3f16410024a542837bda578b881db746900ab16c1a6dd7
|
'''
MeneScraper is a big data project that monitors the social interactions of Meneame.
Copyright (C) 2015 Arnau Villoslada
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
'''
# Imports
from django.core.management.base import BaseCommand
from scraper.models import SocialSync, OriginalURL, Error, FreeProxy
from django.utils import timezone
from random import randint
# I know this can be done on one line like: urllib, json... but NINJA-IDE doesn't let me
import urllib
import json
import time
import socket
import datetime
# When called, it checks the twitter interactions of the given URL.
# If successful, it returns the interaction count reported by the Twitter API.
# If it fails, it returns False.
def TwitterScan(encoded_url):
# This tries to request the api, if successful extract the json
try:
twitter_req = urllib.urlopen(
"http://urls.api.twitter.com/1/urls/count.json?url={url}".format(url=encoded_url)
)
    # Exception occurred with urllib.urlopen(), store it in the Error model
except Exception as e:
error = Error(
error="URL: {url}\nTwitter API error: {error}".format(
error=e,
url=encoded_url,
),
created_by="social_sync.py",
)
error.save()
# Print that it failed
print " This one failed! Automatically saved in errors model"
# Return False so the Command function can know it failed.
return False
# Successfully requested the API, transform to JSON and return the total interactions
twitter_json = json.load(twitter_req)
return twitter_json["count"]
# When called, it tries to visit the Facebook API with the given URL to extract the
# total social interactions. If successful, it returns the total interactions;
# otherwise it returns False.
def FacebookScan(encoded_url, use_proxies):
# This checks if the user said that it wants to sync using proxies
if use_proxies:
        # This assigns the maximum time in seconds to wait for the urllib.urlopen() below to
# respond
socket.setdefaulttimeout(15)
        # This gets the least recently used proxy from the FreeProxy model.
# It also returns the proxy IP in a dictionary to use with urllib.urlopen
oldest_proxy, oldest_proxy_dic = OldestProxyDic()
        # The function to get the proxies failed; without proxies this function
        # can't continue, so return False.
if oldest_proxy is False:
return False
# If this runs it means the user didn't select any proxy.
    # Just assign an empty dictionary, that way urllib.urlopen() will ignore the proxies
# argument and use localhost
else:
oldest_proxy_dic = {"": ""}
# We have to use this because if the URL contains things like "?X=" the facebook API
# will think we are passing parameters to it.
encoded_url = urllib.quote(encoded_url)
# This tries to request the Facebook API. If successful, it will extract the JSON.
# If it fails it will return False to let the Command function know it did so.
try:
# IMPORTANT NOTE: The idea to use proxies was to bypass the API request limit.
# If we are using proxies + an access_token, it makes no sense to use proxies but
# for now we have to use an access_token until a new function is coded for the proxies.
# The reason for that is, the facebook API json changes when you visit it without an
# access token, that means the json is different and so a new function is required.
facebook_req = urllib.urlopen(
"https://graph.facebook.com/v2.3/{url}?access_token="
"CONFIGURE-ME: Add your own Facebook access token".format(url=encoded_url),
proxies=oldest_proxy_dic,
)
# The urllib.urlopen() returned an exception, store it in the Error model
except Exception as e:
error = Error(
error="URL: {url}\nurllib.urlopen() error: {error}".format(
error=e,
url=encoded_url,
),
created_by="social_sync.py",
)
error.save()
        # This checks if the exception was caused by any of the following reasons.
        # If it was, call the DisableProxy function to mark the used proxy as obsolete.
if "[Errno socket error]" in str(e) or "http protocol error" in str(e):
DisableProxy(oldest_proxy, e)
# Tell the user it failed
print " This one failed! Automatically saved in errors model"
# Return false instead of the social interactions
return False
    # This checks if the returned response is actually JSON. If it is, the proxy works;
    # if it isn't, the proxy is obsolete.
try:
facebook_json = json.load(facebook_req)
except Exception as e:
error = Error(
error=(
"URL: {url}\n"
"json.load() error: {error}\n"
"Proxy used during error: {proxy_ip}"
).format(
error=e,
url=encoded_url,
proxy_ip=oldest_proxy.ip,
),
created_by="social_sync.py",
)
error.save()
# This proxy is obsolete, call DisableProxy to mark it as such in the FreeProxy model
DisableProxy(oldest_proxy, e)
# Tell the user it failed
print " This one failed! Automatically saved in errors model"
# Tell the Command function it failed
return False
# This checks if there's a key called error in the facebook json.
# If so, the facebook api returned an error.
if "error" in facebook_json:
# Tell the user the API returned an exception and the exception message
print " Facebook API returned exception:"
print " {exception}".format(exception=facebook_json["error"]["message"])
# Store the error in the Error model
error = Error(
error="URL: {url}\nFacebook API returned exception:\n{error}".format(
error=facebook_json["error"]["message"],
url=encoded_url,
),
created_by="social_sync.py",
)
error.save()
# Return False instead of the total interactions
return False
# This checks if the Facebook API returned nothing for the requested URL
# This can happen if the URL is from facebook itself. I don't know why the facebook API
# does this.
if not "share" in facebook_json:
# Store the error in the errors model, it will probably spam the error model with this
# for a couple of days but it doesn't hurt.
error = Error(
error="URL: {url}\n"
"The Facebook API returned no shares.\n"
"This is known to happen if the URL in question is from Facebook itself".format(
url=encoded_url,
),
created_by="social_sync.py",
)
error.save()
        # Tell the user what happened, never bad practice
print " The Facebook API returned no shares, setting it to 0"
# It returns 0 because this way it will not delete the social interaction entry.
# We can't get the facebook interactions for this URL but it's still of
# interest to get the twitter ones.
return 0
# PLEASE READ BEFORE ADDING MORE CODE BELOW:
# it is important that "if not 'share' in facebook_json" stays at the bottom.
# Because this 'if' returns 0 if true, it is important that the other 'ifs' to detect
# errors run first.
# Everything went fine; return the total interactions
return facebook_json["share"]["share_count"]
# This function is used to select the best proxy to use to request the API
# If it fails, it returns false
def OldestProxyDic():
# This queryset ignores any proxy marked as obsolete and orders them by used date
oldest_proxy = FreeProxy.objects.filter(working=True).order_by("last_used_datetime")
# This counts how many results the above query returned
proxies_count = oldest_proxy.count()
# This part will choose randomly one of 4 proxies at the start of the queryset.
# This way there will be no pattern in the proxies we use and the ones that were used
# longer ago will be used now.
# No pattern = harder to detect by a machine and block it.
if proxies_count > 3:
# There's more than 4 proxies available in the list, randomly choose between 0 and 3
random_int = randint(0, 3)
elif proxies_count == 1:
# There is only one proxy available, select that one
random_int = 0
# Tell the user there's only one proxy left
print " WARNING: Only one proxy is available"
# Check if the FreeProxy model has proxies at all
elif proxies_count == 0:
# No proxies left, tell the user and return false so the FacebookScan
# function knows it failed
print " Error: No proxies found, please add more."
return (False, False)
else:
        random_int = randint(0, proxies_count - 1)  # randint is inclusive at both ends
# It gets the proxy selected randomly by the above if and else. This way we avoid
# creating a loop. If there's no loop, there's no pattern = harder to detect by
# a machine and block it.
oldest_proxy = oldest_proxy[random_int]
    # It generates the dictionary needed for urllib.urlopen() so the FacebookScan function
    # doesn't need to do it
oldest_proxy_dic = {"http": "http://" + oldest_proxy.ip}
    # Saves the last time the proxy we just selected was used, which is right now.
oldest_proxy.last_used_datetime = timezone.now()
oldest_proxy.save()
    # Return the proxy and the dictionary we just built
return (oldest_proxy, oldest_proxy_dic)
# This is needed to mark proxies as obsolete when required, and to respect the DRY rule.
# You can check why we might want to mark a proxy as obsolete in the FacebookScan function.
def DisableProxy(free_proxy, reason):
free_proxy.working = False
free_proxy.not_working_reason = reason
free_proxy.save()
print " Marked {proxy_ip} as obsolete".format(proxy_ip=free_proxy.ip)
class Command(BaseCommand):
# Just some help text for the ./manage.py help social_sync command
    # These comments(lint) are so Ninja-IDE ignores this line so it won't display errors
#lint:disable
help = "Get the Twitter and Facebook social interactions of all the links in the database"
#lint:enable
def add_arguments(self, parser):
# This is to define the argument --use-proxies. This way the user can select if
# he wants to use proxies or not to request the Facebook API
parser.add_argument("--use-proxies",
action="store_true",
dest="use_proxies",
default=False,
help="Use proxies to request the Facebook API",
)
        # This is a required argument. With --days-back the user can choose how many days
        # back they want to sync from the OriginalURL model
parser.add_argument("--days-back",
action="store",
dest="days_back",
default=False,
help="Define with an integer how many days back you want to sync\n"
"Example: Current date is 2015-30-05.\n"
" 'social_sync --days-back 0,1' would sync days 30 and 29",
)
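    # Example invocation (from the Django project root; assumes the app is installed):
    #   python manage.py social_sync --days-back 0,1 --use-proxies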
def handle(self, *args, **options):
# Checks if the user used the days-back argument, witch is required
if not options["days_back"]:
# Tell the user the days-back argument is required
return "Please define how many days back you want to sync with '--days-back'"
        # Split the user-supplied string on commas to get a list of values
days_back = options["days_back"].split(",")
        # This checks that the user gave us exactly two values.
        # If the numbers aren't separated by a comma this also triggers,
        # because .split(",") only splits on commas
if len(days_back) != 2:
return "Please use only two integers separated by a comma. EX: 0,4"
        # Tries to convert what the user gave us with the days-back argument to integers
try:
# Convert the strings the user gave us to integers
days_back[0] = int(days_back[0])
days_back[1] = int(days_back[1])
# This executes when the user doesn't use integers
except:
# Tell the user that the days-back argument only works with integers
return "Please use integers only with --days-back"
# Sort the integers the user gave us, this has to be done because the user
# could type the integers in reverse order by accident EX: 4,1
days_back = sorted(days_back)
        # This gets the current date and time and saves the date in current_date,
        # excluding the time, which we don't need
rightnow = timezone.now()
current_date = datetime.date(rightnow.year, rightnow.month, rightnow.day)
        # From the current date it subtracts X days to get the two necessary variables
        # for the queryset. X is defined by the user
from_date = current_date - datetime.timedelta(days=days_back[1])
to_date = current_date - datetime.timedelta(days=days_back[0])
        # Gets the objects from the date range the user gave us and loops over them
original_url = OriginalURL.objects.filter(pub_date__range=(from_date, to_date))
if not original_url.count():
return "No {days} days old entries found".format(days=days_back)
for url in original_url:
# Gets the url of the current object and encodes it to utf-8 to avoid crashes
actual_url = url.url.encode("utf-8")
            # Tell the user which URL is being processed
print "Processing {url}".format(url=actual_url)
# This calls the FacebookScan function to get the facebook interactions of the
# post URL
facebook_int = FacebookScan(actual_url, options["use_proxies"])
# This calls the TwitterScan function to get the twitter interactions of the
# post URL
twitter_int = TwitterScan(actual_url)
# If any of those functions returns false it means something went wrong, go
# to the next OriginalURL
if facebook_int is False or twitter_int is False:
continue
# Add the total interactions of facebook and twitter to get the total.
total_int = int(facebook_int + twitter_int)
# This creates a new object in the SocialSync model to save all the info that
# we just got and links this object with the right OriginalURL object
social_sync = SocialSync(
post_facebook_int=facebook_int,
post_twitter_int=twitter_int,
post_total_int=total_int,
scan_date=timezone.now(),
scan_time=timezone.now(),
scan_date_ts=int(time.time()),
original_url=url,
)
social_sync.save()
# Some messages for the user, it doesn't hurt
print " Added {facebook_int} Facebook interacitons".format(
facebook_int=facebook_int)
print " Added {twitter_int} Twitter interactions".format(
twitter_int=twitter_int)
print " Scan date: {now}".format(
now=timezone.localtime(timezone.now()))
print " Total URL interactions: {total_int}".format(
total_int=total_int)
# Let's now add the Meneame URL interactions
meneame_url = url.meneame_url.encode("utf-8")
# This calls the FacebookScan function to get the facebook interactions of the
# meneame URL
facebook_int = FacebookScan(meneame_url, options["use_proxies"])
# This calls the TwitterScan function to get the twitter interactions of the
# meneame URL
twitter_int = TwitterScan(meneame_url)
# If any of those functions returns false it means something went wrong, go
# to the next OriginalURL
if facebook_int is False or twitter_int is False:
if social_sync:
social_sync.delete()
print " Entry deleted due to a failure"
continue
# Add the total interactions of facebook and twitter to get the total.
total_int = int(facebook_int + twitter_int)
# If this executes it's because everything went just fine, so it adds them
social_sync.meneame_facebook_int = facebook_int
social_sync.meneame_twitter_int = twitter_int
social_sync.meneame_total_int = total_int
social_sync.save()
            # Let's tell the user that we are done and everything went just fine,
            # and give the total number of interactions
print " Total Meneame interactions: {total_int}".format(total_int=total_int)
print " Completed successfully"
|
TheDegree0/menescraper
|
scraper/management/commands/social_sync.py
|
Python
|
gpl-2.0
| 17,964
|
[
"VisIt"
] |
48fc26a88c5e8bcaa5cf139d262ccf8842545191dd8267d9a88f5c3dfd1127d2
|
# -*- coding: utf-8 -*-
""" Sahana Eden Climate Model
@copyright: 2011-2021 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
This file should be moved to s3db/climate.py to use, or else rewritten as custom_ models
"""
__all__ = ("ClimateModel",
"climate_first_run",
)
from gluon import *
from gluon.storage import Storage
from s3 import *
# =============================================================================
class ClimateModel(S3Model):
"""
Climate data is stored in dynamically created tables.
These tables can be added from the command line script add_table.py
in modules.ClimateDataPortal.
The table definitions are stored in climate_sample_table_spec.
        A datum is an observed value over a time quantum at a given place.
e.g. observed temperature in Kathmandu between Feb 2006 - April 2007
Places are currently points, i.e. lat/lon coordinates.
Places may be stations.
Places may have elevation or other optional information.
@ToDo: i18n
@ToDo: Deprecate raw SQL (Tested only on PostgreSQL)
"""
names = ("climate_place",
"climate_place_elevation",
"climate_place_station_name",
"climate_place_station_id",
"climate_sample_table_spec",
"climate_monthly_aggregation",
"climate_station_parameter",
"climate_prices",
"climate_purchase",
"climate_save_query",
)
def model(self):
T = current.T
db = current.db
auth = current.auth
NONE = current.messages["NONE"]
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
# ---------------------------------------------------------------------
# Climate Place
#
# This resource is spread over 4 tables, which we assume are linked by
# common IDs
#
# @ToDo: Migrate to gis_location?
        #           Although this table has many unused fields, so a performance hit?
# elevation is not included as it would just mean a performance hit
# when we are generating 2D maps without elevation info.
define_table("climate_place",
Field("longitude", "double",
notnull=True,
required=True,
),
Field("latitude", "double",
notnull=True,
required=True,
)
)
# ---------------------------------------------------------------------
# elevation may not be useful for future projects
# e.g. where not available, or sea-based stations
# also, elevation may be supplied for gridded data
define_table("climate_place_elevation",
Field("elevation_metres", "double",
notnull=True,
required=True,
),
)
# ---------------------------------------------------------------------
# not all places are stations with elevations
# as in the case of "gridded" data
# a station can only be in one place
tablename = "climate_place_station_name"
define_table(tablename,
Field("name", "double",
notnull=True,
required=True,
),
)
station_id = S3ReusableField("station_id", "reference %s" % tablename,
label = "Station",
ondelete = "RESTRICT",
requires = IS_ONE_OF(db,
"climate_place_station_name.id",
climate_station_represent,
orderby = "climate_place_station_name.name",
sort = True,
),
represent = climate_station_represent,
sortby = "name",
)
# ---------------------------------------------------------------------
# station id may not be useful or even meaningful
# e.g. gridded data has no stations.
# this is passive data so ok to store separately
define_table("climate_place_station_id",
Field("station_id", "integer",
notnull=True,
required=True,
),
)
# ---------------------------------------------------------------------
# coefficient of variance is meaningless for degrees C but Ok for Kelvin
# internally all scales must be ratio scales if coefficient
# of variations is to be allowed, (which it is)
# rainfall (mm), temp (K) are ok
# output units
define_table("climate_sample_table_spec",
Field("name",
notnull=True,
required=True,
),
Field("sample_type_code",
length = 1,
notnull = True,
# web2py requires a default value for not null fields
default = "",
required = True
),
Field("field_type",
notnull=True,
required=True,
),
Field("units",
notnull=True,
required=True,
),
Field("date_mapping",
default="",
notnull=True,
required=True
),
Field("grid_size", "double",
default = 0,
notnull = True,
required = True
)
)
parameter_id = S3ReusableField("parameter_id", "reference %s" % tablename,
sortby="name",
requires = IS_ONE_OF(db,
"climate_sample_table_spec.id",
sample_table_spec_represent,
sort=True
),
represent = sample_table_spec_represent,
label = "Parameter",
ondelete = "RESTRICT"
)
# ---------------------------------------------------------------------
define_table("climate_monthly_aggregation",
Field("sample_table_id",
db.climate_sample_table_spec,
notnull = True,
required = True
),
# this maps to the name of a python class
# that deals with the monthly aggregated data.
Field("aggregation",
notnull=True,
required=True,
)
)
# ---------------------------------------------------------------------
# Station Parameters
#
tablename = "climate_station_parameter"
define_table(tablename,
station_id(),
parameter_id(requires = IS_ONE_OF(db,
"climate_sample_table_spec.id",
sample_table_spec_represent,
sort=True
),
),
Field.Method("range_from",
climate_station_parameter_range_from),
Field.Method("range_to",
climate_station_parameter_range_to),
)
ADD = T("Add new Station Parameter")
crud_strings[tablename] = Storage(
label_create = ADD,
title_display = T("Station Parameter Details"),
title_list = T("Station Parameters"),
title_update = T("Edit Station Parameter"),
label_list_button = T("List Station Parameters"),
label_delete_button = T("Remove Station Parameter"),
msg_record_created = T("Station Parameter added"),
msg_record_modified = T("Station Parameter updated"),
msg_record_deleted = T("Station Parameter removed"),
msg_list_empty = T("No Station Parameters"))
configure(tablename,
insertable = False,
list_fields = [
"station_id",
"parameter_id",
(T("Range From"), "range_from"),
(T("Range To"), "range_to"),
]
)
# =====================================================================
# Purchase Data
#
nationality_opts = {
1:"Nepali Student",
2:"Others"
}
tablename = "climate_prices"
define_table(tablename,
Field("category", "integer",
label = T("Category"),
requires = IS_IN_SET(nationality_opts),
represent = lambda id: nationality_opts.get(id, NONE),
notnull = True,
required = True
),
parameter_id(
requires = IS_ONE_OF(db,
"climate_sample_table_spec.id",
sample_table_spec_represent,
filterby = "sample_type_code",
filter_opts = ("O",),
sort=True
),
notnull = True,
required = True,
represent = sample_table_spec_represent
),
Field("nrs_per_datum", "double",
label = T("NRs per datum"),
notnull = True,
required = True
)
)
configure(tablename,
create_onvalidation = self.climate_price_create_onvalidation,
list_fields=[
"category",
"parameter_id",
"nrs_per_datum"
]
)
ADD = T("Add new Dataset Price")
crud_strings[tablename] = Storage(
label_create = ADD,
title_display = T("Dataset Price Details"),
title_list = T("Dataset Prices"),
title_update = T("Edit Dataset Price"),
label_list_button = T("List Dataset Prices"),
label_delete_button = T("Remove Dataset Price"),
msg_record_created = T("Dataset Price added"),
msg_record_modified = T("Dataset Price updated"),
msg_record_deleted = T("Dataset Price removed"),
msg_list_empty = T("No Dataset Prices"))
system_roles = auth.get_system_roles()
ADMIN = system_roles.ADMIN
if auth.s3_has_role(ADMIN):
paid_writable = True
else:
paid_writable = False
tablename = "climate_purchase"
define_table(tablename,
#user_id(),
#Field("sample_type_code",
# "string",
# requires = IS_IN_SET(sample_type_code_opts),
# represent = lambda code: ClimateDataPortal.sample_table_types_by_code[code]
#),
Field("parameter_id", "integer",
requires = IS_ONE_OF(db,
"climate_prices.parameter_id",
sample_table_spec_represent,
),
represent = sample_table_spec_represent,
label = "Parameter",
ondelete = "RESTRICT"
),
station_id(),
s3_date("date_from",
default = "now",
empty=False
),
s3_date("date_to",
default = "now",
empty=False
),
Field("nationality", "integer",
label = T("Category"),
requires = IS_IN_SET(nationality_opts),
represent = lambda id: nationality_opts.get(id, NONE),
required = True
),
Field("notes", "text",
label = T("Receipt number / Student ID / other notes")
),
Field("price"),
Field("paid", "boolean",
represent = lambda opt: \
opt and "Yes" or "No",
writable = paid_writable,
),
Field("i_agree_to_the_terms_and_conditions", "boolean",
required = True,
represent = lambda agrees: agrees and "Yes" or "No",
comment = DIV(_class="stickytip",
_title="%s|%s" % (
T("Important"),
T("Check this box when you have read, "
"understand and agree to the "
"<a href='terms' target='_blank'>"
"terms and conditions"
"</a>."
)
)
)
),
*s3_meta_fields(),
on_define = lambda table: [table.owned_by_user.set_attributes(label = T("User")),
]
)
crud_strings[tablename] = Storage(
label_create = T("Purchase New Data"),
title_display = T("Purchased Data Details"),
title_list = T("All Purchased Data"),
title_update = T("Edit Purchased Data"),
label_list_button = T("List Dataset Prices"),
label_delete_button = T("Remove Purchased Data"),
msg_record_created = T("Data Purchase In Process"),
msg_record_modified = T("Purchased Data updated"),
msg_record_deleted = T("Purchased Data removed"),
msg_list_empty = T("No Data Purchased"))
configure(tablename,
onaccept = self.climate_purchase_onaccept,
create_next = URL(args = ["[id]", "read"]),
list_fields=[
"owned_by_user",
"parameter_id",
"station_id",
"date_from",
"date_to",
"nationality",
#"purpose",
"price",
"paid",
"i_agree_to_terms_and_conditions"
]
)
# =====================================================================
# Saved Queries
#
tablename = "climate_save_query"
define_table(tablename,
#user_id(),
Field("description"),
Field("query_definition", "text"),
)
crud_strings[tablename] = Storage(
label_create = T("Save Query"),
title_display = T("Saved Query Details"),
title_list = T("Saved Queries"),
title_update = T("Edit Saved Query"),
label_list_button = T("List Saved Queries"),
label_delete_button = T("Remove Saved Query"),
msg_record_created = T("Query Saved"),
msg_record_modified = T("Saved Query updated"),
msg_record_deleted = T("Saved Query removed"),
msg_list_empty = T("No Saved Queries"))
configure(tablename,
listadd = False,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return Storage()
# -------------------------------------------------------------------------
@staticmethod
def climate_price_create_onvalidation(form):
"""
"""
vars = form.request_vars
db = current.db
table = db.climate_prices
query = (table.category == vars["category"]) & \
(table.parameter_id == vars["parameter_id"])
price = db(query).select(table.id,
limitby=(0, 1)).first()
if price is not None:
form.errors["nrs_per_datum"] = [
"There is a conflicting price for the above category and parameter."
]
return False
else:
return True
# -------------------------------------------------------------------------
@staticmethod
def climate_purchase_onaccept(form):
"""
Calculate Price
"""
from templates.Climate import ClimateDataPortal
vars = form.vars
id = vars.id
db = current.db
ptable = db.climate_purchase
purchase = db(ptable.id == id).select(ptable.paid,
limitby=(0, 1)).first()
if (purchase and purchase.paid == True):
pass
else:
parameter_id = vars.parameter_id
table = db.climate_sample_table_spec
query = (table.id == parameter_id)
parameter_table = db(query).select(table.id,
table.date_mapping,
limitby=(0, 1)).first()
parameter_table_id = parameter_table.id
date_mapping_name = parameter_table.date_mapping
period = date_mapping_name
date_from = vars.date_from
date_to = vars.date_to
nationality = int(vars.nationality)
table = db.climate_prices
query = (table.category == nationality) & \
(table.parameter_id == parameter_id)
price_row = db(query).select(table.nrs_per_datum,
limitby=(0, 1)).first()
if price_row is None:
form.errors["price"] = ["There is no price set for this data"]
else:
price = price_row.nrs_per_datum
currency = {
1: "%.2f NRs",
2: "US$ %.2f"
}[nationality]
date_mapping = getattr(ClimateDataPortal, date_mapping_name)
start_date_number = date_mapping.date_to_time_period(date_from)
end_date_number = date_mapping.date_to_time_period(date_to)
place_id = int(vars.station_id)
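                # price = (number of stored readings in the requested window) * per-datum rate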
datum_count = db.executesql(
"SELECT COUNT(*) "
"FROM climate_sample_table_%(parameter_table_id)i "
"WHERE place_id = %(place_id)i "
"AND time_period >= %(start_date_number)i "
"AND time_period <= %(end_date_number)i;" % locals()
)[0][0]
ptable[id] = {"price": currency % (datum_count * price)}
# =============================================================================
def climate_station_represent(id, row=None):
"""
"""
if row:
id = row.id
    db = current.db
    s3db = current.s3db
table = s3db.climate_place_station_id
row_id = db(table.id == id).select(table.station_id,
limitby=(0,1)).first()
table = s3db.climate_place_station_name
row_name = db(table.id == id).select(table.name,
limitby=(0,1)).first()
if row_id and row_id.station_id:
represent = " (%s)" % row_id.station_id
else:
represent = ""
if row_name and row_name.name:
represent = "%s%s" % (row_name.name, represent)
return represent or current.messages["NONE"]
# =============================================================================
def sample_table_spec_represent(id, row=None):
"""
"""
if row:
id = row.id
from templates.Climate import ClimateDataPortal
table = current.s3db.climate_sample_table_spec
row = current.db(table.id == id).select(table.name,
table.sample_type_code,
limitby=(0, 1)).first()
if row:
return "%s %s" % (
ClimateDataPortal.sample_table_types_by_code[row.sample_type_code].__name__,
row.name
)
else:
return current.messages["NONE"]
# =============================================================================
def climate_station_parameter_range_from(row):
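    """
        First month-year for which data exists for this
        station/parameter combination
    """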
default = current.messages["NONE"]
if hasattr(row, "climate_station_parameter"):
row = row.climate_station_parameter
try:
parameter_id = row.parameter_id
station_id = row.station_id
except AttributeError:
return default
table = current.s3db.table("climate_sample_table_%s" % parameter_id)
if not table:
return default
date = table.time_period.min()
    row = current.db(table.place_id == station_id).select(date).first()
if row:
date = row[date]
from templates.Climate import ClimateDataPortal
year, month = ClimateDataPortal.month_number_to_year_month(date)
return "%s-%s" % (month, year)
else:
return default
# -------------------------------------------------------------------------
def climate_station_parameter_range_to(row):
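    """
        Last month-year for which data exists for this
        station/parameter combination
    """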
default = current.messages["NONE"]
if hasattr(row, "climate_station_parameter"):
row = row.climate_station_parameter
try:
parameter_id = row.parameter_id
station_id = row.station_id
except AttributeError:
return default
table = current.s3db.table("climate_sample_table_%s" % parameter_id)
if not table:
return default
date = table.time_period.max()
    row = current.db(table.place_id == station_id).select(date).first()
if row:
date = row[date]
from templates.Climate import ClimateDataPortal
year, month = ClimateDataPortal.month_number_to_year_month(date)
return "%s-%s" % (month, year)
else:
return default
# =============================================================================
def climate_first_run():
"""
Called from zzz_1st_run.py
Manual SQL Statements to run after tables are created
"""
errors = []
settings = current.deployment_settings
if settings.get_database_type() != "postgres":
errors.append("Climate unresolved dependency: PostgreSQL required")
try:
import rpy2
except ImportError:
errors.append("""
R is required by the climate data portal to generate charts
To install R: refer to:
http://cran.r-project.org/doc/manuals/R-admin.html
rpy2 is required to interact with python.
To install rpy2, refer to:
http://rpy.sourceforge.net/rpy2/doc-dev/html/overview.html
""")
try:
from Scientific.IO import NetCDF
except ImportError:
errors.append("Climate unresolved dependency: NetCDF required if you want to import readings")
try:
from scipy import stats
except ImportError:
errors.append("Climate unresolved dependency: SciPy required if you want to generate graphs on the map")
if errors:
# Report errors and stop.
prefix = "\n%s: " % current.T("ACTION REQUIRED")
msg = prefix + prefix.join(errors)
current.log.critical(msg)
raise HTTP(500, body=msg)
db = current.db
# Load all stations and parameters
s3db = current.s3db
ptable = s3db.climate_station_parameter
if not db(ptable.id > 0).select(ptable.id,
limitby=(0, 1)):
table = s3db.climate_place_station_name
station_rows = db(table.id > 0).select(table.id)
table = db.climate_sample_table_spec
query = (table.sample_type_code == "O")
        # The parameter list is the same for every station,
        # so read it only once
        parameter_rows = db(query).select(table.id)
        for station_row in station_rows:
            for parameter_row in parameter_rows:
ptable.insert(
station_id = station_row.id,
parameter_id = parameter_row.id
)
        db.executesql(
            "ALTER TABLE climate_sample_table_spec "
            "ADD CONSTRAINT climate_sample_table_name_sample_type_unique "
            "UNIQUE (name, sample_type_code); "
            "ALTER TABLE climate_prices "
            "ADD CONSTRAINT climate_price_unique "
            "UNIQUE (category, parameter_id);"
        )
db.commit()
# END =========================================================================
|
flavour/eden
|
modules/templates/historic/Climate/climate.py
|
Python
|
mit
| 27,767
|
[
"NetCDF"
] |
703a60c7324b9d6b098bca58ecb2320a6d3843cb75b0185dbf45e8555109977b
|
import sys
import pytest
import numpy as np
from numpy.testing import (
assert_, assert_raises, assert_equal,
assert_warns, assert_no_warnings, assert_array_equal,
assert_array_almost_equal, suppress_warnings)
from numpy.random import Generator, MT19937, SeedSequence
random = Generator(MT19937())
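# Module-level generator without a fixed seed; tests that need
# reproducible values construct their own seeded Generator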
@pytest.fixture(scope='module', params=[True, False])
def endpoint(request):
return request.param
class TestSeed(object):
def test_scalar(self):
s = Generator(MT19937(0))
assert_equal(s.integers(1000), 479)
s = Generator(MT19937(4294967295))
assert_equal(s.integers(1000), 324)
def test_array(self):
s = Generator(MT19937(range(10)))
assert_equal(s.integers(1000), 465)
s = Generator(MT19937(np.arange(10)))
assert_equal(s.integers(1000), 465)
s = Generator(MT19937([0]))
assert_equal(s.integers(1000), 479)
s = Generator(MT19937([4294967295]))
assert_equal(s.integers(1000), 324)
def test_seedsequence(self):
s = MT19937(SeedSequence(0))
assert_equal(s.random_raw(1), 2058676884)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, MT19937, -0.5)
assert_raises(ValueError, MT19937, -1)
def test_invalid_array(self):
# seed must be an unsigned integer
assert_raises(TypeError, MT19937, [-0.5])
assert_raises(ValueError, MT19937, [-1])
assert_raises(ValueError, MT19937, [1, -2, 4294967296])
def test_noninstantized_bitgen(self):
assert_raises(ValueError, Generator, MT19937)
class TestBinomial(object):
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial(object):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.integers(-5, -1) < -1)
x = random.integers(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, random.multinomial, 1, p,
float(1))
def test_invalid_prob(self):
assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9])
def test_invalid_n(self):
assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])
assert_raises(ValueError, random.multinomial, [-1] * 10, [0.8, 0.2])
def test_p_non_contiguous(self):
p = np.arange(15.)
p /= np.sum(p[1::3])
pvals = p[1::3]
random = Generator(MT19937(1432985819))
non_contig = random.multinomial(100, pvals=pvals)
random = Generator(MT19937(1432985819))
contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))
assert_array_equal(non_contig, contig)
class TestSetState(object):
def setup(self):
self.seed = 1234567890
self.rg = Generator(MT19937(self.seed))
self.bit_generator = self.rg.bit_generator
self.state = self.bit_generator.state
self.legacy_state = (self.state['bit_generator'],
self.state['state']['key'],
self.state['state']['pos'])
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.rg.standard_normal(size=3)
self.bit_generator.state = self.state
new = self.rg.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.rg.standard_normal()
state = self.bit_generator.state
old = self.rg.standard_normal(size=3)
self.bit_generator.state = state
new = self.rg.standard_normal(size=3)
assert_(np.all(old == new))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.rg.negative_binomial(0.5, 0.5)
class TestIntegers(object):
rfunc = random.integers
# valid integer/boolean types
itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self, endpoint):
assert_raises(TypeError, self.rfunc, 1, endpoint=endpoint, dtype=float)
def test_bounds_checking(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, endpoint=endpoint,
dtype=dt)
assert_raises(ValueError, self.rfunc, [lbnd - 1], ubnd,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [lbnd], [ubnd + 1],
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [ubnd], [lbnd],
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, [0],
endpoint=endpoint, dtype=dt)
def test_bounds_checking_array(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + (not endpoint)
assert_raises(ValueError, self.rfunc, [lbnd - 1] * 2, [ubnd] * 2,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [lbnd] * 2,
[ubnd + 1] * 2, endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, [lbnd] * 2,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [1] * 2, 0,
endpoint=endpoint, dtype=dt)
def test_rng_zero_and_extremes(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
is_open = not endpoint
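            # Each interval below contains a single value, so 1000 draws
            # from it must all equal the target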
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc([tgt], tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc(tgt, [tgt + is_open], size=1000,
endpoint=endpoint, dtype=dt), tgt)
tgt = (lbnd + ubnd) // 2
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc([tgt], [tgt + is_open],
size=1000, endpoint=endpoint, dtype=dt),
tgt)
def test_rng_zero_and_extremes_array(self, endpoint):
size = 1000
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
tgt = ubnd - 1
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
tgt = (lbnd + ubnd) // 2
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
def test_full_range(self, endpoint):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
try:
self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_full_range_array(self, endpoint):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
try:
self.rfunc([lbnd] * 2, [ubnd], endpoint=endpoint, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_in_bounds_fuzz(self, endpoint):
# Don't use fixed seed
random = Generator(MT19937())
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd - endpoint, size=2 ** 16,
endpoint=endpoint, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2 - endpoint, size=2 ** 16, endpoint=endpoint,
dtype=bool)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_scalar_array_equiv(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
size = 1000
random = Generator(MT19937(1234))
scalar = random.integers(lbnd, ubnd, size=size, endpoint=endpoint,
dtype=dt)
random = Generator(MT19937(1234))
scalar_array = random.integers([lbnd], [ubnd], size=size,
endpoint=endpoint, dtype=dt)
random = Generator(MT19937(1234))
array = random.integers([lbnd] * size, [ubnd] *
size, size=size, endpoint=endpoint, dtype=dt)
assert_array_equal(scalar, scalar_array)
assert_array_equal(scalar, array)
def test_repeatability(self, endpoint):
import hashlib
        # We use an md5 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': 'b3300e66d2bb59e493d255d47c3a6cbe',
'int16': '39624ead49ad67e37545744024d2648b',
'int32': '5c4810373f979336c6c0c999996e47a1',
'int64': 'ab126c15edff26f55c50d2b7e37391ac',
'int8': 'd1746364b48a020dab9ef0568e6c0cd2',
'uint16': '39624ead49ad67e37545744024d2648b',
'uint32': '5c4810373f979336c6c0c999996e47a1',
'uint64': 'ab126c15edff26f55c50d2b7e37391ac',
'uint8': 'd1746364b48a020dab9ef0568e6c0cd2'}
for dt in self.itype[1:]:
random = Generator(MT19937(1234))
# view as little endian for hash
if sys.byteorder == 'little':
val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
dtype=dt)
else:
val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
dtype=dt).byteswap()
res = hashlib.md5(val.view(np.int8)).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
# bools do not depend on endianness
random = Generator(MT19937(1234))
val = random.integers(0, 2 - endpoint, size=1000, endpoint=endpoint,
dtype=bool).view(np.int8)
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(bool).name] == res)
def test_repeatability_broadcasting(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt in (np.bool, bool, np.bool_) else np.iinfo(dt).min
ubnd = 2 if dt in (
np.bool, bool, np.bool_) else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
# view as little endian for hash
random = Generator(MT19937(1234))
val = random.integers(lbnd, ubnd, size=1000, endpoint=endpoint,
dtype=dt)
random = Generator(MT19937(1234))
val_bc = random.integers([lbnd] * 1000, ubnd, endpoint=endpoint,
dtype=dt)
assert_array_equal(val, val_bc)
random = Generator(MT19937(1234))
val_bc = random.integers([lbnd] * 1000, [ubnd] * 1000,
endpoint=endpoint, dtype=dt)
assert_array_equal(val, val_bc)
def test_int64_uint64_broadcast_exceptions(self, endpoint):
configs = {np.uint64: ((0, 2**65), (-1, 2**62), (10, 9), (0, 0)),
np.int64: ((0, 2**64), (-(2**64), 2**62), (10, 9), (0, 0),
(-2**63-1, -2**63-1))}
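        # Each (low, high) pair is out of range or inverted for its dtype,
        # so every broadcasting variant must raise ValueError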
for dtype in configs:
for config in configs[dtype]:
low, high = config
high = high - endpoint
low_a = np.array([[low]*10])
high_a = np.array([high] * 10)
assert_raises(ValueError, random.integers, low, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_a, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low, high_a,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_a, high_a,
endpoint=endpoint, dtype=dtype)
low_o = np.array([[low]*10], dtype=np.object)
high_o = np.array([high] * 10, dtype=np.object)
assert_raises(ValueError, random.integers, low_o, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low, high_o,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_o, high_o,
endpoint=endpoint, dtype=dtype)
def test_int64_uint64_corner_case(self, endpoint):
        # When stored in NumPy arrays, `lbnd` is cast
        # to np.int64, and `ubnd` is cast to np.uint64.
        # Checking whether `lbnd` >= `ubnd` used to be
        # done solely via direct comparison, which is incorrect
        # because when NumPy tries to compare both numbers,
        # it casts both to np.float64 because there is
        # no integer superset of np.int64 and np.uint64. However,
        # `ubnd` is too large to be represented in np.float64,
        # causing it to be rounded down to np.iinfo(np.int64).max,
        # leading to a ValueError because `lbnd` now equals
        # the new `ubnd`.
dt = np.int64
tgt = np.iinfo(np.int64).max
lbnd = np.int64(np.iinfo(np.int64).max)
ubnd = np.uint64(np.iinfo(np.int64).max + 1 - endpoint)
# None of these function calls should
# generate a ValueError now.
actual = random.integers(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert_equal(actual, tgt)
def test_respect_dtype_singleton(self, endpoint):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
dt = np.bool_ if dt is bool else dt
sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert_equal(sample.dtype, dt)
for dt in (bool, int, np.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert not hasattr(sample, 'dtype')
assert_equal(type(sample), dt)
def test_respect_dtype_array(self, endpoint):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
dt = np.bool_ if dt is bool else dt
sample = self.rfunc([lbnd], [ubnd], endpoint=endpoint, dtype=dt)
assert_equal(sample.dtype, dt)
sample = self.rfunc([lbnd] * 2, [ubnd] * 2, endpoint=endpoint,
dtype=dt)
assert_equal(sample.dtype, dt)
def test_zero_size(self, endpoint):
# See gh-7203
for dt in self.itype:
sample = self.rfunc(0, 0, (3, 0, 4), endpoint=endpoint, dtype=dt)
assert sample.shape == (3, 0, 4)
assert sample.dtype == dt
assert self.rfunc(0, -10, 0, endpoint=endpoint,
dtype=dt).shape == (0,)
assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape,
(3, 0, 4))
assert_equal(random.integers(0, -10, size=0).shape, (0,))
assert_equal(random.integers(10, 10, size=0).shape, (0,))
def test_error_byteorder(self):
other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'
with pytest.raises(ValueError):
random.integers(0, 200, size=10, dtype=other_byteord_dt)
class TestRandomDist(object):
# Make sure the random distribution returns the correct value for a
# given seed
def setup(self):
self.seed = 1234567890
def test_integers(self):
random = Generator(MT19937(self.seed))
actual = random.integers(-99, 99, size=(3, 2))
desired = np.array([[-80, -56], [41, 37], [-83, -16]])
assert_array_equal(actual, desired)
def test_integers_masked(self):
# Test masked rejection sampling algorithm to generate array of
# uint32 in an interval.
random = Generator(MT19937(self.seed))
actual = random.integers(0, 99, size=(3, 2), dtype=np.uint32)
desired = np.array([[9, 21], [70, 68], [8, 41]], dtype=np.uint32)
assert_array_equal(actual, desired)
def test_integers_closed(self):
random = Generator(MT19937(self.seed))
actual = random.integers(-99, 99, size=(3, 2), endpoint=True)
desired = np.array([[-80, -56], [ 41, 38], [-83, -15]])
assert_array_equal(actual, desired)
def test_integers_max_int(self):
        # Tests whether integers with endpoint=True can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
actual = random.integers(np.iinfo('l').max, np.iinfo('l').max,
endpoint=True)
desired = np.iinfo('l').max
assert_equal(actual, desired)
def test_random(self):
random = Generator(MT19937(self.seed))
actual = random.random((3, 2))
desired = np.array([[0.096999199829214, 0.707517457682192],
[0.084364834598269, 0.767731206553125],
[0.665069021359413, 0.715487190596693]])
assert_array_almost_equal(actual, desired, decimal=15)
random = Generator(MT19937(self.seed))
actual = random.random()
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_random_float(self):
random = Generator(MT19937(self.seed))
actual = random.random((3, 2))
desired = np.array([[0.0969992 , 0.70751746],
[0.08436483, 0.76773121],
[0.66506902, 0.71548719]])
assert_array_almost_equal(actual, desired, decimal=7)
def test_random_float_scalar(self):
random = Generator(MT19937(self.seed))
actual = random.random(dtype=np.float32)
desired = 0.0969992
assert_array_almost_equal(actual, desired, decimal=7)
def test_random_unsupported_type(self):
assert_raises(TypeError, random.random, dtype='int32')
def test_choice_uniform_replace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 4)
desired = np.array([0, 0, 2, 2], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([0, 1, 0, 1], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 3, replace=False)
desired = np.array([2, 0, 3], dtype=np.int64)
assert_array_equal(actual, desired)
actual = random.choice(4, 4, replace=False, shuffle=False)
desired = np.arange(4, dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([0, 2, 3], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
random = Generator(MT19937(self.seed))
actual = random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['a', 'a', 'c', 'c'])
assert_array_equal(actual, desired)
def test_choice_multidimensional_default_axis(self):
random = Generator(MT19937(self.seed))
actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 3)
desired = np.array([[0, 1], [0, 1], [4, 5]])
assert_array_equal(actual, desired)
def test_choice_multidimensional_custom_axis(self):
random = Generator(MT19937(self.seed))
actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 1, axis=1)
desired = np.array([[0], [2], [4], [6]])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
# gh-13087
assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(random.choice(2, replace=True)))
assert_(np.isscalar(random.choice(2, replace=False)))
assert_(np.isscalar(random.choice(2, replace=True, p=p)))
assert_(np.isscalar(random.choice(2, replace=False, p=p)))
assert_(np.isscalar(random.choice([1, 2], replace=True)))
assert_(random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
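        # An empty size tuple requests a 0-d array rather than a scalar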
assert_(not np.isscalar(random.choice(2, s, replace=True)))
assert_(not np.isscalar(random.choice(2, s, replace=False)))
assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
assert_(random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(random.choice(6, s, replace=True).shape, s)
assert_equal(random.choice(6, s, replace=False).shape, s)
assert_equal(random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)
# Check zero-size
assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
assert_equal(random.integers(0, -10, size=0).shape, (0,))
assert_equal(random.integers(10, 10, size=0).shape, (0,))
assert_equal(random.choice(0, size=0).shape, (0,))
assert_equal(random.choice([], size=(0,)).shape, (0,))
assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,
(3, 0, 4))
assert_raises(ValueError, random.choice, [], 10)
def test_choice_nan_probabilities(self):
a = np.array([42, 1, 2])
p = [None, None, None]
assert_raises(ValueError, random.choice, a, p=p)
def test_choice_p_non_contiguous(self):
p = np.ones(10) / 5
p[1::2] = 3.0
random = Generator(MT19937(self.seed))
non_contig = random.choice(5, 3, p=p[::2])
random = Generator(MT19937(self.seed))
contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
assert_array_equal(non_contig, contig)
def test_choice_return_type(self):
# gh 9867
p = np.ones(4) / 4.
actual = random.choice(4, 2)
assert actual.dtype == np.int64
actual = random.choice(4, 2, replace=False)
assert actual.dtype == np.int64
actual = random.choice(4, 2, p=p)
assert actual.dtype == np.int64
actual = random.choice(4, 2, p=p, replace=False)
assert actual.dtype == np.int64
def test_choice_large_sample(self):
import hashlib
choice_hash = 'd44962a0b1e92f4a3373c23222244e21'
random = Generator(MT19937(self.seed))
actual = random.choice(10000, 5000, replace=False)
if sys.byteorder != 'little':
actual = actual.byteswap()
res = hashlib.md5(actual.view(np.int8)).hexdigest()
assert_(choice_hash == res)
def test_bytes(self):
random = Generator(MT19937(self.seed))
actual = random.bytes(10)
desired = b'\x86\xf0\xd4\x18\xe1\x81\t8%\xdd'
assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-11442
lambda x: (np.asarray([(i, i) for i in x],
[("a", int), ("b", int)])
.view(np.recarray)),
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, (1,)),
("b", np.int32, (1,))])]:
random = Generator(MT19937(self.seed))
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
random.shuffle(alist)
actual = alist
desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7])
assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_permutation(self):
random = Generator(MT19937(self.seed))
alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
actual = random.permutation(alist)
desired = [4, 1, 9, 8, 0, 5, 3, 6, 2, 7]
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
actual = random.permutation(arr_2d)
assert_array_equal(actual, np.atleast_2d(desired).T)
def test_beta(self):
random = Generator(MT19937(self.seed))
actual = random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.083029353267698e-10, 2.449965303168024e-11],
[2.397085162969853e-02, 3.590779671820755e-08],
[2.830254190078299e-04, 1.744709918330393e-01]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
random = Generator(MT19937(self.seed))
actual = random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[42, 41],
[42, 48],
[44, 50]])
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.binomial(100.123, .456)
desired = 42
assert_array_equal(actual, desired)
def test_chisquare(self):
random = Generator(MT19937(self.seed))
actual = random.chisquare(50, size=(3, 2))
desired = np.array([[32.9850547060149, 39.0219480493301],
[56.2006134779419, 57.3474165711485],
[55.4243733880198, 55.4209797925213]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
random = Generator(MT19937(self.seed))
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.5439892869558927, 0.45601071304410745],
[0.5588917345860708, 0.4411082654139292 ]],
[[0.5632074165063435, 0.43679258349365657],
[0.54862581112627, 0.45137418887373015]],
[[0.49961831357047226, 0.5003816864295278 ],
[0.52374806183482, 0.47625193816517997]]])
assert_array_almost_equal(actual, desired, decimal=15)
bad_alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, bad_alpha)
random = Generator(MT19937(self.seed))
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha)
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
# gh-2089
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, alpha)
def test_dirichlet_alpha_non_contiguous(self):
a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
alpha = a[::2]
random = Generator(MT19937(self.seed))
non_contig = random.dirichlet(alpha, size=(3, 2))
random = Generator(MT19937(self.seed))
contig = random.dirichlet(np.ascontiguousarray(alpha),
size=(3, 2))
assert_array_almost_equal(non_contig, contig)
def test_exponential(self):
random = Generator(MT19937(self.seed))
actual = random.exponential(1.1234, size=(3, 2))
desired = np.array([[0.098845481066258, 1.560752510746964],
[0.075730916041636, 1.769098974710777],
[1.488602544592235, 2.49684815275751 ]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(random.exponential(scale=0), 0)
assert_raises(ValueError, random.exponential, scale=-0.)
def test_f(self):
random = Generator(MT19937(self.seed))
actual = random.f(12, 77, size=(3, 2))
desired = np.array([[0.461720027077085, 1.100441958872451],
[1.100337455217484, 0.91421736740018 ],
[0.500811891303113, 0.826802454552058]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
random = Generator(MT19937(self.seed))
actual = random.gamma(5, 3, size=(3, 2))
desired = np.array([[ 5.03850858902096, 7.9228656732049 ],
[18.73983605132985, 19.57961681699238],
[18.17897755150825, 18.17653912505234]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
random = Generator(MT19937(self.seed))
actual = random.geometric(.123456789, size=(3, 2))
desired = np.array([[ 1, 10],
[ 1, 12],
[ 9, 10]])
assert_array_equal(actual, desired)
def test_geometric_exceptions(self):
assert_raises(ValueError, random.geometric, 1.1)
assert_raises(ValueError, random.geometric, [1.1] * 10)
assert_raises(ValueError, random.geometric, -0.1)
assert_raises(ValueError, random.geometric, [-0.1] * 10)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.geometric, np.nan)
assert_raises(ValueError, random.geometric, [np.nan] * 10)
def test_gumbel(self):
random = Generator(MT19937(self.seed))
actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[ 4.688397515056245, -0.289514845417841],
[ 4.981176042584683, -0.633224272589149],
[-0.055915275687488, -0.333962478257953]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(random.gumbel(scale=0), 0)
assert_raises(ValueError, random.gumbel, scale=-0.)
def test_hypergeometric(self):
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[ 9, 9],
[ 9, 9],
[10, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
random = Generator(MT19937(self.seed))
actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[-3.156353949272393, 1.195863024830054],
[-3.435458081645966, 1.656882398925444],
[ 0.924824032467446, 1.251116432209336]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(random.laplace(scale=0), 0)
assert_raises(ValueError, random.laplace, scale=-0.)
def test_logistic(self):
random = Generator(MT19937(self.seed))
actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[-4.338584631510999, 1.890171436749954],
[-4.64547787337966 , 2.514545562919217],
[ 1.495389489198666, 1.967827627577474]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
random = Generator(MT19937(self.seed))
actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[ 0.0268252166335, 13.9534486483053],
[ 0.1204014788936, 2.2422077497792],
[ 4.2484199496128, 12.0093343977523]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(random.lognormal(sigma=0), 1)
assert_raises(ValueError, random.lognormal, sigma=-0.)
def test_logseries(self):
random = Generator(MT19937(self.seed))
actual = random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[14, 17],
[3, 18],
[5, 1]])
assert_array_equal(actual, desired)
def test_logseries_exceptions(self):
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.logseries, np.nan)
assert_raises(ValueError, random.logseries, [np.nan] * 10)
def test_multinomial(self):
random = Generator(MT19937(self.seed))
actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
desired = np.array([[[1, 5, 1, 6, 4, 3],
[4, 2, 6, 2, 4, 2]],
[[5, 3, 2, 6, 3, 1],
[4, 4, 0, 2, 3, 7]],
[[6, 3, 1, 5, 3, 2],
[5, 5, 3, 1, 2, 4]]])
assert_array_equal(actual, desired)
def test_multivariate_normal(self):
random = Generator(MT19937(self.seed))
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = random.multivariate_normal(mean, cov, size)
desired = np.array([[[-1.747478062846581, 11.25613495182354 ],
[-0.9967333370066214, 10.342002097029821 ]],
[[ 0.7850019631242964, 11.181113712443013 ],
[ 0.8901349653255224, 8.873825399642492 ]],
[[ 0.7130260107430003, 9.551628690083056 ],
[ 0.7127098726541128, 11.991709234143173 ]]])
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = random.multivariate_normal(mean, cov)
desired = np.array([0.233278563284287, 9.424140804347195])
assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
mean = [0, 0]
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
# and that it doesn't warn with RuntimeWarning check_valid='ignore'
assert_no_warnings(random.multivariate_normal, mean, cov,
check_valid='ignore')
# and that it raises with RuntimeWarning check_valid='raises'
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='raise')
cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
with suppress_warnings() as sup:
random.multivariate_normal(mean, cov)
w = sup.record(RuntimeWarning)
assert len(w) == 0
mu = np.zeros(2)
cov = np.eye(2)
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='other')
assert_raises(ValueError, random.multivariate_normal,
np.zeros((2, 1, 1)), cov)
assert_raises(ValueError, random.multivariate_normal,
mu, np.empty((3, 2)))
assert_raises(ValueError, random.multivariate_normal,
mu, np.eye(3))
def test_negative_binomial(self):
random = Generator(MT19937(self.seed))
actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[543, 727],
[775, 760],
[600, 674]])
assert_array_equal(actual, desired)
def test_negative_binomial_exceptions(self):
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.negative_binomial, 100, np.nan)
assert_raises(ValueError, random.negative_binomial, 100,
[np.nan] * 10)
def test_noncentral_chisquare(self):
random = Generator(MT19937(self.seed))
actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[ 1.70561552362133, 15.97378184942111],
[13.71483425173724, 20.17859633310629],
[11.3615477156643 , 3.67891108738029]])
assert_array_almost_equal(actual, desired, decimal=14)
actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[9.41427665607629e-04, 1.70473157518850e-04],
[1.14554372041263e+00, 1.38187755933435e-03],
[1.90659181905387e+00, 1.21772577941822e+00]])
assert_array_almost_equal(actual, desired, decimal=14)
random = Generator(MT19937(self.seed))
actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[0.82947954590419, 1.80139670767078],
[6.58720057417794, 7.00491463609814],
[6.31101879073157, 6.30982307753005]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
random = Generator(MT19937(self.seed))
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[0.060310671139 , 0.23866058175939],
[0.86860246709073, 0.2668510459738 ],
[0.23375780078364, 1.88922102885943]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f_nan(self):
random = Generator(MT19937(self.seed))
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)
assert np.isnan(actual)
def test_normal(self):
random = Generator(MT19937(self.seed))
actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[-3.618412914693162, 2.635726692647081],
[-2.116923463013243, 0.807460983059643],
[ 1.446547137248593, 2.485684213886024]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(random.normal(scale=0), 0)
assert_raises(ValueError, random.normal, scale=-0.)
def test_pareto(self):
random = Generator(MT19937(self.seed))
actual = random.pareto(a=.123456789, size=(3, 2))
desired = np.array([[1.0394926776069018e+00, 7.7142534343505773e+04],
[7.2640150889064703e-01, 3.4650454783825594e+05],
[4.5852344481994740e+04, 6.5851383009539105e+07]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
random = Generator(MT19937(self.seed))
actual = random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[0, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('int64').max
lamneg = -1
assert_raises(ValueError, random.poisson, lamneg)
assert_raises(ValueError, random.poisson, [lamneg] * 10)
assert_raises(ValueError, random.poisson, lambig)
assert_raises(ValueError, random.poisson, [lambig] * 10)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.poisson, np.nan)
assert_raises(ValueError, random.poisson, [np.nan] * 10)
def test_power(self):
random = Generator(MT19937(self.seed))
actual = random.power(a=.123456789, size=(3, 2))
desired = np.array([[1.977857368842754e-09, 9.806792196620341e-02],
[2.482442984543471e-10, 1.527108843266079e-01],
[8.188283434244285e-02, 3.950547209346948e-01]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
random = Generator(MT19937(self.seed))
actual = random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[ 4.51734079831581, 15.6802442485758 ],
[ 4.19850651287094, 17.08718809823704],
[14.7907457708776 , 15.85545333419775]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(random.rayleigh(scale=0), 0)
assert_raises(ValueError, random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
random = Generator(MT19937(self.seed))
actual = random.standard_cauchy(size=(3, 2))
desired = np.array([[-1.489437778266206, -3.275389641569784],
[ 0.560102864910406, -0.680780916282552],
[-1.314912905226277, 0.295852965660225]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
random = Generator(MT19937(self.seed))
actual = random.standard_exponential(size=(3, 2), method='inv')
desired = np.array([[0.102031839440643, 1.229350298474972],
[0.088137284693098, 1.459859985522667],
[1.093830802293668, 1.256977002164613]])
assert_array_almost_equal(actual, desired, decimal=15)
    def test_standard_exponential_type_error(self):
assert_raises(TypeError, random.standard_exponential, dtype=np.int32)
def test_standard_gamma(self):
random = Generator(MT19937(self.seed))
actual = random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[0.62970724056362, 1.22379851271008],
[3.899412530884 , 4.12479964250139],
[3.74994102464584, 3.74929307690815]])
assert_array_almost_equal(actual, desired, decimal=14)
    def test_standard_gamma_scalar_float(self):
random = Generator(MT19937(self.seed))
actual = random.standard_gamma(3, dtype=np.float32)
desired = 2.9242148399353027
assert_array_almost_equal(actual, desired, decimal=6)
def test_standard_gamma_float(self):
random = Generator(MT19937(self.seed))
actual = random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[0.62971, 1.2238 ],
[3.89941, 4.1248 ],
[3.74994, 3.74929]])
assert_array_almost_equal(actual, desired, decimal=5)
    def test_standard_gamma_float_out(self):
actual = np.zeros((3, 2), dtype=np.float32)
random = Generator(MT19937(self.seed))
random.standard_gamma(10.0, out=actual, dtype=np.float32)
desired = np.array([[10.14987, 7.87012],
[ 9.46284, 12.56832],
[13.82495, 7.81533]], dtype=np.float32)
assert_array_almost_equal(actual, desired, decimal=5)
random = Generator(MT19937(self.seed))
random.standard_gamma(10.0, out=actual, size=(3, 2), dtype=np.float32)
assert_array_almost_equal(actual, desired, decimal=5)
def test_standard_gamma_unknown_type(self):
assert_raises(TypeError, random.standard_gamma, 1.,
dtype='int32')
def test_out_size_mismatch(self):
out = np.zeros(10)
assert_raises(ValueError, random.standard_gamma, 10.0, size=20,
out=out)
assert_raises(ValueError, random.standard_gamma, 10.0, size=(10, 1),
out=out)
def test_standard_gamma_0(self):
assert_equal(random.standard_gamma(shape=0), 0)
assert_raises(ValueError, random.standard_gamma, shape=-0.)
def test_standard_normal(self):
random = Generator(MT19937(self.seed))
actual = random.standard_normal(size=(3, 2))
desired = np.array([[-1.870934851846581, 1.25613495182354 ],
[-1.120190126006621, 0.342002097029821],
[ 0.661545174124296, 1.181113712443012]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_normal_unsupported_type(self):
assert_raises(TypeError, random.standard_normal, dtype=np.int32)
def test_standard_t(self):
random = Generator(MT19937(self.seed))
actual = random.standard_t(df=10, size=(3, 2))
desired = np.array([[-1.484666193042647, 0.30597891831161 ],
[ 1.056684299648085, -0.407312602088507],
[ 0.130704414281157, -2.038053410490321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
random = Generator(MT19937(self.seed))
actual = random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[ 7.86664070590917, 13.6313848513185 ],
[ 7.68152445215983, 14.36169131136546],
[13.16105603911429, 13.72341621856971]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
random = Generator(MT19937(self.seed))
actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[2.13306255040998 , 7.816987531021207],
[2.015436610109887, 8.377577533009589],
[7.421792588856135, 7.891185744455209]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
# (fmax / 1e17) - fmin is within range, so this should not throw
# account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
# DBL_MAX by increasing fmin a bit
random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
# Regression test for gh: 8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, random.uniform, throwing_float,
throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[ 1.107972248690106, 2.841536476232361],
[ 1.832602376042457, 1.945511926976032],
[-0.260147475776542, 2.058047492231698]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
random = Generator(MT19937(self.seed))
r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
assert_(np.isfinite(r).all())
def test_vonmises_nan(self):
random = Generator(MT19937(self.seed))
r = random.vonmises(mu=0., kappa=np.nan)
assert_(np.isnan(r))
def test_wald(self):
random = Generator(MT19937(self.seed))
actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[0.26871721804551, 3.2233942732115 ],
[2.20328374987066, 2.40958405189353],
[2.07093587449261, 0.73073890064369]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
random = Generator(MT19937(self.seed))
actual = random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.138613914769468, 1.306463419753191],
[0.111623365934763, 1.446570494646721],
[1.257145775276011, 1.914247725027957]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
random = Generator(MT19937(self.seed))
assert_equal(random.weibull(a=0, size=12), np.zeros(12))
assert_raises(ValueError, random.weibull, a=-0.)
def test_zipf(self):
random = Generator(MT19937(self.seed))
actual = random.zipf(a=1.23, size=(3, 2))
desired = np.array([[ 1, 1],
[ 10, 867],
[354, 2]])
assert_array_equal(actual, desired)
class TestBroadcast(object):
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setup(self):
self.seed = 123456789
def test_uniform(self):
random = Generator(MT19937(self.seed))
low = [0]
high = [1]
uniform = random.uniform
desired = np.array([0.16693771389729, 0.19635129550675, 0.75563050964095])
random = Generator(MT19937(self.seed))
actual = random.uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
random = Generator(MT19937(self.seed))
actual = random.uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
random = Generator(MT19937(self.seed))
desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097])
random = Generator(MT19937(self.seed))
actual = random.normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.normal, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
normal = random.normal
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
desired = np.array([0.18719338682602, 0.73234824491364, 0.17928615186455])
random = Generator(MT19937(self.seed))
beta = random.beta
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
random = Generator(MT19937(self.seed))
actual = random.beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
actual = random.exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
std_gamma = random.standard_gamma
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
desired = np.array([1.34491986425611, 0.42760990636187, 1.4355697857258])
random = Generator(MT19937(self.seed))
gamma = random.gamma
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
random = Generator(MT19937(self.seed))
gamma = random.gamma
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
desired = np.array([0.07765056244107, 7.72951397913186, 0.05786093891763])
random = Generator(MT19937(self.seed))
f = random.f
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
random = Generator(MT19937(self.seed))
f = random.f
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
desired = np.array([2.02434240411421, 12.91838601070124, 1.24395160354629])
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_noncentral_f_small_df(self):
random = Generator(MT19937(self.seed))
desired = np.array([0.04714867120827, 0.1239390327694])
actual = random.noncentral_f(0.9, 0.9, 2, size=2)
assert_array_almost_equal(actual, desired, decimal=14)
def test_chisquare(self):
df = [1]
bad_df = [-1]
desired = np.array([0.05573640064251, 1.47220224353539, 2.9469379318589])
random = Generator(MT19937(self.seed))
actual = random.chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
desired = np.array([0.07710766249436, 5.27829115110304, 0.630732147399])
random = Generator(MT19937(self.seed))
nonc_chi = random.noncentral_chisquare
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_chi = random.noncentral_chisquare
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
desired = np.array([-1.39498829447098, -1.23058658835223, 0.17207021065983])
random = Generator(MT19937(self.seed))
actual = random.standard_t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.standard_t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
desired = np.array([2.25935584988528, 2.23326261461399, -2.84152146503326])
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.vonmises, mu * 3, bad_kappa)
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
desired = np.array([0.95905052946317, 0.2383810889437 , 1.04988745750013])
random = Generator(MT19937(self.seed))
actual = random.pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
actual = random.weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
desired = np.array([0.48954864361052, 0.19249412888486, 0.51216834058807])
random = Generator(MT19937(self.seed))
actual = random.power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([-1.09698732625119, -0.93470271947368, 0.71592671378202])
random = Generator(MT19937(self.seed))
laplace = random.laplace
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
laplace = random.laplace
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([1.70020068231762, 1.52054354273631, -0.34293267607081])
random = Generator(MT19937(self.seed))
gumbel = random.gumbel
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
gumbel = random.gumbel
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([-1.607487640433, -1.40925686003678, 1.12887112820397])
random = Generator(MT19937(self.seed))
actual = random.logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.logistic, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
actual = random.logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.logistic, loc, bad_scale * 3)
assert_equal(random.logistic(1.0, 0.0), 1.0)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
desired = np.array([0.67884390500697, 2.21653186290321, 1.01990310084276])
random = Generator(MT19937(self.seed))
lognormal = random.lognormal
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
random = Generator(MT19937(self.seed))
        actual = random.lognormal(mean, sigma * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
desired = np.array([0.60439534475066, 0.66120048396359, 1.67873398389499])
random = Generator(MT19937(self.seed))
actual = random.rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
desired = np.array([0.38052407392905, 0.50701641508592, 0.484935249864])
random = Generator(MT19937(self.seed))
actual = random.wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.wald, bad_mean * 3, scale)
assert_raises(ValueError, random.wald, mean * 3, bad_scale)
random = Generator(MT19937(self.seed))
actual = random.wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.wald, bad_mean, scale * 3)
assert_raises(ValueError, random.wald, mean, bad_scale * 3)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2
desired = np.array([1.57781954604754, 1.62665986867957, 2.30090130831326])
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
right)
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
right)
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
right * 3)
assert_raises(ValueError, triangular, 10., 0., 20.)
assert_raises(ValueError, triangular, 10., 25., 20.)
assert_raises(ValueError, triangular, 10., 10., 10.)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([0, 0, 1])
random = Generator(MT19937(self.seed))
binom = random.binomial
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
random = Generator(MT19937(self.seed))
actual = random.binomial(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([0, 2, 1], dtype=np.int64)
random = Generator(MT19937(self.seed))
neg_binom = random.negative_binomial
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
random = Generator(MT19937(self.seed))
neg_binom = random.negative_binomial
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
lam = [1]
bad_lam_one = [-1]
desired = np.array([0, 0, 3])
random = Generator(MT19937(self.seed))
max_lam = random._poisson_lam_max
bad_lam_two = [max_lam * 2]
poisson = random.poisson
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
desired = np.array([1, 8, 1])
random = Generator(MT19937(self.seed))
zipf = random.zipf
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, zipf, np.nan)
assert_raises(ValueError, zipf, [0, 0, np.nan])
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([1, 1, 3])
random = Generator(MT19937(self.seed))
geometric = random.geometric
actual = geometric(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geometric, bad_p_one * 3)
assert_raises(ValueError, geometric, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [-1]
bad_nsample_two = [4]
desired = np.array([0, 0, 1])
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two)
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two)
random = Generator(MT19937(self.seed))
hypergeom = random.hypergeometric
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
assert_raises(ValueError, hypergeom, -1, 10, 20)
assert_raises(ValueError, hypergeom, 10, -1, 20)
assert_raises(ValueError, hypergeom, 10, 10, -1)
assert_raises(ValueError, hypergeom, 10, 10, 25)
# ValueError for arguments that are too big.
assert_raises(ValueError, hypergeom, 2**30, 10, 20)
assert_raises(ValueError, hypergeom, 999, 2**31, 50)
assert_raises(ValueError, hypergeom, 999, [2**29, 2**30], 1000)
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
desired = np.array([1, 1, 1])
random = Generator(MT19937(self.seed))
logseries = random.logseries
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
def test_multinomial(self):
random = Generator(MT19937(self.seed))
actual = random.multinomial([5, 20], [1 / 6.] * 6, size=(3, 2))
desired = np.array([[[0, 0, 2, 1, 2, 0],
[2, 3, 6, 4, 2, 3]],
[[1, 0, 1, 0, 2, 1],
[7, 2, 2, 1, 4, 4]],
[[0, 2, 0, 1, 2, 0],
[3, 2, 3, 3, 4, 5]]], dtype=np.int64)
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.multinomial([5, 20], [1 / 6.] * 6)
desired = np.array([[0, 0, 2, 1, 2, 0],
[2, 3, 6, 4, 2, 3]], dtype=np.int64)
assert_array_equal(actual, desired)
class TestThread(object):
# make sure each state produces the same sequence even in threads
def setup(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(Generator(MT19937(s)), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(Generator(MT19937(s)), o)
# these platforms change x87 fpu precision mode in threads
if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
assert_array_almost_equal(out1, out2)
else:
assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)
self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput(object):
def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
def test_one_arg_funcs(self):
funcs = (random.exponential, random.standard_gamma,
random.chisquare, random.standard_t,
random.pareto, random.weibull,
random.power, random.rayleigh,
random.poisson, random.zipf,
random.geometric, random.logseries)
probfuncs = (random.geometric, random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(self.argOne)
assert_equal(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (random.uniform, random.normal,
random.beta, random.gamma,
random.f, random.noncentral_chisquare,
random.vonmises, random.laplace,
random.gumbel, random.logistic,
random.lognormal, random.wald,
random.binomial, random.negative_binomial)
probfuncs = (random.binomial, random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
out = func(self.argOne, argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
assert_equal(out.shape, self.tgtShape)
def test_integers(self, endpoint):
itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
func = random.integers
high = np.array([1])
low = np.array([0])
for dt in itype:
out = func(low, high, endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
out = func(low[0], high, endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
out = func(low, high[0], endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
funcs = [random.noncentral_f, random.triangular,
random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
assert_equal(out.shape, self.tgtShape)
|
ahaldane/numpy
|
numpy/random/tests/test_generator_mt19937.py
|
Python
|
bsd-3-clause
| 84,506
|
[
"Gaussian"
] |
7ca0ad94cf33881c4c7fe4b579513780ffdeab4cbcbbf07ba0ba77a54a28251c
|
import numpy as np
from .control import model_setup
from .cp_confocal import twod, threed
from .cp_mix import double_pnum
# 3D + 2D + T
def CF_Gxyz_3d2d_gauss(parms, tau):
u""" Two-component, two- and three-dimensional diffusion
with a Gaussian laser profile.
particle2D = (1-F)/ (1+τ/τ_2D)
particle3D = α²*F/( (1+τ/τ_3D) * sqrt(1+τ/(τ_3D*SP²)))
norm = (1-F + α*F)²
        G = 1/n*(particle2D + particle3D)/norm + offset
*parms* - a list of parameters.
Parameters (parms[i]):
[0] n Effective number of particles in confocal volume
(n = n2D+n3D)
    [1] τ_2D    Diffusion time of surface bound particles
[2] τ_3D Diffusion time of freely diffusing particles
[3] F Fraction of molecules of the freely diffusing species
(n3D = n*F), 0 <= F <= 1
[4] SP SP=z₀/r₀ Structural parameter,
describes elongation of the confocal volume
[5] α Relative molecular brightness of particle
3D compared to particle 2D (α = q3D/q2D)
[6] offset
*tau* - lag time
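    Example (a minimal sketch; the parameter values mirror the module
    defaults below and are illustrative only):
        >>> import numpy as np
        >>> tau = np.logspace(-3, 3, 100)  # lag times
        >>> parms = [25, 240, 0.1, 0.5, 7, 1.0, 0.0]
        >>> G = CF_Gxyz_3d2d_gauss(parms, tau)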
"""
n = parms[0]
taud2D = parms[1]
taud3D = parms[2]
F = parms[3]
SP = parms[4]
alpha = parms[5]
off = parms[6]
g = double_pnum(n=n,
F1=1-F,
alpha=alpha,
comp1=twod,
comp2=threed,
kwargs1={"tau": tau,
"taudiff": taud2D},
kwargs2={"tau": tau,
"taudiff": taud3D,
"SP": SP},
)
G = off + g
return G
def supplements(parms, countrate=None):
u"""Supplementary parameters:
Effective number of freely diffusing particles in 3D solution:
[7] n3D = n*F
    Effective number of particles diffusing on the 2D surface:
    [8] n2D = n*(1-F)
"""
# We can only give you the effective particle number
n = parms[0]
F3d = parms[3]
Info = list()
# The enumeration of these parameters is very important for
# plotting the normalized curve. Countrate must come out last!
Info.append([u"n3D", n*F3d])
Info.append([u"n2D", n*(1.-F3d)])
if countrate is not None:
# CPP
cpp = countrate/n
Info.append([u"cpp [kHz]", cpp])
return Info
parms = [
25, # n
240, # taud2D
0.1, # taud3D
0.5, # F3D
7, # SP
1.0, # alpha
0.0 # offset
]
# Boundaries
# strictly positive
boundaries = [[0, np.inf]]*len(parms)
# F
boundaries[3] = [0, .9999999999999]
boundaries[-1] = [-np.inf, np.inf]
model_setup(
modelid=6036,
name="Separate 3D and 2D diffusion (confocal)",
comp="3D+2D",
mtype="Confocal (Gaussian)",
fctn=CF_Gxyz_3d2d_gauss,
par_labels=[
u"n",
u"τ_2D [ms]",
u"τ_3D [ms]",
u"F_3D",
u"SP",
u"\u03b1"+" (q_3D/q_2D)",
u"offset"
],
par_values=parms,
par_vary=[True, True, True, True, False, False, False],
par_boundaries=boundaries,
par_constraints=[[2, "<", 1]],
supplementary_method=supplements
)
|
paulmueller/PyCorrFit
|
pycorrfit/models/model_confocal_3d_2d.py
|
Python
|
gpl-2.0
| 3,282
|
[
"Gaussian"
] |
be08851fdddf37fcd795e1b4d83f813f0e0c29ba0e9a23e90b10638657d41cbf
|
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API default to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
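A minimal Python sketch of that same paginated request (the `requests`
library and `YOUR_KEY` are assumptions, not part of this SDK):
```
import requests
resp = requests.get(
    "https://api.vericred.com/networks",
    headers={"Vericred-Api-Key": "YOUR_KEY"},
    params={"page": 2, "per_page": 5},
)
total = resp.headers["Total"]  # pagination metadata exposed as headers
```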
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s),
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size since many of the `Provider`s will share a `State`
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
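For example, resolving each provider's state in Python (a sketch over the
payload shape shown above):
```
states_by_id = {state["id"]: state for state in payload["states"]}
for provider in payload["providers"]:
    provider_state = states_by_id[provider["state_id"]]
```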
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
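The equivalent request in Python (again a sketch; `requests` and
`YOUR_KEY` are assumptions):
```
import requests
resp = requests.get(
    "https://api.vericred.com/providers/12345",
    headers={"Vericred-Api-Key": "YOUR_KEY"},
    params={"select": "provider.name,provider.phone,states.name,states.code"},
)
data = resp.json()
```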
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class ProviderShowResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, provider=None):
"""
ProviderShowResponse - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'provider': 'Provider'
}
self.attribute_map = {
'provider': 'provider'
}
self._provider = provider
@property
def provider(self):
"""
Gets the provider of this ProviderShowResponse.
The requested provider.
:return: The provider of this ProviderShowResponse.
:rtype: Provider
"""
return self._provider
@provider.setter
def provider(self, provider):
"""
Sets the provider of this ProviderShowResponse.
The requested provider.
:param provider: The provider of this ProviderShowResponse.
:type: Provider
"""
self._provider = provider
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
vericred/vericred-python
|
vericred_client/models/provider_show_response.py
|
Python
|
apache-2.0
| 12,182
|
[
"VisIt"
] |
4891365c19738df04121368c49eb868e30f1e9793bece213e23910be8f5a90d4
|
from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
class LaplaceInplace(Benchmark):
params = ['inplace', 'normal']
param_names = ['update']
def setup(self, update):
N = 150
Niter = 1000
dx = 0.1
dy = 0.1
dx2 = (dx * dx)
dy2 = (dy * dy)
def num_update(u, dx2, dy2):
u[1:(-1), 1:(-1)] = ((((u[2:, 1:(-1)] + u[:(-2), 1:(-1)]) * dy2) +
((u[1:(-1), 2:] + u[1:(-1), :(-2)]) * dx2))
/ (2 * (dx2 + dy2)))
def num_inplace(u, dx2, dy2):
tmp = u[:(-2), 1:(-1)].copy()
np.add(tmp, u[2:, 1:(-1)], out=tmp)
np.multiply(tmp, dy2, out=tmp)
tmp2 = u[1:(-1), 2:].copy()
np.add(tmp2, u[1:(-1), :(-2)], out=tmp2)
np.multiply(tmp2, dx2, out=tmp2)
np.add(tmp, tmp2, out=tmp)
np.multiply(tmp, (1.0 / (2.0 * (dx2 + dy2))),
out=u[1:(-1), 1:(-1)])
def laplace(N, Niter=100, func=num_update, args=()):
u = np.zeros([N, N], order='C')
u[0] = 1
for i in range(Niter):
func(u, *args)
return u
func = {'inplace': num_inplace, 'normal': num_update}[update]
def run():
laplace(N, Niter, func, args=(dx2, dy2))
self.run = run
def time_it(self, update):
self.run()
class MaxesOfDots(Benchmark):
def setup(self):
np.random.seed(1)
nsubj = 5
nfeat = 100
ntime = 200
self.arrays = [np.random.normal(size=(ntime, nfeat))
                       for i in range(nsubj)]
def maxes_of_dots(self, arrays):
"""
A magical feature score for each feature in each dataset
:ref:`Haxby et al., Neuron (2011) <HGC+11>`.
If arrays are column-wise zscore-d before computation it
results in characterizing each column in each array with
sum of maximal correlations of that column with columns
in other arrays.
Arrays must agree only on the first dimension.
        For numpy it is a joint benchmark of dot products and max()
on a set of arrays.
"""
feature_scores = ([0] * len(arrays))
for (i, sd) in enumerate(arrays):
for (j, sd2) in enumerate(arrays[(i + 1):]):
corr_temp = np.dot(sd.T, sd2)
feature_scores[i] += np.max(corr_temp, axis=1)
feature_scores[((j + i) + 1)] += np.max(corr_temp, axis=0)
return feature_scores
def time_it(self):
self.maxes_of_dots(self.arrays)
|
mingwpy/numpy
|
benchmarks/benchmarks/bench_app.py
|
Python
|
bsd-3-clause
| 2,716
|
[
"NEURON"
] |
e6842391fd4c00cec34f08ac1fe1d9bd2f1169d8655958784b84e21956fe1099
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ProcessPoolExecutor.
The following diagram and text describe the data-flow through the system:
|======================= In-process =====================|== Out-of-process ==|
+----------+ +----------+ +--------+ +-----------+ +---------+
| | => | Work Ids | => | | => | Call Q | => | |
| | +----------+ | | +-----------+ | |
| | | ... | | | | ... | | |
| | | 6 | | | | 5, call() | | |
| | | 7 | | | | ... | | |
| Process | | ... | | Local | +-----------+ | Process |
| Pool | +----------+ | Worker | | #1..n |
| Executor | | Thread | | |
| | +----------- + | | +-----------+ | |
| | <=> | Work Items | <=> | | <= | Result Q | <= | |
| | +------------+ | | +-----------+ | |
| | | 6: call() | | | | ... | | |
| | | future | | | | 4, result | | |
| | | ... | | | | 3, except | | |
+----------+ +------------+ +--------+ +-----------+ +---------+
Executor.submit() called:
- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
- adds the id of the _WorkItem to the "Work Ids" queue
Local worker thread:
- reads work ids from the "Work Ids" queue and looks up the corresponding
WorkItem from the "Work Items" dict: if the work item has been cancelled then
it is simply removed from the dict, otherwise it is repackaged as a
_CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
- reads _ResultItems from "Result Q", updates the future stored in the
"Work Items" dict and deletes the dict entry
Process #1..n:
- reads _CallItems from "Call Q", executes the calls, and puts the resulting
    _ResultItems in "Result Q"
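Typical usage (a minimal sketch; pow is used here because builtins are
picklable and can therefore cross the process boundary):
    executor = ProcessPoolExecutor(max_workers=2)
    future = executor.submit(pow, 2, 10)
    print(future.result())  # 1024
    executor.shutdown()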
"""
from __future__ import with_statement
import atexit
import multiprocessing
import threading
import weakref
import sys
import base
try:
import queue
except ImportError:
import Queue as queue
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
# Workers are created as daemon threads and processes. This is done to allow the
# interpreter to exit when there are still idle processes in a
# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However,
# allowing workers to die with the interpreter has two undesirable properties:
#   - The workers would still be running during interpreter shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads/processes finish.
_thread_references = set()
_shutdown = False
def _python_exit():
global _shutdown
_shutdown = True
for thread_reference in _thread_references:
thread = thread_reference()
if thread is not None:
thread.join()
def _remove_dead_thread_references():
"""Remove inactive threads from _thread_references.
Should be called periodically to prevent memory leaks in scenarios such as:
>>> while True:
>>> ... t = ThreadPoolExecutor(max_workers=5)
>>> ... t.map(int, ['1', '2', '3', '4', '5'])
"""
for thread_reference in set(_thread_references):
if thread_reference() is None:
_thread_references.discard(thread_reference)
# Controls how many more calls than processes will be queued in the call queue.
# A smaller number will mean that processes spend more time idle waiting for
# work while a larger number will make Future.cancel() succeed less frequently
# (Futures in the call queue cannot be cancelled).
EXTRA_QUEUED_CALLS = 1
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
class _ResultItem(object):
def __init__(self, work_id, exception=None, result=None):
self.work_id = work_id
self.exception = exception
self.result = result
class _CallItem(object):
def __init__(self, work_id, fn, args, kwargs):
self.work_id = work_id
self.fn = fn
self.args = args
self.kwargs = kwargs
def _process_worker(call_queue, result_queue, shutdown):
"""Evaluates calls from call_queue and places the results in result_queue.
    This worker is run in a separate process.
Args:
call_queue: A multiprocessing.Queue of _CallItems that will be read and
evaluated by the worker.
        result_queue: A multiprocessing.Queue of _ResultItems that will be written
to by the worker.
shutdown: A multiprocessing.Event that will be set as a signal to the
worker that it should exit when call_queue is empty.
"""
while True:
try:
call_item = call_queue.get(block=True, timeout=0.1)
except queue.Empty:
if shutdown.is_set():
return
else:
try:
r = call_item.fn(*call_item.args, **call_item.kwargs)
except BaseException:
e = sys.exc_info()[1]
result_queue.put(_ResultItem(call_item.work_id,
exception=e))
else:
result_queue.put(_ResultItem(call_item.work_id,
result=r))
def _add_call_item_to_queue(pending_work_items,
work_ids,
call_queue):
"""Fills call_queue with _WorkItems from pending_work_items.
This function never blocks.
Args:
pending_work_items: A dict mapping work ids to _WorkItems e.g.
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids
are consumed and the corresponding _WorkItems from
pending_work_items are transformed into _CallItems and put in
call_queue.
call_queue: A multiprocessing.Queue that will be filled with _CallItems
derived from _WorkItems.
"""
while True:
if call_queue.full():
return
try:
work_id = work_ids.get(block=False)
except queue.Empty:
return
else:
work_item = pending_work_items[work_id]
if work_item.future.set_running_or_notify_cancel():
call_queue.put(_CallItem(work_id,
work_item.fn,
work_item.args,
work_item.kwargs),
block=True)
else:
del pending_work_items[work_id]
continue
def _queue_management_worker(executor_reference,
                             processes,
                             pending_work_items,
                             work_ids_queue,
                             call_queue,
                             result_queue,
                             shutdown_process_event):
"""Manages the communication between this process and the worker processes.
This function is run in a local thread.
Args:
executor_reference: A weakref.ref to the ProcessPoolExecutor that owns
this thread. Used to determine if the ProcessPoolExecutor has been
garbage collected and that this function can exit.
        processes: A list of the multiprocessing.Process instances used as
workers.
pending_work_items: A dict mapping work ids to _WorkItems e.g.
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]).
call_queue: A multiprocessing.Queue that will be filled with _CallItems
derived from _WorkItems for processing by the process workers.
result_queue: A multiprocessing.Queue of _ResultItems generated by the
process workers.
shutdown_process_event: A multiprocessing.Event used to signal the
process workers that they should exit when their work queue is
empty.
"""
while True:
_add_call_item_to_queue(pending_work_items,
work_ids_queue,
call_queue)
try:
result_item = result_queue.get(block=True, timeout=0.1)
except queue.Empty:
executor = executor_reference()
# No more work items can be added if:
# - The interpreter is shutting down OR
# - The executor that owns this worker has been collected OR
# - The executor that owns this worker has been shutdown.
if _shutdown or executor is None or executor._shutdown_thread:
# Since no new work items can be added, it is safe to shutdown
# this thread if there are no pending work items.
if not pending_work_items:
shutdown_process_event.set()
# If .join() is not called on the created processes then
# some multiprocessing.Queue methods may deadlock on Mac OS
# X.
for p in processes:
p.join()
return
del executor
else:
work_item = pending_work_items[result_item.work_id]
del pending_work_items[result_item.work_id]
if result_item.exception:
work_item.future.set_exception(result_item.exception)
else:
work_item.future.set_result(result_item.result)
class ProcessPoolExecutor(base.Executor):
def __init__(self, max_workers=None):
"""Initializes a new ProcessPoolExecutor instance.
Args:
max_workers: The maximum number of processes that can be used to
execute the given calls. If None or not given then as many
worker processes will be created as the machine has processors.
"""
_remove_dead_thread_references()
if max_workers is None:
self._max_workers = multiprocessing.cpu_count()
else:
self._max_workers = max_workers
# Make the call queue slightly larger than the number of processes to
# prevent the worker processes from idling. But don't make it too big
# because futures in the call queue cannot be cancelled.
self._call_queue = multiprocessing.Queue(self._max_workers +
EXTRA_QUEUED_CALLS)
self._result_queue = multiprocessing.Queue()
self._work_ids = queue.Queue()
self._queue_management_thread = None
self._processes = set()
# Shutdown is a two-step process.
self._shutdown_thread = False
self._shutdown_process_event = multiprocessing.Event()
self._shutdown_lock = threading.Lock()
self._queue_count = 0
self._pending_work_items = {}
def _start_queue_management_thread(self):
if self._queue_management_thread is None:
self._queue_management_thread = threading.Thread(
                target=_queue_management_worker,
args=(weakref.ref(self),
self._processes,
self._pending_work_items,
self._work_ids,
self._call_queue,
self._result_queue,
self._shutdown_process_event))
self._queue_management_thread.daemon = True
self._queue_management_thread.start()
_thread_references.add(weakref.ref(self._queue_management_thread))
def _adjust_process_count(self):
for _ in range(len(self._processes), self._max_workers):
p = multiprocessing.Process(
target=_process_worker,
args=(self._call_queue,
self._result_queue,
self._shutdown_process_event))
p.start()
self._processes.add(p)
def submit(self, fn, *args, **kwargs):
with self._shutdown_lock:
if self._shutdown_thread:
raise RuntimeError('cannot schedule new futures after shutdown')
f = base.Future()
w = _WorkItem(f, fn, args, kwargs)
self._pending_work_items[self._queue_count] = w
self._work_ids.put(self._queue_count)
self._queue_count += 1
self._start_queue_management_thread()
self._adjust_process_count()
return f
submit.__doc__ = base.Executor.submit.__doc__
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown_thread = True
if wait:
if self._queue_management_thread:
self._queue_management_thread.join()
        # To reduce the risk of opening too many files, remove references to
# objects that use file descriptors.
self._queue_management_thread = None
self._call_queue = None
self._result_queue = None
self._shutdown_process_event = None
self._processes = None
shutdown.__doc__ = base.Executor.shutdown.__doc__
atexit.register(_python_exit)
|
mj3-16/mjtest
|
mjtest/util/concurrent/futures/process.py
|
Python
|
mit
| 14,280
|
[
"Brian"
] |
45418f3449a5f0a29ae3a78a1a9d68efe131fb01153099585f2399111205edd8
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2005 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
The core of the Gramps plugin system. This module provides capability to load
plugins from specified directories and provide information about the loaded
plugins.
Plugins are divided into several categories. These are: reports, tools,
importers, exporters, quick reports, and document generators.
"""
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
import os
from gi.repository import Gtk, GdkPixbuf, Gdk
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.utils.callback import Callback
from gramps.gen.plug import BasePluginManager, PluginRegister
from gramps.gen.constfunc import win
from gramps.gen.config import config
from gramps.gen.const import ICON
#-------------------------------------------------------------------------
#
# GuiPluginManager
#
#-------------------------------------------------------------------------
class GuiPluginManager(Callback):
""" PluginManager is a Singleton which manages plugins.
    It is the GUI implementation built on the unique BasePluginManager.
    This class adds the possibility to hide plugins in the GUI via a config
    setting.
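    Example (a minimal sketch; 'gramplet1' is a hypothetical plugin id):
        >>> pmgr = GuiPluginManager.get_instance()
        >>> pmgr.hide_plugin('gramplet1')
        >>> 'gramplet1' in pmgr.get_hidden_plugin_ids()
        True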
"""
__instance = None
__signals__ = { 'plugins-reloaded' : None }
def get_instance():
""" Use this function to get the instance of the PluginManager """
if GuiPluginManager.__instance is None:
GuiPluginManager.__instance = 1 # Set to 1 for __init__()
GuiPluginManager.__instance = GuiPluginManager()
return GuiPluginManager.__instance
get_instance = staticmethod(get_instance)
def __init__(self):
""" This function should only be run once by get_instance() """
        if GuiPluginManager.__instance != 1:
raise Exception("This class is a singleton. "
"Use the get_instance() method")
Callback.__init__(self)
self.basemgr = BasePluginManager.get_instance()
self.__hidden_plugins = set(config.get('plugin.hiddenplugins'))
# self.__hidden_changed() # See bug 9561
def load_plugin(self, pdata):
if not self.is_loaded(pdata.id):
#load stock icons before import, only gui needs this
if pdata.icons:
                if pdata.icondir and os.path.isdir(pdata.icondir):
                    icon_dir = pdata.icondir
                else:
                    #use the plugin directory
                    icon_dir = pdata.directory
                # Append icon directory to the theme search path
                theme = Gtk.IconTheme.get_default()
                theme.append_search_path(icon_dir)
return self.basemgr.load_plugin(pdata)
def reload_plugins(self):
self.basemgr.reload_plugins()
self.emit('plugins-reloaded')
def __getattr__(self, name):
return getattr(self.basemgr, name)
def __hidden_changed(self, *args):
#if hidden changed, stored data must be emptied as it could contain
#something that now must be hidden
self.empty_managed_plugins()
#objects that need to know if the plugins available changed, are
#listening to this signal to update themselves. If a plugin becomes
#(un)hidden, this should happen, so we emit.
self.emit('plugins-reloaded')
def get_hidden_plugin_ids(self):
"""
Returns copy of the set hidden plugin ids
"""
return self.__hidden_plugins.copy()
def hide_plugin(self, id):
""" Hide plugin with given id. This will hide the plugin so queries do
not return it anymore, and write this change to the config.
Note that config will then emit a signal
"""
self.__hidden_plugins.add(id)
config.set('plugin.hiddenplugins', list(self.__hidden_plugins))
config.save()
self.__hidden_changed()
def unhide_plugin(self, id):
""" Unhide plugin with given id. This will unhide the plugin so queries
return it again, and write this change to the config
"""
self.__hidden_plugins.remove(id)
config.set('plugin.hiddenplugins', list(self.__hidden_plugins))
config.save()
self.__hidden_changed()
def get_reg_reports(self, gui=True):
""" Return list of non hidden registered reports
:Param gui: bool indicating if GUI reports or CLI reports must be
returned
"""
return [plg for plg in self.basemgr.get_reg_reports(gui)
if plg.id not in self.__hidden_plugins]
def get_reg_tools(self, gui=True):
""" Return list of non hidden registered tools
:Param gui: bool indicating if GUI reports or CLI reports must be
returned
"""
return [plg for plg in self.basemgr.get_reg_tools(gui)
if plg.id not in self.__hidden_plugins]
def get_reg_views(self):
""" Return list of non hidden registered views
"""
return [plg for plg in self.basemgr.get_reg_views()
if plg.id not in self.__hidden_plugins]
def get_reg_quick_reports(self):
""" Return list of non hidden registered quick reports
"""
return [plg for plg in self.basemgr.get_reg_quick_reports()
if plg.id not in self.__hidden_plugins]
def get_reg_mapservices(self):
""" Return list of non hidden registered mapservices
"""
return [plg for plg in self.basemgr.get_reg_mapservices()
if plg.id not in self.__hidden_plugins]
def get_reg_bookitems(self):
""" Return list of non hidden reports registered as bookitem
"""
return [plg for plg in self.basemgr.get_reg_bookitems()
if plg.id not in self.__hidden_plugins]
def get_reg_gramplets(self):
""" Return list of non hidden reports registered as bookitem
"""
return [plg for plg in self.basemgr.get_reg_gramplets()
if plg.id not in self.__hidden_plugins]
def get_reg_sidebars(self):
""" Return list of non hidden registered sidebars
"""
return [plg for plg in self.basemgr.get_reg_sidebars()
if plg.id not in self.__hidden_plugins]
def get_reg_importers(self):
""" Return list of registered importers
"""
return [plg for plg in self.basemgr.get_reg_importers()
if plg.id not in self.__hidden_plugins]
def get_reg_exporters(self):
""" Return list of registered exporters
"""
return [plg for plg in self.basemgr.get_reg_exporters()
if plg.id not in self.__hidden_plugins]
def get_reg_docgens(self):
""" Return list of registered docgen
"""
return [plg for plg in self.basemgr.get_reg_docgens()
if plg.id not in self.__hidden_plugins]
def get_reg_databases(self):
""" Return list of non hidden registered database backends
"""
return [plg for plg in self.basemgr.get_reg_databases()
if plg.id not in self.__hidden_plugins]
def get_reg_general(self, category=None):
return [plg for plg in self.basemgr.get_reg_general(category)
if plg.id not in self.__hidden_plugins]
|
sam-m888/gramps
|
gramps/gui/pluginmanager.py
|
Python
|
gpl-2.0
| 8,526
|
[
"Brian"
] |
97d0b9f30ce5ce94b83dbca7d59e8c03d25cc2c258ad485e1d677332e0de0c71
|
# -*- coding: utf-8 -*-
r"""
.. _tut_background_filtering:
===================================
Background information on filtering
===================================
Here we give some background information on filtering in general,
and how it is done in MNE-Python in particular.
Recommended reading for practical applications of digital
filter design can be found in Parks & Burrus [1]_ and
Ifeachor and Jervis [2]_, and for filtering in an
M/EEG context we recommend reading Widmann *et al.* 2015 [7]_.
To see how to use the default filters in MNE-Python on actual data, see
the :ref:`tut_artifacts_filter` tutorial.
.. contents::
:local:
Problem statement
=================
The practical issues with filtering electrophysiological data are covered
well by Widmann *et al.* in [7]_, in a follow-up to an article where they
conclude with this statement:
Filtering can result in considerable distortions of the time course
(and amplitude) of a signal as demonstrated by VanRullen (2011) [[3]_].
Thus, filtering should not be used lightly. However, if effects of
filtering are cautiously considered and filter artifacts are minimized,
a valid interpretation of the temporal dynamics of filtered
electrophysiological data is possible and signals missed otherwise
can be detected with filtering.
In other words, filtering can increase SNR, but if it is not used carefully,
it can distort data. Here we hope to cover some filtering basics so
users can better understand filtering tradeoffs, and why MNE-Python has
chosen particular defaults.
.. _tut_filtering_basics:
Filtering basics
================
Let's get some of the basic math down. In the frequency domain, digital
filters have a transfer function that is given by:
.. math::
H(z) &= \frac{b_0 + b_1 z^{-1} + b_2 z^{-2} + ... + b_M z^{-M}}
{1 + a_1 z^{-1} + a_2 z^{-2} + ... + a_N z^{-M}} \\
&= \frac{\sum_0^Mb_kz^{-k}}{\sum_1^Na_kz^{-k}}
In the time domain, the numerator coefficients :math:`b_k` and denominator
coefficients :math:`a_k` can be used to obtain our output data
:math:`y(n)` in terms of our input data :math:`x(n)` as:
.. math::
:label: summations
y(n) &= b_0 x(n) + b_1 x(n-1) + ... + b_M x(n-M)
- a_1 y(n-1) - a_2 y(n - 2) - ... - a_N y(n - N)\\
&= \sum_0^M b_k x(n-k) - \sum_1^N a_k y(n-k)
In other words, the output at time :math:`n` is determined by a sum over:
1. The numerator coefficients :math:`b_k`, which get multiplied by
the previous input :math:`x(n-k)` values, and
2. The denominator coefficients :math:`a_k`, which get multiplied by
the previous output :math:`y(n-k)` values.
Note that these summations in :eq:`summations` correspond nicely to
(1) a weighted `moving average`_ and (2) an autoregression_.
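The difference equation :eq:`summations` is, in fact, exactly what
:func:`scipy.signal.lfilter` evaluates; as a minimal sketch (the
coefficients below are illustrative, not MNE defaults)::
    import numpy as np
    from scipy import signal
    b = [0.5, 0.5]               # numerator: a 2-tap moving average (FIR)
    a = [1.]                     # denominator: no recursive part
    x = np.zeros(8)
    x[0] = 1.                    # unit impulse
    y = signal.lfilter(b, a, x)  # impulse response: [0.5, 0.5, 0., ...]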
Filters are broken into two classes: FIR_ (finite impulse response) and
IIR_ (infinite impulse response) based on these coefficients.
FIR filters use a finite number of numerator
coefficients :math:`b_k` (:math:`\forall k, a_k=0`), and thus each output
value of :math:`y(n)` depends only on the :math:`M` previous input values.
IIR filters depend on the previous input and output values, and thus can have
effectively infinite impulse responses.
As outlined in [1]_, FIR and IIR have different tradeoffs:
* A causal FIR filter can be linear-phase -- i.e., the same time delay
across all frequencies -- whereas a causal IIR filter cannot. The phase
and group delay characteristics are also usually better for FIR filters.
* IIR filters can generally have a steeper cutoff than an FIR filter of
equivalent order.
* IIR filters are generally less numerically stable, in part due to
accumulating error (due to its recursive calculations).
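Both filter classes are a single design call away in SciPy; as a minimal
sketch (the orders and cutoff here are illustrative, not MNE defaults)::
    from scipy import signal
    h_fir = signal.firwin(401, 40., nyq=500.)    # FIR: all a_k are zero
    b_iir, a_iir = signal.butter(4, 40. / 500.)  # IIR: recursive denominator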
In MNE-Python we default to using FIR filtering. As noted in Widmann *et al.*
2015 [7]_:
Despite IIR filters often being considered as computationally more
efficient, they are recommended only when high throughput and sharp
cutoffs are required (Ifeachor and Jervis, 2002[2]_, p. 321),
...FIR filters are easier to control, are always stable, have a
well-defined passband, can be corrected to zero-phase without
additional computations, and can be converted to minimum-phase.
We therefore recommend FIR filters for most purposes in
electrophysiological data analysis.
When designing a filter (FIR or IIR), there are always tradeoffs that
need to be considered, including but not limited to:
1. Ripple in the pass-band
2. Attenuation of the stop-band
3. Steepness of roll-off
4. Filter order (i.e., length for FIR filters)
5. Time-domain ringing
In general, the sharper something is in frequency, the broader it is in time,
and vice-versa. This is a fundamental time-frequency tradeoff, and it will
show up below.
FIR Filters
===========
First we will focus on FIR filters, which are the default filters used by
MNE-Python.
"""
###############################################################################
# Designing FIR filters
# ---------------------
# Here we'll try designing a low-pass filter, and look at trade-offs in terms
# of time- and frequency-domain filter characteristics. Later, in
# :ref:`tut_effect_on_signals`, we'll look at how such filters can affect
# signals when they are used.
#
# First let's import some useful tools for filtering, and set some default
# values for our data that are reasonable for M/EEG data.
import numpy as np
from scipy import signal, fftpack
import matplotlib.pyplot as plt
from mne.time_frequency.tfr import morlet
import mne
sfreq = 1000.
f_p = 40.
ylim = [-60, 10] # for dB plots
xlim = [2, sfreq / 2.]
blue = '#1f77b4'
###############################################################################
# Take for example an ideal low-pass filter, which would give a value of 1 in
# the pass-band (up to frequency :math:`f_p`) and a value of 0 in the stop-band
# (down to frequency :math:`f_s`) such that :math:`f_p=f_s=40` Hz here
# (shown to a lower limit of -60 dB for simplicity):
nyq = sfreq / 2. # the Nyquist frequency is half our sample rate
freq = [0, f_p, f_p, nyq]
gain = [1, 1, 0, 0]
def box_off(ax):
ax.grid(zorder=0)
for key in ('top', 'right'):
ax.spines[key].set_visible(False)
def plot_ideal(freq, gain, ax):
freq = np.maximum(freq, xlim[0])
xs, ys = list(), list()
my_freq, my_gain = list(), list()
for ii in range(len(freq)):
xs.append(freq[ii])
ys.append(ylim[0])
if ii < len(freq) - 1 and gain[ii] != gain[ii + 1]:
xs += [freq[ii], freq[ii + 1]]
ys += [ylim[1]] * 2
my_freq += np.linspace(freq[ii], freq[ii + 1], 20,
endpoint=False).tolist()
my_gain += np.linspace(gain[ii], gain[ii + 1], 20,
endpoint=False).tolist()
else:
my_freq.append(freq[ii])
my_gain.append(gain[ii])
my_gain = 10 * np.log10(np.maximum(my_gain, 10 ** (ylim[0] / 10.)))
ax.fill_between(xs, ylim[0], ys, color='r', alpha=0.1)
ax.semilogx(my_freq, my_gain, 'r--', alpha=0.5, linewidth=4, zorder=3)
xticks = [1, 2, 4, 10, 20, 40, 100, 200, 400]
ax.set(xlim=xlim, ylim=ylim, xticks=xticks, xlabel='Frequency (Hz)',
ylabel='Amplitude (dB)')
ax.set(xticklabels=xticks)
box_off(ax)
half_height = np.array(plt.rcParams['figure.figsize']) * [1, 0.5]
ax = plt.subplots(1, figsize=half_height)[1]
plot_ideal(freq, gain, ax)
ax.set(title='Ideal %s Hz lowpass' % f_p)
mne.viz.tight_layout()
plt.show()
###############################################################################
# This filter hypothetically achieves zero ripple in the frequency domain,
# perfect attenuation, and perfect steepness. However, due to the discontinuity
# in the frequency response, the filter would require infinite ringing in the
# time domain (i.e., infinite order) to be realized. Another way to think of
# this is that a rectangular window in frequency is actually a sinc_ function
# in time, which requires an infinite number of samples, and thus infinite
# time, to represent. So although this filter has ideal frequency suppression,
# it has poor time-domain characteristics.
#
# Let's try to naïvely make a brick-wall filter of length 0.1 sec, and look
# at the filter itself in the time domain and the frequency domain:
n = int(round(0.1 * sfreq)) + 1
t = np.arange(-n // 2, n // 2) / sfreq # center our sinc
h = np.sinc(2 * f_p * t) / (4 * np.pi)
def plot_filter(h, title, freq, gain, show=True):
if h.ndim == 2: # second-order sections
sos = h
n = mne.filter.estimate_ringing_samples(sos)
h = np.zeros(n)
h[0] = 1
h = signal.sosfilt(sos, h)
H = np.ones(512, np.complex128)
for section in sos:
f, this_H = signal.freqz(section[:3], section[3:])
H *= this_H
else:
f, H = signal.freqz(h)
fig, axs = plt.subplots(2)
t = np.arange(len(h)) / sfreq
axs[0].plot(t, h, color=blue)
axs[0].set(xlim=t[[0, -1]], xlabel='Time (sec)',
ylabel='Amplitude h(n)', title=title)
box_off(axs[0])
f *= sfreq / (2 * np.pi)
axs[1].semilogx(f, 10 * np.log10((H * H.conj()).real), color=blue,
linewidth=2, zorder=4)
plot_ideal(freq, gain, axs[1])
mne.viz.tight_layout()
if show:
plt.show()
plot_filter(h, 'Sinc (0.1 sec)', freq, gain)
###############################################################################
# This is not so good! Making the filter 10 times longer (1 sec) gets us a
# bit better stop-band suppression, but the filter still rings a lot in
# the time domain. Note the x-axis is an order of magnitude longer here:
n = int(round(1. * sfreq)) + 1
t = np.arange(-n // 2, n // 2) / sfreq
h = np.sinc(2 * f_p * t) / (4 * np.pi)
plot_filter(h, 'Sinc (1.0 sec)', freq, gain)
###############################################################################
# Let's make the stop-band tighter still with a longer filter (10 sec),
# with a resulting larger x-axis:
n = int(round(10. * sfreq)) + 1
t = np.arange(-n // 2, n // 2) / sfreq
h = np.sinc(2 * f_p * t) / (4 * np.pi)
plot_filter(h, 'Sinc (10.0 sec)', freq, gain)
###############################################################################
# Now we have very sharp frequency suppression, but our filter rings for the
# entire 10 seconds. So this naïve method is probably not a good way to build
# our low-pass filter.
#
# Fortunately, there are multiple established methods to design FIR filters
# based on desired response characteristics. These include:
#
# 1. The Remez_ algorithm (:func:`scipy.signal.remez`, `MATLAB firpm`_)
# 2. Windowed FIR design (:func:`scipy.signal.firwin2`, `MATLAB fir2`_)
# 3. Least squares designs (:func:`scipy.signal.firls`, `MATLAB firls`_)
# 4. Frequency-domain design (construct filter in Fourier
# domain and use an :func:`IFFT <scipy.fftpack.ifft>` to invert it)
#
# .. note:: Remez and least squares designs have advantages when there are
# "do not care" regions in our frequency response. However, we want
# well controlled responses in all frequency regions.
#           Frequency-domain construction is good when an arbitrary response
#           is desired, but generally less clean (due to sampling issues) than
#           a windowed approach for more straightforward filter applications.
#           Since our filters (low-pass, high-pass, band-pass, band-stop)
#           are fairly simple and we require precise control of all frequency
#           regions, here we will use and explore primarily windowed FIR
#           design.
#
# If we relax our frequency-domain filter requirements a little bit, we can
# use these functions to construct a lowpass filter that instead has a
# *transition band*, or a region between the pass frequency :math:`f_p`
# and stop frequency :math:`f_s`, e.g.:
trans_bandwidth = 10 # 10 Hz transition band
f_s = f_p + trans_bandwidth # = 50 Hz
freq = [0., f_p, f_s, nyq]
gain = [1., 1., 0., 0.]
ax = plt.subplots(1, figsize=half_height)[1]
plot_ideal(freq, gain, ax)
ax.set(title='%s Hz lowpass with a %s Hz transition' % (f_p, trans_bandwidth))
mne.viz.tight_layout()
plt.show()
###############################################################################
# Accepting a shallower roll-off of the filter in the frequency domain makes
# our time-domain response potentially much better. We end up with a
# smoother slope through the transition region, but a *much* cleaner time
# domain signal. Here again for the 1 sec filter:
n = int(round(1. * sfreq)) + 1  # reset the length to match the stated 1.0 sec
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, 'Windowed 10-Hz transition (1.0 sec)', freq, gain)
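# As an aside, here is a hedged sketch (our addition for illustration, not
# part of the original comparison) of the same specification designed with
# the Remez exchange algorithm mentioned above; the 0.1 sec length is an
# arbitrary, illustrative choice:
n_remez = int(round(0.1 * sfreq)) + 1
h_remez = signal.remez(n_remez, [0., f_p, f_s, nyq], [1., 0.], Hz=sfreq)
plot_filter(h_remez, 'Remez 10-Hz transition (0.1 sec)', freq, gain)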
###############################################################################
# Since our lowpass is around 40 Hz with a 10 Hz transition, we can actually
# use a shorter filter (5 cycles at 10 Hz = 0.5 sec) and still get okay
# stop-band attenuation:
n = int(round(sfreq * 0.5)) + 1
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, 'Windowed 10-Hz transition (0.5 sec)', freq, gain)
###############################################################################
# But then if we shorten the filter too much (2 cycles of 10 Hz = 0.2 sec),
# our effective stop frequency gets pushed out past 60 Hz:
n = int(round(sfreq * 0.2)) + 1
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, 'Windowed 10-Hz transition (0.2 sec)', freq, gain)
###############################################################################
# If we want a filter that is only 0.2 seconds long, we should probably use
# something more like a 25 Hz transition band (0.2 sec = 5 cycles @ 25 Hz):
trans_bandwidth = 25
f_s = f_p + trans_bandwidth
freq = [0, f_p, f_s, nyq]
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, 'Windowed 25-Hz transition (0.2 sec)', freq, gain)
###############################################################################
# .. _tut_effect_on_signals:
#
# Applying FIR filters
# --------------------
#
# Now let's look at some practical effects of these filters by applying
# them to some data.
#
# Let's construct a Gaussian-windowed sinusoid (i.e., Morlet imaginary part)
# plus noise (random + line). Note that the original, clean signal contains
# frequency content in both the pass band and transition bands of our
# low-pass filter.
dur = 10.
center = 2.
morlet_freq = f_p
tlim = [center - 0.2, center + 0.2]
tticks = [tlim[0], center, tlim[1]]
flim = [20, 70]
x = np.zeros(int(sfreq * dur))
blip = morlet(sfreq, [morlet_freq], n_cycles=7)[0].imag / 20.
n_onset = int(center * sfreq) - len(blip) // 2
x[n_onset:n_onset + len(blip)] += blip
x_orig = x.copy()
rng = np.random.RandomState(0)
x += rng.randn(len(x)) / 1000.
x += np.sin(2. * np.pi * 60. * np.arange(len(x)) / sfreq) / 2000.
###############################################################################
# Filter it with a shallow cutoff, linear-phase FIR and compensate for
# the delay:
transition_band = 0.25 * f_p
f_s = f_p + transition_band
filter_dur = 6.6 / transition_band # sec
n = int(sfreq * filter_dur)
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
h = signal.firwin2(n, freq, gain, nyq=sfreq / 2.)
x_shallow = np.convolve(h, x)[len(h) // 2:]
plot_filter(h, 'MNE-Python 0.14 default', freq, gain)
###############################################################################
# This is actually set to become the default type of filter used in MNE-Python
# in 0.14 (see :ref:`tut_filtering_in_python`).
#
# Let's also filter with the MNE-Python 0.13 default, which is a
# long-duration, steep cutoff FIR that gets applied twice:
transition_band = 0.5 # Hz
f_s = f_p + transition_band
filter_dur = 10. # sec
n = int(sfreq * filter_dur)
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
h = signal.firwin2(n, freq, gain, nyq=sfreq / 2.)
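# Apply the filter forward, flip the result, apply it again, and flip back,
# trimming the extra convolution padding; this is a zero-phase, two-pass
# ("filtfilt"-style) application: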
x_steep = np.convolve(np.convolve(h, x)[::-1], h)[::-1][len(h) - 1:-len(h) - 1]
plot_filter(h, 'MNE-Python 0.13 default', freq, gain)
###############################################################################
# Finally, let's also filter it with the
# MNE-C default, which is a long-duration steep-slope FIR filter designed
# using frequency-domain techniques:
h = mne.filter.design_mne_c_filter(sfreq, l_freq=None, h_freq=f_p + 2.5)
x_mne_c = np.convolve(h, x)[len(h) // 2:]
transition_band = 5 # Hz (default in MNE-C)
f_s = f_p + transition_band
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
plot_filter(h, 'MNE-C default', freq, gain)
###############################################################################
# Both the MNE-Python 0.13 and MNE-C filters have excellent frequency
# attenuation, but it comes at a cost of potential
# ringing (long-lasting ripples) in the time domain. Ringing can occur with
# steep filters, especially on signals with frequency content around the
# transition band. Our Morlet wavelet signal has power in our transition band,
# and the time-domain ringing is thus more pronounced for the steep-slope,
# long-duration filter than the shorter, shallower-slope filter:
axs = plt.subplots(1, 2)[1]
def plot_signal(x, offset):
t = np.arange(len(x)) / sfreq
axs[0].plot(t, x + offset)
axs[0].set(xlabel='Time (sec)', xlim=t[[0, -1]])
box_off(axs[0])
X = fftpack.fft(x)
freqs = fftpack.fftfreq(len(x), 1. / sfreq)
mask = freqs >= 0
X = X[mask]
freqs = freqs[mask]
axs[1].plot(freqs, 20 * np.log10(np.abs(X)))
axs[1].set(xlim=xlim)
yticks = np.arange(5) / -30.
yticklabels = ['Original', 'Noisy', 'FIR-shallow (0.14)', 'FIR-steep (0.13)',
'FIR-steep (MNE-C)']
plot_signal(x_orig, offset=yticks[0])
plot_signal(x, offset=yticks[1])
plot_signal(x_shallow, offset=yticks[2])
plot_signal(x_steep, offset=yticks[3])
plot_signal(x_mne_c, offset=yticks[4])
axs[0].set(xlim=tlim, title='FIR, Lowpass=%d Hz' % f_p, xticks=tticks,
ylim=[-0.150, 0.025], yticks=yticks, yticklabels=yticklabels,)
for text in axs[0].get_yticklabels():
text.set(rotation=45, size=8)
axs[1].set(xlim=flim, ylim=ylim, xlabel='Frequency (Hz)',
ylabel='Magnitude (dB)')
box_off(axs[0])
box_off(axs[1])
mne.viz.tight_layout()
plt.show()
###############################################################################
# IIR filters
# ===========
#
# MNE-Python also offers IIR filtering functionality that is based on the
# methods from :mod:`scipy.signal`. Specifically, we use the general-purpose
# functions :func:`scipy.signal.iirfilter` and :func:`scipy.signal.iirdesign`,
# which provide unified interfaces to IIR filter design.
#
# Designing IIR filters
# ---------------------
#
# Let's continue with our design of a 40 Hz low-pass filter, and look at
# some trade-offs of different IIR filters.
#
# Often the default IIR filter is a `Butterworth filter`_, which is designed
# to have a *maximally flat pass-band*. Let's look at a few orders of filter,
# i.e., a few different numbers of coefficients used and therefore different
# steepnesses of the filter:
sos = signal.iirfilter(2, f_p / nyq, btype='low', ftype='butter', output='sos')
plot_filter(sos, 'Butterworth order=2', freq, gain)
# Eventually this will just be from scipy signal.sosfiltfilt, but 0.18 is
# not widely adopted yet (as of June 2016), so we use our wrapper...
sosfiltfilt = mne.fixes.get_sosfiltfilt()
x_shallow = sosfiltfilt(sos, x)
###############################################################################
# The falloff of this filter is not very steep.
#
# .. warning:: For brevity, we do not show the phase of these filters here.
# In the FIR case, we can design linear-phase filters, and
# compensate for the delay (making the filter acausal) if
# necessary. This cannot be done
# with IIR filters, as they have a non-linear phase.
# As the filter order increases, the
# phase distortion near and in the transition band worsens.
# However, if acausal (forward-backward) filtering can be used,
# e.g. with :func:`scipy.signal.filtfilt`, these phase issues
# can be mitigated.
#
# .. note:: Here we have made use of second-order sections (SOS)
# by using :func:`scipy.signal.sosfilt` and, under the
# hood, :func:`scipy.signal.zpk2sos` when passing the
# ``output='sos'`` keyword argument to
# :func:`scipy.signal.iirfilter`. The filter definitions
# given in tut_filtering_basics_ use the polynomial
# numerator/denominator (sometimes called "tf") form ``(b, a)``,
# which are theoretically equivalent to the SOS form used here.
# In practice, however, the SOS form can give much better results
# due to issues with numerical precision (see
# :func:`scipy.signal.sosfilt` for an example), so SOS should be
# used when possible to do IIR filtering.
#
# Let's increase the order, and note that now we have better attenuation,
# with a longer impulse response:
sos = signal.iirfilter(8, f_p / nyq, btype='low', ftype='butter', output='sos')
plot_filter(sos, 'Butterworth order=8', freq, gain)
x_steep = sosfiltfilt(sos, x)
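# Returning to the note on SOS above, a hedged illustration (our addition)
# of the same order-8 design in polynomial ("tf") form and its conversion to
# second-order sections; designing with ``output='sos'`` directly is
# preferred:
b, a = signal.iirfilter(8, f_p / nyq, btype='low', ftype='butter')
sos_from_tf = signal.tf2sos(b, a)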
###############################################################################
# There are other types of IIR filters that we can use. For a complete list,
# check out the documentation for :func:`scipy.signal.iirdesign`. Let's
# try a Chebychev (type I) filter, which trades off ripple in the pass-band
# to get better attenuation in the stop-band:
sos = signal.iirfilter(8, f_p / nyq, btype='low', ftype='cheby1', output='sos',
rp=1) # dB of acceptable pass-band ripple
plot_filter(sos, 'Chebychev-1 order=8, ripple=1 dB', freq, gain)
###############################################################################
# And if we can live with even more ripple, we can get it slightly steeper,
# but the impulse response begins to ring substantially longer (note the
# different x-axis scale):
sos = signal.iirfilter(8, f_p / nyq, btype='low', ftype='cheby1', output='sos',
rp=6)
plot_filter(sos, 'Chebychev-1 order=8, ripple=6 dB', freq, gain)
###############################################################################
# Applying IIR filters
# --------------------
#
# Now let's look at how our shallow and steep Butterworth IIR filters
# perform on our Morlet signal from before:
axs = plt.subplots(1, 2)[1]
yticks = np.arange(4) / -30.
yticklabels = ['Original', 'Noisy', 'Butterworth-2', 'Butterworth-8']
plot_signal(x_orig, offset=yticks[0])
plot_signal(x, offset=yticks[1])
plot_signal(x_shallow, offset=yticks[2])
plot_signal(x_steep, offset=yticks[3])
axs[0].set(xlim=tlim, title='IIR, Lowpass=%d Hz' % f_p, xticks=tticks,
ylim=[-0.125, 0.025], yticks=yticks, yticklabels=yticklabels,)
for text in axs[0].get_yticklabels():
text.set(rotation=45, size=8)
axs[1].set(xlim=flim, ylim=ylim, xlabel='Frequency (Hz)',
ylabel='Magnitude (dB)')
box_off(axs[0])
box_off(axs[1])
mne.viz.tight_layout()
plt.show()
###############################################################################
# Some pitfalls of filtering
# ==========================
#
# Multiple recent papers have noted potential risks of drawing
# errant inferences due to misapplication of filters.
#
# Low-pass problems
# -----------------
#
# Filters in general, especially those that are acausal (zero-phase), can make
# activity appear to occur earlier or later than it truly did. As
# mentioned in VanRullen 2011 [3]_, investigations of commonly (at the time)
# used low-pass filters created artifacts when they were applied to simulated
# data. However, such deleterious effects were minimal in many real-world
# examples in Rousselet 2012 [5]_.
#
# Perhaps more revealing, it was noted in Widmann & Schröger 2012 [6]_ that
# the problematic low-pass filters from VanRullen 2011 [3]_:
#
# 1. Used a least-squares design (like :func:`scipy.signal.firls`) that
#    included "do-not-care" transition regions, which can lead to
#    uncontrolled behavior (a brief sketch of such a design follows below).
# 2. Had a filter length that was independent of the transition bandwidth,
# which can cause excessive ringing and signal distortion.
#
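# Below is a brief, hedged sketch (our illustration, not taken from the cited
# papers) of what a least-squares design with a "do-not-care" region looks
# like: ``firls`` takes pairs of band edges, so the transition region between
# ``f_p`` and the stop edge is left unconstrained. Note that
# :func:`scipy.signal.firls` requires SciPy >= 0.18 and an odd number of taps.
f_stop = f_p + 10.  # hypothetical stop edge chosen only for illustration
h_ls = signal.firls(101, [0., f_p, f_stop, nyq], [1., 1., 0., 0.], nyq=nyq)
plot_filter(h_ls, 'Least-squares, do-not-care transition (0.1 sec)',
            [0., f_p, f_stop, nyq], [1., 1., 0., 0.])
###############################################################################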
# .. _tut_filtering_hp_problems:
#
# High-pass problems
# ------------------
#
# When it comes to high-pass filtering, corner frequencies above 0.1 Hz
# were found in Acunzo *et al.* 2012 [4]_ to:
#
# "...generate a systematic bias easily leading to misinterpretations of
#     neural activity."
#
# In a related paper, Widmann *et al.* 2015 [7]_ also came to suggest a 0.1 Hz
# highpass. More evidence of such distortions followed in Tanner *et al.*
# 2015 [8]_: in data from language ERP studies of semantic and syntactic
# processing (i.e., N400 and P600), using a high-pass above 0.3 Hz caused
# significant effects to be introduced implausibly early when compared to the
# unfiltered data. From this, the authors suggested the optimal high-pass
# value for language processing to be 0.1 Hz.
#
# We can recreate a problematic simulation from Tanner *et al.* 2015 [8]_:
#
# "The simulated component is a single-cycle cosine wave with an amplitude
# of 5µV, onset of 500 ms poststimulus, and duration of 800 ms. The
# simulated component was embedded in 20 s of zero values to avoid
# filtering edge effects... Distortions [were] caused by 2 Hz low-pass and
# high-pass filters... No visible distortion to the original waveform
# [occurred] with 30 Hz low-pass and 0.01 Hz high-pass filters...
# Filter frequencies correspond to the half-amplitude (-6 dB) cutoff
# (12 dB/octave roll-off)."
#
# .. note:: This simulated signal contains energy not just within the
# pass-band, but also within the transition and stop-bands -- perhaps
# most easily understood because the signal has a non-zero DC value,
# but also because it is a shifted cosine that has been
# *windowed* (here multiplied by a rectangular window), which
# makes the cosine and DC frequencies spread to other frequencies
# (multiplication in time is convolution in frequency, so multiplying
# by a rectangular window in the time domain means convolving a sinc
# function with the impulses at DC and the cosine frequency in the
# frequency domain).
#
x = np.zeros(int(2 * sfreq))
t = np.arange(0, len(x)) / sfreq - 0.2
onset = np.where(t >= 0.5)[0][0]
cos_t = np.arange(0, int(sfreq * 0.8)) / sfreq
sig = 2.5 - 2.5 * np.cos(2 * np.pi * (1. / 0.8) * cos_t)
x[onset:onset + len(sig)] = sig
iir_lp_30 = signal.iirfilter(2, 30. / nyq, btype='lowpass')  # Wn is in units
iir_hp_p1 = signal.iirfilter(2, 0.1 / nyq, btype='highpass')  # of the Nyquist
iir_lp_2 = signal.iirfilter(2, 2. / nyq, btype='lowpass')  # frequency, as
iir_hp_2 = signal.iirfilter(2, 2. / nyq, btype='highpass')  # used above
x_lp_30 = signal.filtfilt(iir_lp_30[0], iir_lp_30[1], x, padlen=0)
x_hp_p1 = signal.filtfilt(iir_hp_p1[0], iir_hp_p1[1], x, padlen=0)
x_lp_2 = signal.filtfilt(iir_lp_2[0], iir_lp_2[1], x, padlen=0)
x_hp_2 = signal.filtfilt(iir_hp_2[0], iir_hp_2[1], x, padlen=0)
xlim = t[[0, -1]]
ylim = [-2, 6]
xlabel = 'Time (sec)'
ylabel = r'Amplitude ($\mu$V)'
tticks = [0, 0.5, 1.3, t[-1]]
axs = plt.subplots(2, 2)[1].ravel()
for ax, x_f, title in zip(axs, [x_lp_2, x_lp_30, x_hp_2, x_hp_p1],
                          ['LP$_2$', 'LP$_{30}$', 'HP$_2$', 'HP$_{0.1}$']):
ax.plot(t, x, color='0.5')
ax.plot(t, x_f, color='k', linestyle='--')
ax.set(ylim=ylim, xlim=xlim, xticks=tticks,
title=title, xlabel=xlabel, ylabel=ylabel)
box_off(ax)
mne.viz.tight_layout()
plt.show()
###############################################################################
# Similarly, in a P300 paradigm reported by Kappenman & Luck 2010 [12]_,
# they found that applying a 1 Hz high-pass decreased the probability of
# finding a significant difference in the N100 response, likely because
# the P300 response was smeared (and inverted) in time by the high-pass
# filter such that it tended to cancel out the increased N100. However,
# they nonetheless note that some high-passing can still be useful to deal
# with drifts in the data.
#
# Even though these papers generally advise a 0.1 Hz or lower frequency for
# a high-pass, it is important to keep in mind (as most authors note) that
# filtering choices should depend on the frequency content of both the
# signal(s) of interest and the noise to be suppressed. For example, in
# some of the MNE-Python examples involving :ref:`ch_sample_data`,
# high-pass values of around 1 Hz are used when looking at auditory
# or visual N100 responses, because we analyze standard (not deviant) trials
# and thus expect that contamination by later or slower components will
# be limited.
#
# Baseline problems (or solutions?)
# ---------------------------------
#
# In an evolving discussion, Tanner *et al.* 2015 [8]_ suggest using baseline
# correction to remove slow drifts in data. However, Maess *et al.* 2016 [9]_
# suggest that baseline correction, which is a form of high-passing, does
# not offer substantial advantages over standard high-pass filtering.
# Tanner *et al.* [10]_ rebutted, arguing that baseline correction can correct
# for problems with filtering.
#
# To see what they mean, consider again our old simulated signal ``x`` from
# before:
def baseline_plot(x):
all_axs = plt.subplots(3, 2)[1]
for ri, (axs, freq) in enumerate(zip(all_axs, [0.1, 0.3, 0.5])):
for ci, ax in enumerate(axs):
if ci == 0:
                iir_hp = signal.iirfilter(4, freq / nyq, btype='highpass',
                                          output='sos')
x_hp = sosfiltfilt(iir_hp, x, padlen=0)
else:
x_hp -= x_hp[t < 0].mean()
ax.plot(t, x, color='0.5')
ax.plot(t, x_hp, color='k', linestyle='--')
if ri == 0:
ax.set(title=('' if ci == 0 else 'No ') +
'Baseline Correction')
box_off(ax)
ax.set(xticks=tticks, ylim=ylim, xlim=xlim, xlabel=xlabel)
ax.set_ylabel('%0.1f Hz' % freq, rotation=0,
horizontalalignment='right')
mne.viz.tight_layout()
    plt.suptitle('High-pass filtered, with and without baseline correction')
plt.show()
baseline_plot(x)
###############################################################################
# In response, Maess *et al.* 2016 [11]_ note that these simulations do not
# address cases of pre-stimulus activity that is shared across conditions, as
# applying baseline correction will effectively copy the topology outside the
# baseline period. We can see this if we give our signal ``x`` some
# consistent pre-stimulus activity, which makes everything look bad.
#
# .. note:: An important thing to keep in mind with these plots is that they
# are for a single simulated sensor. In multielectrode recordings
#           the topology (i.e., spatial pattern) of the pre-stimulus activity
# will leak into the post-stimulus period. This will likely create a
# spatially varying distortion of the time-domain signals, as the
# averaged pre-stimulus spatial pattern gets subtracted from the
# sensor time courses.
#
# Putting some activity in the baseline period:
n_pre = (t < 0).sum()
sig_pre = 1 - np.cos(2 * np.pi * np.arange(n_pre) / (0.5 * n_pre))
x[:n_pre] += sig_pre
baseline_plot(x)
###############################################################################
# Both groups seem to acknowledge that the choices of filtering cutoffs, and
# perhaps even the application of baseline correction, depend on the
# characteristics of the data being investigated, especially when it comes to:
#
# 1. The frequency content of the underlying evoked activity relative
# to the filtering parameters.
# 2. The validity of the assumption of no consistent evoked activity
# in the baseline period.
#
# We thus recommend carefully applying baseline correction and/or high-pass
# values based on the characteristics of the data to be analyzed.
#
#
# Filtering defaults
# ==================
#
# .. _tut_filtering_in_python:
#
# Defaults in MNE-Python
# ----------------------
#
# Most often, filtering in MNE-Python is done at the :class:`mne.io.Raw` level,
# and thus :func:`mne.io.Raw.filter` is used. This function under the hood
# (among other things) calls :func:`mne.filter.filter_data` to actually
# filter the data, which by default applies a zero-phase FIR filter designed
# using :func:`scipy.signal.firwin2`. In Widmann *et al.* 2015 [7]_, they
# suggest a specific set of parameters to use for high-pass filtering,
# including:
#
# "... providing a transition bandwidth of 25% of the lower passband
# edge but, where possible, not lower than 2 Hz and otherwise the
#     distance from the passband edge to the critical frequency."
#
# In practice, this means that for each high-pass value ``l_freq`` or
# low-pass value ``h_freq`` below, you would get this corresponding
# ``l_trans_bandwidth`` or ``h_trans_bandwidth``, respectively,
# if the sample rate were 100 Hz (i.e., Nyquist frequency of 50 Hz):
#
# +------------------+-------------------+-------------------+
# | l_freq or h_freq | l_trans_bandwidth | h_trans_bandwidth |
# +==================+===================+===================+
# | 0.01 | 0.01 | 2.0 |
# +------------------+-------------------+-------------------+
# | 0.1 | 0.1 | 2.0 |
# +------------------+-------------------+-------------------+
# | 1.0 | 1.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 2.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 4.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 8.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 10.0 | 2.5 | 2.5 |
# +------------------+-------------------+-------------------+
# | 20.0 | 5.0 | 5.0 |
# +------------------+-------------------+-------------------+
# | 40.0 | 10.0 | 10.0 |
# +------------------+-------------------+-------------------+
# | 45.0 | 11.25 | 5.0 |
# +------------------+-------------------+-------------------+
# | 48.0 | 12.0 | 2.0 |
# +------------------+-------------------+-------------------+
#
# MNE-Python has adopted this definition for its high-pass (and low-pass)
# transition bandwidth choices when using ``l_trans_bandwidth='auto'`` and
# ``h_trans_bandwidth='auto'``.
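# The following is a hedged sketch (ours, for illustration) that reproduces
# the table above; the exact MNE-Python internals may differ:

def compute_l_trans_bandwidth(l_freq):
    return min(max(l_freq * 0.25, 2.), l_freq)

def compute_h_trans_bandwidth(h_freq, nyq_freq):
    return min(max(h_freq * 0.25, 2.), nyq_freq - h_freq)

print(compute_l_trans_bandwidth(10.))       # 2.5, matching the table
print(compute_h_trans_bandwidth(45., 50.))  # 5.0, matching the table
###############################################################################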
#
# To choose the filter length automatically with ``filter_length='auto'``,
# the reciprocal of the shortest transition bandwidth is used to ensure
# decent attenuation at the stop frequency. Specifically, the reciprocal
# (in samples) is multiplied by 6.2, 6.6, or 11.0 for the Hann, Hamming,
# or Blackman windows, respectively as selected by the ``fir_window``
# argument.
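# An illustrative back-of-envelope estimate (assumed here, not the exact
# internal code) for a Hamming window and a 10 Hz shortest transition
# bandwidth at our 1000 Hz sample rate:
trans_bw = 10.  # Hz, hypothetical shortest transition bandwidth
approx_filter_length = int(round(6.6 * sfreq / trans_bw))  # 660 samples
###############################################################################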
#
# .. note:: These multiplicative factors are double what is given in
# Ifeachor and Jervis [2]_ (p. 357). The window functions have a
# smearing effect on the frequency response; I&J thus take the
# approach of setting the stop frequency as
# :math:`f_s = f_p + f_{trans} / 2.`, but our stated definitions of
# :math:`f_s` and :math:`f_{trans}` do not
# allow us to do this in a nice way. Instead, we increase our filter
# length to achieve acceptable (20+ dB) attenuation by
# :math:`f_s = f_p + f_{trans}`, and excellent (50+ dB)
# attenuation by :math:`f_s + f_{trans}` (and usually earlier).
#
# In 0.14, we default to using a Hamming window in filter design, as it
# provides up to 53 dB of stop-band attenuation with small pass-band ripple.
#
# .. note:: In band-pass applications, often a low-pass filter can operate
# effectively with fewer samples than the high-pass filter, so
# it is advisable to apply the high-pass and low-pass separately.
#
# For more information on how to use the
# MNE-Python filtering functions with real data, consult the preprocessing
# tutorial on :ref:`tut_artifacts_filter`.
#
# Defaults in MNE-C
# -----------------
# MNE-C by default uses:
#
# 1. 5 Hz transition band for low-pass filters.
# 2. 3-sample transition band for high-pass filters.
# 3. Filter length of 8197 samples.
#
# The filter is designed in the frequency domain, creating a linear-phase
# filter such that the delay is compensated for as is done with the MNE-Python
# ``phase='zero'`` filtering option.
#
# Squared-cosine ramps are used in the transition regions. Because these
# are used in place of more gradual (e.g., linear) transitions,
# a given transition width will result in more temporal ringing but also more
# rapid attenuation than the same transition width in windowed FIR designs.
#
# The default filter length will generally have excellent attenuation
# but long ringing for the sample rates typically encountered in M-EEG data
# (e.g. 500-2000 Hz).
#
# Defaults in other software
# --------------------------
# A good but possibly outdated comparison of filtering in various software
# packages is available in [7]_. Briefly:
#
# * EEGLAB
# MNE-Python in 0.14 defaults to behavior very similar to that of EEGLAB,
# see the `EEGLAB filtering FAQ`_ for more information.
# * FieldTrip
# By default FieldTrip applies a forward-backward Butterworth IIR filter
# of order 4 (band-pass and band-stop filters) or 2 (for low-pass and
# high-pass filters). Similar filters can be achieved in MNE-Python when
# filtering with :meth:`raw.filter(..., method='iir') <mne.io.Raw.filter>`
# (see also :func:`mne.filter.construct_iir_filter` for options).
#   For more information, see e.g. `FieldTrip band-pass documentation`_.
#
# Summary
# =======
#
# When filtering, there are always tradeoffs that should be considered.
# One important tradeoff is between time-domain characteristics (like ringing)
# and frequency-domain attenuation characteristics (like effective transition
# bandwidth). Filters with sharp frequency cutoffs can produce outputs that
# ring for a long time when they operate on signals with frequency content
# in the transition band. In general, therefore, the wider a transition band
# that can be tolerated, the better behaved the filter will be in the time
# domain.
#
# References
# ==========
#
# .. [1] Parks TW, Burrus CS (1987). Digital Filter Design.
# New York: Wiley-Interscience.
# .. [2] Ifeachor, E. C., & Jervis, B. W. (2002). Digital Signal Processing:
# A Practical Approach. Prentice Hall.
# .. [3] Vanrullen, R. (2011). Four common conceptual fallacies in mapping
# the time course of recognition. Perception Science, 2, 365.
# .. [4] Acunzo, D. J., MacKenzie, G., & van Rossum, M. C. W. (2012).
# Systematic biases in early ERP and ERF components as a result
# of high-pass filtering. Journal of Neuroscience Methods,
# 209(1), 212–218. http://doi.org/10.1016/j.jneumeth.2012.06.011
# .. [5] Rousselet, G. A. (2012). Does filtering preclude us from studying
# ERP time-courses? Frontiers in Psychology, 3(131)
# .. [6] Widmann, A., & Schröger, E. (2012). Filter effects and filter
# artifacts in the analysis of electrophysiological data.
# Perception Science, 233.
# .. [7] Widmann, A., Schröger, E., & Maess, B. (2015). Digital filter
# design for electrophysiological data – a practical approach.
# Journal of Neuroscience Methods, 250, 34–46.
# .. [8] Tanner, D., Morgan-Short, K., & Luck, S. J. (2015).
# How inappropriate high-pass filters can produce artifactual effects
# and incorrect conclusions in ERP studies of language and cognition.
# Psychophysiology, 52(8), 997–1009. http://doi.org/10.1111/psyp.12437
# .. [9] Maess, B., Schröger, E., & Widmann, A. (2016).
# High-pass filters and baseline correction in M/EEG analysis.
# Commentary on: “How inappropriate high-pass filters can produce
# artefacts and incorrect conclusions in ERP studies of language
# and cognition.” Journal of Neuroscience Methods, 266, 164–165.
# .. [10] Tanner, D., Norton, J. J. S., Morgan-Short, K., & Luck, S. J. (2016).
#        On high-pass filter artifacts (they’re real) and baseline correction
#        (it’s a good idea) in ERP/ERMF analysis.
#        Journal of Neuroscience Methods, 266, 166–170.
# .. [11] Maess, B., Schröger, E., & Widmann, A. (2016).
#        High-pass filters and baseline correction in M/EEG analysis-continued
#        discussion. Journal of Neuroscience Methods, 266, 171–172.
# .. [12] Kappenman E. & Luck, S. (2010). The effects of impedance on data
# quality and statistical significance in ERP recordings.
# Psychophysiology, 47, 888-904.
#
# .. _FIR: https://en.wikipedia.org/wiki/Finite_impulse_response
# .. _IIR: https://en.wikipedia.org/wiki/Infinite_impulse_response
# .. _sinc: https://en.wikipedia.org/wiki/Sinc_function
# .. _moving average: https://en.wikipedia.org/wiki/Moving_average
# .. _autoregression: https://en.wikipedia.org/wiki/Autoregressive_model
# .. _Remez: https://en.wikipedia.org/wiki/Remez_algorithm
# .. _matlab firpm: http://www.mathworks.com/help/signal/ref/firpm.html
# .. _matlab fir2: http://www.mathworks.com/help/signal/ref/fir2.html
# .. _matlab firls: http://www.mathworks.com/help/signal/ref/firls.html
# .. _Butterworth filter: https://en.wikipedia.org/wiki/Butterworth_filter
# .. _eeglab filtering faq: https://sccn.ucsd.edu/wiki/Firfilt_FAQ
# .. _fieldtrip band-pass documentation: http://www.fieldtriptoolbox.org/reference/ft_preproc_bandpassfilter # noqa
|
alexandrebarachant/mne-python
|
tutorials/plot_background_filtering.py
|
Python
|
bsd-3-clause
| 42,317
|
[
"Gaussian"
] |
47f5bd416d472d93f2339f310666719725a038f59432e29987771730a8c400a1
|
# Create your views here.
import logging
from django.contrib.auth.decorators import login_required
from cenet.models import Neuron, SynapticConn, CENetwork
from django.db.models import Q
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from lems_ui.models import LemsModel, ParameterisedModel, Lems2FpgaJob
from lems_ui.models import all_available_param_models, MODEL_TYPES
from rtw_ui.models import RTW_CONF
from lems_ui.models import LemsElement
from lems.model import model, component, dynamics
from lems.parser import LEMS
from django.shortcuts import render, redirect
import json
from django.conf import settings
User = settings.AUTH_USER_MODEL
LOG = logging.getLogger(__name__)
def hier_details(this_comp_name):
lems_model = model.Model()
parser = LEMS.LEMSFileParser(lems_model)
this_ret_data = {'name': this_comp_name, 'exposures':[]}
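    # Walk up the LEMS component-type inheritance ("extends") chain,
    # collecting the exposures declared at each level.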
    while True:
# get the lems element
this_lems_elem = LemsElement.objects.get(lems_elem='component_types',
name=this_comp_name)
if not this_lems_elem.extends:
break
parser.parse('<LEMS>' + this_lems_elem.xml + '</LEMS>')
this_lems_obj = lems_model.component_types[this_comp_name]
for elem in this_lems_obj.exposures:
this_ret_data['exposures'].append(
{'name': elem.name, 'dim': elem.dimension})
this_comp_name = this_lems_elem.extends
return this_ret_data
@login_required
def delete_rtw_model(rq, model_name=None):
try:
# user can only delete models they own
model = RTW_CONF.objects.get(name=model_name, owner=rq.user)
model.delete()
    except RTW_CONF.DoesNotExist:
model = None
return redirect('/djlems/rtw_ui/dashboard/')
@login_required
def rtw_ui(rq,rtw_id=None):
if not rq.user.is_authenticated():
        return render(rq, 'base/base.html', {'user':rq.user})
open_rtw_conf = None
network_confs = CENetwork.objects.filter(Q(owner=rq.user) | Q(public=True)).order_by('name')
# all models shared with user
models = all_available_param_models(rq.user, MODEL_TYPES.NEURON)
for model in models:
this_comps = json.loads(model.json_data)['comps']
for key, value in this_comps.items():
#if value['name'] == 'neuron_model':
model.ctjson_data = json.dumps( hier_details(value['type']))
if rtw_id is not None:
open_rtw_conf = RTW_CONF.objects.get(name=rtw_id, owner=rq.user)
rtw_id = open_rtw_conf.id
return render(rq, 'rtw_ui/rtw_ui.html',
{'user':rq.user, 'rtw_id':rtw_id, 'network_confs':network_confs,
'neurons': Neuron.objects.order_by('name'),
'open_rtw_conf':open_rtw_conf, 'models' : models})
@csrf_exempt
def save_conf(rq):
post_data = rq.POST['msg']
data = json.loads(post_data)
LOG.debug('SAVING NETWORK: ' + data['rtw_name'])
data['net_name'] = data['net_name'].split(':')[0]
net = CENetwork.objects.get(name=data['net_name'], owner=rq.user)
try:
rtw = RTW_CONF.objects.get(name=data['rtw_name'], owner=rq.user)
    except RTW_CONF.DoesNotExist:
rtw = RTW_CONF()
rtw.owner = rq.user
rtw.name = data['rtw_name']
rtw.network = net
rtw.json = post_data
rtw.save()
net.status = "Ready for Synthesis"
net.save()
return HttpResponse(json.dumps('RTW: ' + rtw.name + ' saved ok'))
def get_net(rq, net_id=None):
LOG.debug("Get CElegans Network")
open_net = None
    if net_id:  # was `if(id):`, which tested the builtin `id` instead
        try:
            open_net = CENetwork.objects.get(id=net_id)
        except CENetwork.DoesNotExist:
            pass
    return HttpResponse(open_net.json)
@login_required
def view_RTW_dashboard (request):
sorted_rb_results = []
sorted_share = []
rtw_confs = RTW_CONF.objects.filter(Q(owner=request.user)).order_by('name')
rtw_confs_share = {}
rtw_confs_public = RTW_CONF.objects.filter(Q(public=True)).order_by('name')
return render(request, "rtw_ui/dashboard.html",
dict(rtw_confs = rtw_confs, rtw_confs_share = rtw_confs_share, rtw_confs_public =rtw_confs_public)
)
|
Si-elegans/Web-based_GUI_Tools
|
rtw_ui/views.py
|
Python
|
apache-2.0
| 4,292
|
[
"NEURON"
] |
d5b2a2616fb1a0091b99e741e28a684d504ef633b287ef226310cec4e4959479
|
"""
@name: PyHouse/src/Modules/Computer/Web/_test/test_web_mainpage.py
@author: D Brian Kimmel
@contact: <D.BrianKimmel@gmail.com>
@copyright: (c) 2014-2019 by D. Brian Kimmel
@license: MIT License
@note: Created on Jun 6, 2014
@Summary:
"""
__updated__ = '2019-03-01'
# Import system type stuff
import xml.etree.ElementTree as ET
from twisted.trial import unittest
# Import PyMh files and modules.
# from Modules.Web import web_mainpage
from test.testing_mixin import SetupPyHouseObj
from Modules.Computer.Web.web_mainpage import modulepath, webpath
from Modules.Core.Utilities.debug_tools import PrettyFormatAny
class SetupMixin(object):
def setUp(self, p_root):
self.m_pyhouse_obj = SetupPyHouseObj().BuildPyHouseObj(p_root)
self.m_xml = SetupPyHouseObj().BuildXml(p_root)
class A0(unittest.TestCase):
def setUp(self):
pass
def test_00_Print(self):
print('Id: test_web_mainpage')
class B1_Path(SetupMixin, unittest.TestCase):
    def setUp(self):
        # NOTE: XML_LONG is not imported above; it must be provided by the
        # project's XML test-data module for this test to run.
        SetupMixin.setUp(self, ET.fromstring(XML_LONG))
def test_01_Modulepath(self):
print(PrettyFormatAny.form(modulepath.path, 'B01-01-A - modulepath'))
print(PrettyFormatAny.form(webpath.path, 'B01-01-A - modulepath'))
self.assertEqual(modulepath.path, '/home/briank/workspace/PyHouse/Project/src/Modules/Computer/Web')
# ## END DBK
|
DBrianKimmel/PyHouse
|
Project/src/Modules/Computer/Web/test/test_web_mainpage.py
|
Python
|
mit
| 1,391
|
[
"Brian"
] |
1d799e00dc39f45273256f47342f102a34c94543ebf5bbd11c79adf1db4b5eef
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import ast
from numba import templating
from numba import error, pipeline, nodes, ufunc_builder
from numba.minivect import specializers, miniast, miniutils
from numba import utils, functions
from numba import typesystem
from numba import visitors
from numba.support.numpy_support import slicenodes
from numba.vectorize import basic
print_ufunc = False
def is_elementwise_assignment(context, assmnt_node):
target_type = assmnt_node.targets[0].type
value_type = assmnt_node.value.type
if target_type.is_array:
# Allow arrays and scalars
context.promote_types(target_type, value_type)
return not value_type.is_object
return False
class ArrayExpressionRewrite(visitors.NumbaTransformer):
"""
Find element-wise expressions and run ElementalMapper to turn it into
a minivect AST or a ufunc.
"""
nesting_level = 0
elementwise = False
is_slice_assign = False
def register_array_expression(self, node, lhs=None):
"""
Start the mapping process for the outermost node in the array expression.
"""
self.elementwise = False
def visit_elementwise(self, elementwise, node):
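        # Only the outermost element-wise expression triggers registration;
        # nested sub-expressions are visited at a deeper nesting level and
        # folded into the same kernel.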
if elementwise and self.nesting_level == 0:
return self.register_array_expression(node)
self.nesting_level += 1
self.generic_visit(node)
self.nesting_level -= 1
self.elementwise = elementwise
return node
def get_py_ufunc_ast(self, lhs, node):
if lhs is not None:
lhs.ctx = ast.Load()
builder = ufunc_builder.UFuncConverter()
tree = builder.visit(node)
ufunc_ast = builder.build_ufunc_ast(tree)
if print_ufunc:
from meta import asttools
module = ast.Module(body=[ufunc_ast])
print((asttools.python_source(module)))
# Vectorize Python function
if lhs is None:
restype = node.type
else:
restype = lhs.type.dtype
argtypes = [op.type.dtype if op.type.is_array else op.type
for op in builder.operands]
signature = restype(*argtypes)
return ufunc_ast, signature, builder
def get_py_ufunc(self, lhs, node):
ufunc_ast, signature, ufunc_builder = self.get_py_ufunc_ast(lhs, node)
py_ufunc = ufunc_builder.compile_to_pyfunc(ufunc_ast)
return py_ufunc, signature, ufunc_builder
def visit_Assign(self, node):
self.is_slice_assign = False
self.visitlist(node.targets)
target_node = node.targets[0]
is_slice_assign = self.is_slice_assign
self.nesting_level = self.is_slice_assign
node.value = self.visit(node.value)
self.nesting_level = 0
elementwise = self.elementwise
if (len(node.targets) == 1 and is_slice_assign and
is_elementwise_assignment(self.context, node)):
target_node = slicenodes.rewrite_slice(target_node,
self.nopython)
return self.register_array_expression(node.value, lhs=target_node)
return node
def visit_Subscript(self, node):
# print ast.dump(node)
self.generic_visit(node)
is_store = isinstance(node.ctx, ast.Store)
self.is_slice_assign = is_store and node.type.is_array
if is_store:
if nodes.is_ellipsis(node.slice):
return node.value
elif node.value.type.is_array and node.type.is_array:
node = slicenodes.rewrite_slice(node, self.nopython)
return node
def visit_MathNode(self, node):
elementwise = node.arg.type.is_array
return self.visit_elementwise(elementwise, node)
def visit_BinOp(self, node):
elementwise = node.type.is_array
return self.visit_elementwise(elementwise, node)
visit_UnaryOp = visit_BinOp
class ArrayExpressionRewriteUfunc(ArrayExpressionRewrite):
"""
Compile array expressions to ufuncs. Then call the ufunc with the array
arguments.
vectorizer_cls: the ufunc vectorizer to use
CANNOT be used in a nopython context
"""
def __init__(self, context, func, ast, vectorizer_cls=None):
super(ArrayExpressionRewriteUfunc, self).__init__(context, func, ast)
self.vectorizer_cls = vectorizer_cls or basic.BasicASTVectorize
def register_array_expression(self, node, lhs=None):
super(ArrayExpressionRewriteUfunc, self).register_array_expression(node,
lhs)
py_ufunc, signature, ufunc_builder = self.get_py_ufunc(lhs, node)
# Vectorize Python function
vectorizer = self.vectorizer_cls(py_ufunc)
vectorizer.add(restype=signature.return_type, argtypes=signature.args)
ufunc = vectorizer.build_ufunc()
# Call ufunc
args = ufunc_builder.operands
if lhs is None:
keywords = None
else:
keywords = [ast.keyword('out', lhs)]
func = nodes.ObjectInjectNode(ufunc)
call_ufunc = nodes.ObjectCallNode(signature=None, func=func, args=args,
keywords=keywords, py_func=ufunc)
return nodes.ObjectTempNode(call_ufunc)
class NumbaproStaticArgsContext(utils.NumbaContext):
"Use a static argument list: shape, data1, strides1, data2, strides2, ..."
astbuilder_cls = miniast.ASTBuilder
# debug = True
# debug_elements = True
class ArrayExpressionRewriteNative(ArrayExpressionRewrite):
"""
Compile array expressions to a minivect kernel that calls a Numba
scalar kernel with scalar inputs:
a[:, :] = b[:, :] * c[:, :]
becomes
tmp_a = slice(a)
tmp_b = slice(b)
tmp_c = slice(c)
shape = broadcast(tmp_a, tmp_b, tmp_c)
call minikernel(shape, tmp_a.data, tmp_a.strides,
tmp_b.data, tmp_b.strides,
tmp_c.data, tmp_c.strides)
with
def numba_kernel(b, c):
return b * c
def minikernel(...):
for (...)
for(...)
a[i, j] = numba_kernel(b[i, j], c[i, j])
CAN be used in a nopython context
"""
def array_attr(self, node, attr):
# Perform a low-level bitcast from object to an array type
# array = nodes.CoercionNode(node, float_[:])
array = node
return nodes.ArrayAttributeNode(attr, array)
def register_array_expression(self, node, lhs=None):
super(ArrayExpressionRewriteNative, self).register_array_expression(
node, lhs)
lhs_type = lhs.type if lhs else node.type
is_expr = lhs is None
if node.type.is_array and lhs_type.ndim < node.type.ndim:
# TODO: this is valid in NumPy if the leading dimensions of the
# TODO: RHS have extent 1
raise error.NumbaError(
node, "Right hand side must have a "
"dimensionality <= %d" % lhs_type.ndim)
# Create ufunc scalar kernel
ufunc_ast, signature, ufunc_builder = self.get_py_ufunc_ast(lhs, node)
signature.struct_by_reference = True
# Compile ufunc scalar kernel with numba
ast.fix_missing_locations(ufunc_ast)
func_env, (_, _, _) = pipeline.run_pipeline2(
self.env, None, ufunc_ast, signature,
function_globals={},
)
# Manual linking
lfunc = func_env.lfunc
# print lfunc
operands = ufunc_builder.operands
functions.keep_alive(self.func, lfunc)
operands = [nodes.CloneableNode(operand) for operand in operands]
if lhs is not None:
lhs = nodes.CloneableNode(lhs)
broadcast_operands = [lhs] + operands
lhs = lhs.clone
else:
broadcast_operands = operands[:]
shape = slicenodes.BroadcastNode(lhs_type, broadcast_operands)
operands = [op.clone for op in operands]
if lhs is None and self.nopython:
raise error.NumbaError(
node, "Cannot allocate new memory in nopython context")
elif lhs is None:
# TODO: determine best output order at runtime
shape = shape.cloneable
lhs = nodes.ArrayNewEmptyNode(lhs_type, shape.clone,
lhs_type.is_f_contig).cloneable
# Build minivect wrapper kernel
context = NumbaproStaticArgsContext()
context.llvm_module = self.env.llvm_context.module
# context.debug = True
context.optimize_broadcasting = False
b = context.astbuilder
variables = [b.variable(name_node.type, "op%d" % i)
for i, name_node in enumerate([lhs] + operands)]
miniargs = [b.funcarg(variable) for variable in variables]
body = miniutils.build_kernel_call(lfunc.name, signature, miniargs, b)
minikernel = b.function_from_numpy(
templating.temp_name("array_expression"), body, miniargs)
lminikernel, ctypes_kernel = context.run_simple(
minikernel, specializers.StridedSpecializer)
# Build call to minivect kernel
operands.insert(0, lhs)
args = [shape]
scalar_args = []
for operand in operands:
if operand.type.is_array:
data_p = self.array_attr(operand, 'data')
data_p = nodes.CoercionNode(data_p,
operand.type.dtype.pointer())
if not isinstance(operand, nodes.CloneNode):
operand = nodes.CloneNode(operand)
strides_p = self.array_attr(operand, 'strides')
args.extend((data_p, strides_p))
else:
scalar_args.append(operand)
args.extend(scalar_args)
result = nodes.NativeCallNode(minikernel.type, args, lminikernel)
# Use native slicing in array expressions
slicenodes.mark_nopython(ast.Suite(body=result.args))
if not is_expr:
# a[:] = b[:] * c[:]
return result
# b[:] * c[:], return new array as expression
return nodes.ExpressionNode(stmts=[result], expr=lhs.clone)
|
shiquanwang/numba
|
numba/array_expressions.py
|
Python
|
bsd-2-clause
| 10,462
|
[
"VisIt"
] |
6826a77983683e6ec14007ccf6860473442fff8d8fef69f1690424f20669d7cc
|
import astropy.units as uu
import astropy.cosmology as co
aa = co.Planck13
from scipy.interpolate import interp1d
import numpy as n
import matplotlib
#matplotlib.use('pdf')
matplotlib.rcParams['font.size']=12
import matplotlib.pyplot as p
import glob
import sys
from scipy.optimize import curve_fit
import cPickle
import os
from os.path import join
from scipy.optimize import minimize
import scipy.fftpack as f
import time
from hankel import SphericalHankelTransform
# Tinker et al. 2005 halo bias, parametrized by peak height nu.
# NB: `aa` below shadows the cosmology object assigned to `aa` above.
aa = 1/2**0.5
bb = 0.35
cc = 0.8
deltac = 1.68
bnu = lambda nu : 1 + aa**(-0.5)/(deltac) * ( aa**0.5*(aa*nu**2.) + aa*0.5 * bb * (aa * nu**2.)**(1-cc) - (aa*nu**2.)**cc / ((aa*nu**2.)**cc + bb*(1-cc)*(1-cc/2.) ) )
bvm = lambda vm, vcut : bnu(vm/vcut)
fun = lambda vm, a, b, c : b* (vm/c) + a*n.e**(vm**0.5/c**0.5)
xs = n.arange(60, 2000, 1)
zmin=-0.1
zmax=0.1
sig_low, sig_high, sig_mean, scale, bias, biasErr, vol, aon, logmps = n.loadtxt(join(os.environ['MVIR_DIR'], "halo-bias-measurement-summary.data"), unpack=True)
vmean = sig_mean  # assumed alias: the fitting/plotting code below uses vmean
############
# Fit at redshift 0
############
sel0 = (aon==1)
res, cov = curve_fit(fun, sig_mean[sel0], bias[sel0], p0=(0.5, -0.5, 200), sigma=biasErr[sel0], maxfev=10000000)
"""
print res
print cov.diagonal()**0.5
p.figure(0,(5,5))
p.axes([0.18,0.18,0.75,0.75])
p.plot(xs, fun(xs, res[0], res[1], res[2]), 'k--',label='fit')
sel = (vol==400**3.)&(aon==1)
p.errorbar(vmean[sel], bias[sel], yerr = biasErr[sel], c='k', label='SMD', fmt='none')
sel = (vol==1000**3.)&(aon==1)
p.errorbar(vmean[sel], bias[sel], yerr = biasErr[sel], c='b', label='MDPL', fmt='none')
sel = (vol==2500**3.)&(aon==1)
p.errorbar(vmean[sel], bias[sel], yerr = biasErr[sel], c='r', label='BigMD', fmt='none')
sel = (vol==4000**3.)&(aon==1)
p.errorbar(vmean[sel], bias[sel], yerr = biasErr[sel], c='m', label='HMD', fmt='none')
#-cb = p.colorbar()
#cb.set_label('z')
p.xlabel(r'$V_{max}$ (km/s)')
p.ylabel(r'bias')
p.xlim((50, 1500))
p.ylim((0.6,4.5))
#p.yscale('log')
#p.xscale('log')
gl = p.legend(loc=2,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","halo-bias-z0.png"))
p.clf()
p.figure(0,(5,5))
p.axes([0.18,0.18,0.75,0.75])
sel = (vol==400**3.)&(aon==1)
p.errorbar(vmean[sel], bias[sel]/fun(vmean[sel], res[0], res[1], res[2]), yerr = biasErr[sel]/bias[sel], c='k', label='SMD', fmt='none')
sel = (vol==1000**3.)&(aon==1)
p.errorbar(vmean[sel], bias[sel]/fun(vmean[sel], res[0], res[1], res[2]), yerr = biasErr[sel]/bias[sel], c='b', label='MDPl', fmt='none')
sel = (vol==2500**3.)&(aon==1)
p.errorbar(vmean[sel], bias[sel]/fun(vmean[sel], res[0], res[1], res[2]), yerr = biasErr[sel]/bias[sel], c='r', label='BigMD', fmt='none')
sel = (vol==4000**3.)&(aon==1)
p.errorbar(vmean[sel], bias[sel]/fun(vmean[sel], res[0], res[1], res[2]), yerr = biasErr[sel]/bias[sel], c='m', label='HMD', fmt='none')
p.xlabel(r'$V_{max}$ (km/s)')
p.ylabel(r'residual = data / model')
p.xlim((50, 1500))
p.ylim((0.8,1.2))
#p.yscale('log')
#p.xscale('log')
gl = p.legend(loc=2,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","halo-bias-residual-z0.png"))
p.clf()
"""
##############
# Fit at redshift trend
##############
a0, b0, c0 = res
xs = n.arange(60, 2000, 1)
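# Redshift evolution: scale the z=0 best-fit amplitudes (a0, b0, c0) by
# low-order polynomials in z, then fit the expansion coefficients jointly.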
a_pr = lambda zz, a1, a2 : a0 *(1+a1*zz+a2*zz**2.) # + a2 *lz**2. + a3 *lz**3. + a4 *lz**4.
b_pr = lambda zz, b1, b2 : b0 *(1+b1*zz+b2*zz**2.)# +b3*zz**3.) # + b2 *lz**2.+ b3 *lz**3.)
c_pr = lambda zz, c1, c2 : c0 *(1+c1*zz+c2*zz**2) # + b2 *lz**2.+ b3 *lz**3.)
vfG = lambda vm, zz, ps : a_pr(zz,ps[0], ps[1])*n.e**(vm**0.5/c_pr(zz,ps[4], ps[5])**0.5) + (b_pr(zz,ps[2], ps[3]))* (vm /c_pr(zz,ps[4], ps[5]))
#vfG = lambda vm, zz, ps : a_pr(zz,ps[0])*n.e**(vm**0.5/c_pr(zz,ps[3], ps[4])**0.5) + (b_pr(zz,ps[1], ps[2]))* (vm /c_pr(zz,ps[3], ps[4]))
p1=[0,0,0,0,0,0] # [0.28, -0.04, 0.7, -0.47, -0., -0.55, 0.28]
chi2fun = lambda ps : n.sum( (vfG(vmean,1/aon-1,ps) - bias)**2. / (2*biasErr)**2. )/(len(bias) - len(p1))
res2 = minimize(chi2fun, p1, method='Powell',options={'xtol': 1e-6, 'disp': True, 'maxiter' : 50000000000000})
pOpt = res2.x
cov = res2.direc
chi2perpoint = lambda ps : (vfG(vmean,1/aon-1,ps) - bias)**2. / (2*biasErr)**2.
chi2pp = chi2perpoint(pOpt)
n.savetxt(join("..","clustering","bias0-best_fit_params.txt"),n.transpose([pOpt,cov.diagonal()**0.5]))
vs = n.arange(60,1500,2)
X,Y = n.meshgrid(vs,n.arange(zmin, zmax+0.025,0.025))
Z = vfG(X,Y,pOpt)
n.savetxt(join("..","clustering","bias0-best_fit.txt"),n.transpose([n.hstack((X)), n.hstack((Y)), n.hstack((Z))]) )
###############################
# TODO: implement single-redshift fits to get the right parametrization.
# TODO: save the redshift-dependence points by writing them out.
# TODO: write the redshift-dependence equation in the paper.
#######################################################
# now plots the results of the fit
print "now plots the results of the fit"
vmax_mod, z_mod, n_mod = n.loadtxt(join("..","clustering","bias0-best_fit.txt"), unpack=True)
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
sc1=p.scatter(vmax_mod, n_mod, c=z_mod,s=5, marker='o',label="model", rasterized=True, vmin = zmin, vmax = zmax)
sc1.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label("redshift")
p.xlabel(r'$V_{max}$ (km/s)')
p.ylabel('bias') # slog$_{10}[ n(>M)]')
p.xlim((50, 1500))
p.ylim((0.6,4.5))
#p.yscale('log')
#p.xscale('log')
gl = p.legend(loc=2,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","halo-bias-zAll-model.png"))
p.clf()
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
p.errorbar(vmean, bias, yerr = 2*biasErr, fmt='none',elinewidth=0.5, mfc='none',ecolor='k',rasterized = True)
sc1=p.scatter(vmean, bias, c=1/aon-1,s=5, marker='o',label="model", rasterized=True, vmin = zmin, vmax = zmax)
sc1.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label("redshift")
p.xlabel(r'$V_{max}$ (km/s)')
p.ylabel('bias') # slog$_{10}[ n(>M)]')
p.xlim((50, 1500))
p.ylim((0.6,4.5))
#p.yscale('log')
#p.xscale('log')
gl = p.legend(loc=2,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","halo-bias-zAll-data.png"))
p.clf()
print "ndof=",len(bias)-len(pOpt)
print "ndata",len(bias)
print "maxchi2distance",n.max(chi2pp)
print "Noutliers=",len((chi2pp>1.5).nonzero()[0])
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
sc1=p.scatter(vmean, 1/aon-1, c=chi2pp,s=5, marker='o', rasterized=True, vmin = 0, vmax = 1.2)
sc1.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label(r"(data-model)$^2$/(err data)$^2$")
p.xlabel(r'$V_{max}$ (km/s)')
p.ylabel(r'z') # log$_{10}[ n(>M)]')
p.xlim((50, 1500))
p.grid()
p.savefig(join("..","clustering","halo-bias-zAll-chi2pp.png"))
p.clf()
sys.exit()
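# ---------------------------------------------------------------------------
# NOTE: everything below is unreachable after the sys.exit() above; it is
# legacy plotting code kept for reference (some names used below, e.g. Rs,
# xiR and vmins, are not defined in this script).
# ---------------------------------------------------------------------------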
p.figure(0,(5,5))
p.axes([0.18,0.18,0.75,0.75])
p.scatter(vmean, bias**0.5, c=1./aon-1, s=20, edgecolors='none')
p.plot(xs, fun(xs, res[0], res[1], res[2]), 'k--')
#p.plot(xs, bvm(xs, res2[0]]), 'r--')
cb = p.colorbar()
cb.set_label('z')
p.xlabel(r'$V_{max}$ (km/s)')
p.ylabel(r'bias')
p.xlim((50, 3000))
p.ylim((0.6,4.5))
#p.yscale('log')
p.xscale('log')
#gl = p.legend(loc=2,fontsize=10)
#gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","ahalo-bias.png"))
p.clf()
p.figure(0,(5,5))
p.axes([0.18,0.18,0.75,0.75])
p.plot(vmean[aon==1], bias[aon==1]**0.5/fun(vmean[aon==1], res[0], res[1], res[2]), 'bo')
#p.plot(vmean[aon==1], bias[aon==1]**0.5/bvm(vmean[aon==1], res2[0]), 'r+')
p.plot(vmean[aon==1], 1+ biasErr[aon==1]*bias[aon==1]**(-0.5), 'k+')
p.plot(vmean[aon==1], 1- biasErr[aon==1]*bias[aon==1]**(-0.5), 'k+')
p.xlabel(r'$V_{max}$ (km/s)')
p.ylabel(r'residual = data / model')
p.xlim((50, 3000))
p.ylim((0.7,1.3))
#p.yscale('log')
p.xscale('log')
#gl = p.legend(loc=2,fontsize=10)
#gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","ahalo-bias-residual.png"))
p.clf()
print res
print cov.diagonal()**0.5 / res
p.figure(0,(5,5))
p.axes([0.18,0.18,0.75,0.75])
sel = (vol==400**3.)
# print len(bias[sel])
p.errorbar(vmean[sel], bias[sel]/n.polyval(ps, vmean[sel]), yerr = biasErr[sel]/ bias[sel], c='k', label='SMD')
sel = (vol==1000**3.)
# print len(bias[sel])
p.errorbar(vmean[sel], bias[sel]/n.polyval(ps, vmean[sel]), yerr = biasErr[sel]/ bias[sel], c='b', label='MDPl')
sel = (vol==2500**3.)
# print len(bias[sel])
p.errorbar(vmean[sel], bias[sel]/n.polyval(ps, vmean[sel]), yerr = biasErr[sel]/ bias[sel], c='r', label='BigMD')
sel = (vol==4000**3.)
# print len(bias[sel])
p.errorbar(vmean[sel], bias[sel]/n.polyval(ps, vmean[sel]), yerr = biasErr[sel]/ bias[sel], c='m', label='HMD')
p.xlabel(r'$V_{max}$ (km/s)')
p.ylabel(r'$b^2(V_{max})$/model')
p.xlim((50, 3000))
p.ylim((0.9,1.1))
#p.yscale('log')
p.xscale('log')
gl = p.legend(loc=2,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","a_1.00000","ahalo-bias-z0-residual-zoom.png"))
p.clf()
p.figure(0,(5,5))
p.axes([0.18,0.18,0.75,0.75])
sel = (vol==400**3.)
# print len(bias[sel])
p.errorbar(vmean[sel], bias[sel], yerr = biasErr[sel], c='k', label='SMD')
sel = (vol==1000**3.)
# print len(bias[sel])
p.errorbar(vmean[sel], bias[sel], yerr = biasErr[sel], c='b', label='MDPl')
sel = (vol==2500**3.)
# print len(bias[sel])
p.errorbar(vmean[sel], bias[sel], yerr = biasErr[sel], c='r', label='BigMD')
sel = (vol==4000**3.)
# print len(bias[sel])
p.errorbar(vmean[sel], bias[sel], yerr = biasErr[sel], c='m', label='HMD')
p.plot(vmean, n.polyval(ps, vmean), 'r--', lw=2,label='fit')
p.xlabel(r'$V_{max}$ (km/s)')
p.ylabel(r'$b^2(V_{max}$)')
p.xlim((50, 3000))
#p.ylim((0.1,100))
p.yscale('log')
p.xscale('log')
gl = p.legend(loc=2,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","a_1.00000","ahalo-bias-z0.png"))
p.clf()
sys.exit()
ps = n.polyfit(vmean**2., bias, degree)#, w = 1./(biasErr))
n.savetxt("fit-halo-bias2-vmax2.data",ps)
p.figure(0,(5,5))
p.axes([0.18,0.18,0.75,0.75])
sel = (vol==400**3.)
# print len(bias[sel])
p.errorbar(vmean[sel]**2., bias[sel]/n.polyval(ps, vmean[sel]**2.), yerr = biasErr[sel]/ bias[sel], c='k', label='SMD')
sel = (vol==1000**3.)
# print len(bias[sel])
p.errorbar(vmean[sel]**2., bias[sel]/n.polyval(ps, vmean[sel]**2.), yerr = biasErr[sel]/ bias[sel], c='b', label='MDPl')
sel = (vol==2500**3.)
# print len(bias[sel])
p.errorbar(vmean[sel]**2., bias[sel]/n.polyval(ps, vmean[sel]**2.), yerr = biasErr[sel]/ bias[sel], c='r', label='BigMD')
sel = (vol==4000**3.)
# print len(bias[sel])
p.errorbar(vmean[sel]**2., bias[sel]/n.polyval(ps, vmean[sel]**2.), yerr = biasErr[sel]/ bias[sel], c='m', label='HMD')
p.xlabel(r'$V_{max}$ (km/s)')
p.ylabel(r'$b^2(V_{max})$/model')
#p.xlim((50, 3000))
p.ylim((0.7,1.3))
#p.yscale('log')
p.xscale('log')
gl = p.legend(loc=2,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","a_1.00000","bhalo-bias-vm2-z0-residual.png"))
p.clf()
p.figure(0,(5,5))
p.axes([0.18,0.18,0.75,0.75])
sel = (vol==400**3.)
# print len(bias[sel])
p.errorbar(vmean[sel]**2., bias[sel], yerr = biasErr[sel], c='k', label='SMD')
sel = (vol==1000**3.)
# print len(bias[sel])
p.errorbar(vmean[sel]**2., bias[sel], yerr = biasErr[sel], c='b', label='MDPl')
sel = (vol==2500**3.)
# print len(bias[sel])
p.errorbar(vmean[sel]**2., bias[sel], yerr = biasErr[sel], c='r', label='BigMD')
sel = (vol==4000**3.)
# print len(bias[sel])
p.errorbar(vmean[sel]**2., bias[sel], yerr = biasErr[sel], c='m', label='HMD')
p.plot(vmean**2., n.polyval(ps, vmean**2.), 'r--', lw=2,label='fit')
p.xlabel(r'$V^2_{max}$ (km/s)')
p.ylabel(r'$b^2(V_{max}$)')
#p.xlim((50, 3000))
#p.ylim((0.1,100))
p.yscale('log')
p.xscale('log')
gl = p.legend(loc=2,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","a_1.00000","bhalo-bias-vm2-z0.png"))
p.clf()
sys.exit()
for vmin in vmins:
list44 = n.array(glob.glob(join("..","MD_1Gpc","halo_bias","clustering","hlist_1.00000_vmax_"+vmin+"*rmax_050_xiR.pkl")))
list42 = n.array(glob.glob(join("..","MD_0.4Gpc","halo_bias","clustering","hlist_1.00000_vmax_"+vmin+"*rmax_050_xiR.pkl")))
list41 = n.array(glob.glob(join("..","MD_2.5Gpc","halo_bias","clustering","hlist_80_vmax_"+vmin+"*rmax_140_xiR.pkl")))
list43 = n.array(glob.glob(join("..","MD_4Gpc","halo_bias","clustering","hlist_128_vmax_"+vmin+"*rmax_140_xiR.pkl")))
list40=n.hstack((list41, list42, list43, list44))
list40.sort()
# print list40
p.figure(0,(11,6))
p.axes([0.15,0.15,0.6,0.75])
for ii in range(len(list40)):
f=open(list40[ii],'r')
bin_xi3D,xis, DR, volume, dV, pairCount, pairs, Ntotal, nD, nR, vbinsL, vbinsH = cPickle.load(f)
f.close()
		# box name is the second path component; normalize separators so this works on any OS
		boxname = list40[ii].replace('\\', '/').split('/')[1]
		if boxname == "MD_0.4Gpc":
			color = 'k'  # was 'w', which is invisible on a white background
			volume = 400**3.
		elif boxname == "MD_1Gpc":
			color = 'b'
			volume = 1000**3.
		elif boxname == "MD_2.5Gpc":
			color = 'r'
			volume = 2500**3.
		elif boxname == "MD_4Gpc":
			color = 'm'
			volume = 4000**3.
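		# merge adjacent radial bins (pair counts and shell volumes add), then apply the
		# natural estimator xi = DD*V/(dV*Npairs) - 1 to the rebinned counts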
DR_rb = DR[::2][:-1] + DR[1::2]
dV_rb = dV[::2][:-1] + dV[1::2]
xi_rb = DR_rb*volume/(dV_rb * pairCount) -1.
rr = (bin_xi3D[1:] + bin_xi3D[:-1])/2.
rr_rb = bin_xi3D[::2][1:]
		p.plot(rr_rb, rr_rb*rr_rb*xi_rb, label=boxname, c=color)
p.plot(Rs,Rs*Rs*xiR,'b--',label='DM linear theory')
p.xlabel('r Mpc/h')
	p.ylabel(r'$r^2 \xi(r)$ (MD)')
p.xlim((0,200))
p.ylim((-50,100))
p.title(str(n.round(vbinsL))+"<vmax<"+str(n.round(vbinsH))+" z=0")
#p.yscale('log')
#p.xscale('log')
gl = p.legend(bbox_to_anchor=(1.05, 1), loc=2,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","a_1.00000","xi-MD-"+vmin+".png"))
p.clf()
for vmin in vmins:
list44 = n.array(glob.glob(join("..","MD_1Gpc","halo_bias","clustering","hlist_1.00000_vmax_"+vmin+"*rmax_015_xiR.pkl")))
list42 = n.array(glob.glob(join("..","MD_0.4Gpc","halo_bias","clustering","hlist_1.00000_vmax_"+vmin+"*rmax_015_xiR.pkl")))
list41 = n.array(glob.glob(join("..","MD_2.5Gpc","halo_bias","clustering","hlist_80_vmax_"+vmin+"*rmax_015_xiR.pkl")))
list43 = n.array(glob.glob(join("..","MD_4Gpc","halo_bias","clustering","hlist_128_vmax_"+vmin+"*rmax_015_xiR.pkl")))
list40=n.hstack((list41, list42, list43, list44))
list40.sort()
# print list40
p.figure(0,(11,6))
p.axes([0.15,0.15,0.6,0.75])
for ii in range(len(list40)):
		f = open(list40[ii], 'rb')  # binary mode for pickle
bin_xi3D,xis, DR, volume, dV, pairCount, pairs, Ntotal, nD, nR, vbinsL, vbinsH = cPickle.load(f)
f.close()
		# same portable box-name extraction as above
		boxname = list40[ii].replace('\\', '/').split('/')[1]
		if boxname == "MD_0.4Gpc":
			color = 'k'
			volume = 400**3.
		elif boxname == "MD_1Gpc":
			color = 'b'
			volume = 1000**3.
		elif boxname == "MD_2.5Gpc":
			color = 'r'
			volume = 2500**3.
		elif boxname == "MD_4Gpc":
			color = 'm'
			volume = 4000**3.
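		# natural estimator: observed pair counts over the expectation for a uniform field, minus one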
xi = DR*volume/(dV * pairCount) -1.
rr = (bin_xi3D[1:] + bin_xi3D[:-1])/2.
		p.plot(rr, rr*xi, label=boxname, c=color)
p.plot(Rs,Rs*xiR,'b--',label='DM linear theory')
p.xlabel('r Mpc/h')
	p.ylabel(r'$r \xi(r)$ (MD)')
p.xlim((0.01,20))
p.ylim((1.,200))
p.title(str(n.round(vbinsL))+"<vmax<"+str(n.round(vbinsH))+" z=0")
p.yscale('log')
p.xscale('log')
gl = p.legend(bbox_to_anchor=(1.05, 1), loc=2,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","a_1.00000","xi-lt20-MD-"+vmin+".png"))
p.clf()
sys.exit()
##########################################################3
##########################################################3
##########################################################3
# Z=1
##########################################################3
##########################################################3
##########################################################3
list40 = n.array(glob.glob(join("..","MD_1Gpc","halo_bias","clustering","hlist_1.0*_vmax_*_xiR.pkl")))
list40.sort()
# print list40
p.figure(0,(11,6))
p.axes([0.17,0.17,0.6,0.8])
for ii in range(len(list40)):
	f = open(list40[ii], 'rb')  # binary mode for pickle
bin_xi3D_04,xis_04, DR_04, volume_04, dV_04, pairCount_04, pairs_04, Ntotal_04, nD_04, nR_04, vbinsL_04, vbinsH_04 = cPickle.load(f)
f.close()
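	# only a fraction of the full box was used; rescale 1+xi by Vbox/Vused to correct for this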
fv_04 = 1000**3. / volume_04
xi04V = fv_04*(xis_04+1)-1.
rr = (bin_xi3D_04[1:] + bin_xi3D_04[:-1])/2.
p.plot(rr,rr*rr*xi04V,label= str(n.round(vbinsL_04))+"<vmax<"+str(n.round(vbinsH_04)))
p.xlabel('r Mpc/h')
p.ylabel(r'$r^2 \xi(r)$ (MDPL)')
p.xlim((0,200))
p.ylim((-1,200))
#p.yscale('log')
#p.xscale('log')
gl = p.legend(bbox_to_anchor=(1.05, 1), loc=2,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","xi-MDPL.png"))
p.show()
list40 = n.array(glob.glob(join("..","MD_2.5Gpc","halo_bias","clustering","hlist_80*_vmax_*_xiR.pkl")))
list40.sort()
# print list40
p.figure(0,(11,6))
p.axes([0.17,0.17,0.6,0.8])
for ii in range(len(list40))[:-3][::2]:
	f = open(list40[ii], 'rb')  # binary mode for pickle
bin_xi3D_04,xis_04, DR_04, volume_04, dV_04, pairCount_04, pairs_04, Ntotal_04, nD_04, nR_04, vbinsL_04, vbinsH_04 = cPickle.load(f)
f.close()
fv_04 = 2500**3. / volume_04
xi04V = fv_04*(xis_04+1)-1.
rr = (bin_xi3D_04[1:] + bin_xi3D_04[:-1])/2.
p.plot(rr,rr*rr*xi04V,label= str(n.round(vbinsL_04))+"<vmax<"+str(n.round(vbinsH_04)))
p.xlabel('r Mpc/h')
p.ylabel(r'$r^2 \xi(r)$ (BigMDPL)')
p.xlim((0,200))
p.ylim((-1,200))
#p.yscale('log')
#p.xscale('log')
gl = p.legend(bbox_to_anchor=(1.05, 1), loc=2,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","xi-BigMDPL.png"))
p.show()
list04 = n.array(glob.glob(join("..","MD_0.4Gpc","halo_bias","clustering","hlist_1.00*_vmax_*_xiR.pkl")))
list04.sort()
# print list04
p.figure(0,(11,6))
p.axes([0.17,0.17,0.6,0.8])
for ii in range(len(list04)): #[::2]:
	f = open(list04[ii], 'rb')  # binary mode for pickle
bin_xi3D_04,xis_04, DR_04, volume_04, dV_04, pairCount_04, pairs_04, Ntotal_04, nD_04, nR_04, vbinsL_04, vbinsH_04 = cPickle.load(f)
f.close()
fv_04 = 400**3. / volume_04
xi04V = fv_04*(xis_04+1)-1.
rr = (bin_xi3D_04[1:] + bin_xi3D_04[:-1])/2.
p.plot(rr,xi04V,label= str(n.round(vbinsL_04))+"<vmax<"+str(n.round(vbinsH_04)))
xrr = n.arange(0,50,0.5)
#p.plot(xrr,20*xrr**(-1.8),'k--',lw=2)
p.axvline(3)
p.axvline(7)
p.xlabel('r Mpc/h')
p.ylabel(r'$\xi(r)$ (SMDPL)')
p.xlim((0.1,15))
p.ylim((0.1,200))
p.yscale('log')
p.xscale('log')
gl = p.legend(bbox_to_anchor=(1.05, 1), loc=2,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","xi-SMDPL.png"))
p.show()
|
JohanComparat/nbody-npt-functions
|
bin/bin_onePT/halo-bias-1-fit.py
|
Python
|
cc0-1.0
| 18,224
|
[
"TINKER"
] |
718c373a21c4d87006558463cdb5be8f4ae6711d2cb814825f995ab3ef2a5160
|
import os
import sys
import numpy as np
def calc_accuracy_objective(f_soc, allelectron_forces_file):
"""
    Calculates the accuracy objective by comparing Socorro and
    all-electron forces at each atomic structure tested.
"""
f_allelectron = read_allelectron_forces(filename=allelectron_forces_file)
return force_objective(f_soc, f_allelectron)
def force_objective(f_soc, f_allelectron):
"""
    f_soc holds the Socorro forces and must have the same shape as
    f_allelectron: M by 3N, where M is the number of atomic configurations
    and N is the number of atoms in the crystal.
"""
assert f_allelectron.shape[1]%3 == 0, "Each atom should have three force components."
num_configs = f_allelectron.shape[0]
    num_atoms = f_allelectron.shape[1] // 3  # integer division: three force components per atom
    # accuracy objective is the RMSD of the difference between the Socorro and all-electron force vectors
force_obj_unweighted = np.sqrt( np.sum((f_soc-f_allelectron)**2.)/num_configs/num_atoms )
#force_obj = force_weight * force_obj_unweighted
return force_obj_unweighted
def read_allelectron_forces(filename='allelectron_forces.dat'):
"""
    Read the reference ('correct') forces, as calculated with Elk, from file.
    Returns an M x N numpy array, where M is the number of atomic structures
    and N/3 is the number of atoms in each structure.
"""
ae_forces = np.loadtxt(filename)
return ae_forces
# #--- atan objective --------------------------------------------------------------------
# # read last (fifth) column from each scattering file, which is a single RMS of data from logderivative file
#
# atan_sum = 0.
# scattering_file_count = 0
# for elem in element_list:
# dir = elem+'_pseudopotential'
# os.chdir(dir)
# for file in os.listdir('.'):
# if file.startswith("scattering."):
# scattering_data = np.loadtxt(file)
# atan_and_core_error = scattering_data[4]
# atan_sum += atan_and_core_error
# scattering_file_count += 1
# os.chdir('..')
#
# # division by pi/2 should normalize to 1 if E_fit = E_min
# atan_obj_unweighted = atan_sum / float(scattering_file_count) / (np.pi/2.)
# atan_obj = atan_weight * atan_obj_unweighted
# #---------------------------------------------------------------------------------------
#
# #total_obj = force_obj + atan_obj
#
# # # print accuracy objective
# # print "force_objective = ", force_obj
# # print "atan_objective = ", atan_obj
# # print "total_accuracy_objective =", total_obj
|
caseynbrock/opal2
|
calc_accuracy.py
|
Python
|
mit
| 2,508
|
[
"CRYSTAL",
"Elk"
] |
59121556b13fb0ec94c6f0a26c5e859c41cd682e372bb23c67d572ec50ef92f7
|