text | repo_name | path | language | license | size | keyword | text_hash
|---|---|---|---|---|---|---|---|
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test pickling of Iris objects.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
import cPickle
import io
import numpy as np
import iris
class TestPickle(tests.IrisTest):
def pickle_then_unpickle(self, obj):
"""Returns a generator of ("cpickle protocol number", object) tuples."""
for protocol in range(1 + cPickle.HIGHEST_PROTOCOL):
bio = io.BytesIO()
cPickle.dump(obj, bio, protocol)
# move the bio back to the start and reconstruct
bio.seek(0)
reconstructed_obj = cPickle.load(bio)
yield protocol, reconstructed_obj
def assertCubeData(self, cube1, cube2):
np.testing.assert_array_equal(cube1.lazy_data().ndarray(),
cube2.lazy_data().ndarray())
@tests.skip_data
def test_cube_pickle(self):
cube = iris.load_cube(tests.get_data_path(('PP', 'globClim1', 'theta.pp')))
self.assertTrue(cube.has_lazy_data())
self.assertCML(cube, ('cube_io', 'pickling', 'theta.cml'), checksum=False)
for _, recon_cube in self.pickle_then_unpickle(cube):
self.assertTrue(recon_cube.has_lazy_data())
self.assertCML(recon_cube, ('cube_io', 'pickling', 'theta.cml'), checksum=False)
self.assertCubeData(cube, recon_cube)
@tests.skip_data
def test_cube_with_deferred_coord_points(self):
# Data with 2d lats and lons that when loaded results in points that
# are LazyArray objects.
filename = tests.get_data_path(('NetCDF',
'rotated',
'xy',
'rotPole_landAreaFraction.nc'))
cube = iris.load_cube(filename)
# Pickle and unpickle. Do not perform any CML tests
# to avoid side effects.
_, recon_cube = next(self.pickle_then_unpickle(cube))
self.assertEqual(recon_cube, cube)
@tests.skip_data
def test_cubelist_pickle(self):
cubelist = iris.load(tests.get_data_path(('PP', 'COLPEX', 'theta_and_orog_subset.pp')))
single_cube = cubelist[0]
self.assertCML(cubelist, ('cube_io', 'pickling', 'cubelist.cml'))
self.assertCML(single_cube, ('cube_io', 'pickling', 'single_cube.cml'))
for _, reconstructed_cubelist in self.pickle_then_unpickle(cubelist):
self.assertCML(reconstructed_cubelist, ('cube_io', 'pickling', 'cubelist.cml'))
self.assertCML(reconstructed_cubelist[0], ('cube_io', 'pickling', 'single_cube.cml'))
for cube_orig, cube_reconstruct in zip(cubelist, reconstructed_cubelist):
self.assertArrayEqual(cube_orig.data, cube_reconstruct.data)
self.assertEqual(cube_orig, cube_reconstruct)
    def test_pickling_equality_misc(self):
items_to_test = [
iris.unit.Unit("hours since 2007-01-15 12:06:00", calendar=iris.unit.CALENDAR_STANDARD),
iris.unit.as_unit('1'),
iris.unit.as_unit('meters'),
iris.unit.as_unit('no-unit'),
iris.unit.as_unit('unknown')
]
for orig_item in items_to_test:
for protocol, reconstructed_item in self.pickle_then_unpickle(orig_item):
fail_msg = ('Items are different after pickling at protocol %s.'
'\nOrig item: %r\nNew item: %r' % (protocol, orig_item, reconstructed_item)
)
self.assertEqual(orig_item, reconstructed_item, fail_msg)
if __name__ == "__main__":
tests.main()
| jkettleb/iris | lib/iris/tests/test_pickling.py | Python | lgpl-3.0 | 4,614 | [
"NetCDF"
] | 25cb173fe7ec51d26326c4eb234c717380c72ed01e9bf39544d515bb88be3ef6 |
#!/usr/bin/env python2
# Copyright 2011, 2012 Alexandre Gravier (al.gravier@gmail)
# This file is part of PyCogMo.
# PyCogMo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# PyCogMo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with PyCogMo. If not, see <http://www.gnu.org/licenses/>.
import itertools
import logging
from logging import NullHandler
from mock import Mock, patch
from nose import with_setup
from nose.tools import eq_, raises, timed
from ui.graphical.pynn_to_visu import *
import pyNN.brian as pynnn
DUMMY_LOGGER = logging.getLogger("testLogger")
DUMMY_LOGGER.addHandler(NullHandler())
A = None
pynnn.setup()
def setup_adapter():
global A
A = PynnToVisuAdapter(DUMMY_LOGGER)
# holder class ("namespace") for the test variables
class Tns(object):
pass
def setup_and_fill_adapter():
setup_adapter()
Tns.pop_size = 27
Tns.pynn_pop1 = pynnn.Population(Tns.pop_size, pynnn.IF_cond_alpha)
Tns.ids1 = [int(u) for u in Tns.pynn_pop1.all()]
Tns.pynn_pop2 = pynnn.Population(Tns.pop_size, pynnn.IF_cond_alpha,
structure=pynnn.space.Grid3D())
Tns.ids2 = [int(u) for u in Tns.pynn_pop2.all()]
A.add_pynn_population(Tns.pynn_pop1)
Tns.pop2_alias = "testmap"
A.add_pynn_population(Tns.pynn_pop2, alias=Tns.pop2_alias)
Tns.pynn_proj1 = pynnn.Projection(Tns.pynn_pop1, Tns.pynn_pop2,
pynnn.OneToOneConnector())
Tns.pynn_proj2 = pynnn.Projection(Tns.pynn_pop2, Tns.pynn_pop1,
pynnn.AllToAllConnector())
A.add_pynn_projection(Tns.pynn_pop1, Tns.pynn_pop2,
Tns.pynn_proj1)
A.add_pynn_projection(Tns.pynn_pop2, Tns.pynn_pop1,
Tns.pynn_proj2)
@with_setup(setup_adapter)
def test_adapter_locked_states():
"basic lock/unlock for changes adapter state test"
assert A.check_open()
A.commit_structure()
assert not A.check_open()
@with_setup(setup_adapter)
def test_adapter_methods_call_check_open():
"""methods in the methods_checking_open list have called check_open"""
A.check_open = Mock(return_value=True)
pynn_pop1 = pynnn.Population(1, pynnn.IF_cond_alpha)
pynn_pop2 = pynnn.Population(1, pynnn.IF_cond_alpha)
pynn_prj = pynnn.Projection(
pynn_pop1, pynn_pop2,
pynnn.OneToOneConnector(),
target='excitatory')
pynn_u = pynn_pop1[0]
methods_checking_open = [
[A.assert_open, ()],
[A.commit_structure, ()],
[A.add_pynn_population, (pynn_pop1,)],
[A.add_pynn_projection, (pynn_pop1, pynn_pop1,
pynn_prj)]]
for m in methods_checking_open:
m[0](*m[1])
assert A.check_open.called, \
m[0].__name__ + " does not call check_open."
A.check_open.reset_mock()
PATCH = None
def setup_mock_unit_unit_id():
global PATCH
PATCH = patch.object(pynnn.simulator.ID, "__int__")
PATCH.start()
def teardown_mock_unit_id():
global PATCH
PATCH.stop()
PATCH = None
@with_setup(setup_mock_unit_unit_id, teardown_mock_unit_id)
@with_setup(setup_adapter)
def test_add_pynn_population_processes_all_units():
"""add_pynn_population checks the int value of each unit it's given."""
pop_size = 27
pynnn.simulator.ID.__int__.return_value = 1
pynn_pop1 = pynnn.Population(pop_size, pynnn.IF_cond_alpha)
A.add_pynn_population(pynn_pop1)
for u in pynn_pop1.all():
assert u.__int__.call_count == pop_size, \
"units missed in the 2D case"
pynnn.simulator.ID.__int__.reset_mock()
pynnn.simulator.ID.__int__.return_value = 1
pynn_pop2 = pynnn.Population(pop_size, pynnn.IF_cond_alpha,
structure = pynnn.space.Grid3D())
A.add_pynn_population(pynn_pop2, alias = "testmap")
for u in pynn_pop2.all():
assert u.__int__.call_count == pop_size, "units missed in the 3D case"
@with_setup(setup_and_fill_adapter)
def test_add_pynn_population_sets_up_labels_and_aliases():
pynn_pop3 = pynnn.Population(1, pynnn.IF_cond_alpha)
A.add_pynn_population(pynn_pop3)
assert A.aliases[Tns.pynn_pop1.label] == Tns.pynn_pop1.label
assert A.aliases[Tns.pynn_pop2.label] == Tns.pop2_alias
assert A.aliases[pynn_pop3.label] == pynn_pop3.label
@with_setup(setup_adapter)
def test_adapter_keeps_unit_count():
"""Add_pynn_population and commit_structure result in consistent number of
units."""
assert A.num_units == 0
pop_size = 27
pynn_pop1 = pynnn.Population(pop_size, pynnn.IF_cond_alpha)
A.add_pynn_population(pynn_pop1, alias = "soilwork")
pynn_pop2 = pynnn.Population(pop_size, pynnn.IF_cond_alpha,
structure = pynnn.space.Grid3D())
A.add_pynn_population(pynn_pop2)
A.commit_structure()
assert A.num_units == pop_size * 2
@with_setup(setup_and_fill_adapter)
def test_add_pynn_projection_adds_all_connections():
"""Tests if add_pynn_projection adds exactly the connections it's
given to its internal units_connections list."""
for c in itertools.groupby(A.units_connections, key=lambda x:x[0]):
out_it = c[1] # iterator on outbounds cx from unit c[0]
if c[0] in Tns.ids1:
assert out_it.next()[1] in Tns.ids2
try:
out_it.next()
assert False, ("There should only be one outbound connection"
" from this unit.")
except StopIteration:
pass
elif c[0] in Tns.ids2:
o_l = [o[1] for o in out_it]
assert set(o_l) == set(Tns.ids1)
else:
assert False, "Unit ID inexistent on the PyNN side."
@with_setup(setup_and_fill_adapter)
def test_commit_structure_results_in_complete_output_struct():
"""Tests the completeness of the output structure's units and
connections."""
    # TODO: compare A.output_struct against a hand-made version. Only doable when
    # VisualisableNetworkStructure is done and tested.
A.commit_structure()
out_units = set([u.unit_id for u in A.output_struct.units])
assert out_units == set(Tns.ids2 + Tns.ids1)
out_maps_aliases = A.output_struct.maps
loma = list(out_maps_aliases.iterkeys())
assert len(loma) == 2
assert "testmap" in loma
out_units_conn = A.output_struct.units_conn
# one to one in id1 -> id2, all to all in id2 -> id1
out_conn = set([(s, r) for (s, r, _) in out_units_conn])
for s, r in itertools.izip(Tns.ids1, Tns.ids2):
assert (s, r) in out_conn
for s, r in itertools.product(Tns.ids2, Tns.ids1):
assert (s, r) in out_conn
assert len(out_conn) == Tns.pop_size + Tns.pop_size**2
out_maps_conn = A.output_struct.maps_conn
assert len(out_maps_conn) == 2
| agravier/pycogmo | tests/pynn_to_visu_tests.py | Python | gpl-3.0 | 7,283 | [
"Brian"
] | 45ef31f01be303faac027206c2612f9daf052330eb07418aadbced78db931262 |
""" Test the SSLTransport mechanism """
import os
import select
import socket
import threading
from pytest import fixture
from DIRAC.Core.Security.test.x509TestUtilities import CERTDIR, USERCERT, getCertOption
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
from DIRAC.Core.Utilities.CFG import CFG
from DIRAC.Core.DISET.private.Transports import PlainTransport, GSISSLTransport, M2SSLTransport
# TODO: Expired hostcert
# TODO: Expired usercert
# TODO: Expired proxy
# TODO: Invalid/missing CA
# TODO: Connect Timeouts
# TODO: SSL Algorithms & Ciphers
# TODO: Missing hostcert
# TODO: Missing usercert
# TODO: Missing proxy
# TODO: Session test?
# TODO: Reload of CAs?
# Define all the locations
caLocation = os.path.join(CERTDIR, 'ca')
hostCertLocation = os.path.join(CERTDIR, 'host/hostcert.pem')
hostKeyLocation = os.path.join(CERTDIR, 'host/hostkey.pem')
proxyFile = os.path.join(os.path.dirname(__file__), 'proxy.pem')
MAGIC_QUESTION = "Who let the dog out"
MAGIC_ANSWER = "Who, Who, who ?"
PORT_NUMBER = 50000
# Transports are now tested in pairs:
# "Server-Client"
# This allows for interoperability tests between GSI and M2 versions.
# Each pair is defined as a string.
TRANSPORTTESTS = ("Plain-Plain",
"M2-M2",
"M2-GSI",
"GSI-GSI",
"GSI-M2")
# https://www.ibm.com/developerworks/linux/library/l-openssl/index.html
# http://www.herongyang.com/Cryptography/
class DummyServiceReactor(object):
""" This class behaves like a ServiceReactor, except that it exists after treating a single request """
def __init__(self, transportObject, port):
""" c'tor
:param transportObject: type of TransportObject we will use
:param port: port to listen to
"""
self.__prepared = False
self.port = port
self.transportObject = transportObject
# Server transport object
self.transport = None
# Client connection
self.clientTransport = None
# Message received from the client
self.receivedMessage = None
def handleConnection(self, clientTransport):
""" This is normally done is Service.py in different thread
It more or less does Service._processInThread
"""
self.clientTransport = clientTransport
res = clientTransport.handshake()
assert res['OK'], res
self.receivedMessage = clientTransport.receiveData(1024)
clientTransport.sendData(MAGIC_ANSWER)
clientTransport.close()
def prepare(self):
""" Start listening """
if not self.__prepared:
self.__createListeners()
self.__prepared = True
def serve(self):
""" Wait for connections and handle the first one. """
self.prepare()
self.__acceptIncomingConnection()
def __createListeners(self):
""" Create the listener transport """
self.transport = self.transportObject(("", self.port), bServerMode=True)
res = self.transport.initAsServer()
assert res['OK']
def __acceptIncomingConnection(self):
"""
This method just gets the incoming connection, and handle it, once.
"""
sockets = [self.transport.getSocket()]
try:
_inList, _outList, _exList = select.select(sockets, [], [], 2)
clientTransport = self.transport.acceptConnection()['Value']
self.handleConnection(clientTransport)
except socket.error:
return
def closeListeningConnections(self):
""" Close the connection """
self.transport.close()
def transportByName(transport):
""" helper function to get a transport class by 'friendly' name. """
if transport.lower() == "plain":
return PlainTransport.PlainTransport
elif transport.lower() == "m2":
return M2SSLTransport.SSLTransport
elif transport.lower() == "gsi":
return GSISSLTransport.SSLTransport
raise RuntimeError("Unknown Transport Name: %s" % transport)
@fixture(scope="function", params=TRANSPORTTESTS)
def create_serverAndClient(request):
""" This function starts a server, and closes it after
The server will use the parametrized transport type
"""
# Reinitialize the configuration.
# We do it here rather than at the start of the module
  # to accommodate pytest when going through all the DIRAC tests
gConfigurationData.localCFG = CFG()
gConfigurationData.remoteCFG = CFG()
gConfigurationData.mergedCFG = CFG()
gConfigurationData.generateNewVersion()
gConfigurationData.setOptionInCFG('/DIRAC/Security/CALocation', caLocation)
gConfigurationData.setOptionInCFG('/DIRAC/Security/CertFile', hostCertLocation)
gConfigurationData.setOptionInCFG('/DIRAC/Security/KeyFile', hostKeyLocation)
testStr = request.param
serverName, clientName = testStr.split("-")
serverClass = transportByName(serverName)
clientClass = transportByName(clientName)
sr = DummyServiceReactor(serverClass, PORT_NUMBER)
server_thread = threading.Thread(target=sr.serve)
sr.prepare()
server_thread.start()
# Create the client
clientOptions = {'clientMode': True,
'proxyLocation': proxyFile,
}
clientTransport = clientClass(
("localhost", PORT_NUMBER), bServerMode=False, **clientOptions)
res = clientTransport.initAsClient()
assert res['OK'], res
yield sr, clientTransport
clientTransport.close()
sr.closeListeningConnections()
server_thread.join()
# Clean the config
gConfigurationData.localCFG = CFG()
gConfigurationData.remoteCFG = CFG()
gConfigurationData.mergedCFG = CFG()
gConfigurationData.generateNewVersion()
def ping_server(clientTransport):
""" This sends a message to the server and expects an answer
This basically does the same as BaseClient.py
:param clientTransport: the Transport object to be used as client
"""
clientTransport.setSocketTimeout(5)
clientTransport.sendData(MAGIC_QUESTION)
serverReturn = clientTransport.receiveData()
return serverReturn
def test_simpleMessage(create_serverAndClient):
""" Send a message, wait for an answer """
serv, client = create_serverAndClient
serverAnswer = ping_server(client)
assert serv.receivedMessage == MAGIC_QUESTION
assert serverAnswer == MAGIC_ANSWER
def test_getRemoteInfo(create_serverAndClient):
""" Check the information from remote peer"""
serv, client = create_serverAndClient
ping_server(client)
addr_info = client.getRemoteAddress()
assert addr_info[0] in ('127.0.0.1', '::ffff:127.0.0.1', '::1')
assert addr_info[1] == PORT_NUMBER
# The peer credentials are not filled on the client side
assert client.peerCredentials == {}
# We do not know about the port, so check only the address, taking into account bloody IPv6
assert serv.clientTransport.getRemoteAddress()[0] in (
'127.0.0.1', '::ffff:127.0.0.1', '::1')
peerCreds = serv.clientTransport.peerCredentials
# There are no credentials for PlainTransport
if client.__class__.__name__ == 'PlainTransport':
assert peerCreds == {}
else:
assert peerCreds['DN'] == getCertOption(USERCERT, 'subjectDN')
assert peerCreds['x509Chain'].getNumCertsInChain()['Value'] == 2
assert peerCreds['isProxy'] is True
assert peerCreds['isLimitedProxy'] is False
| chaen/DIRAC | Core/DISET/private/Transports/test/Test_SSLTransport.py | Python | gpl-3.0 | 7,191 | [
"DIRAC"
] | 8006350bb946637e2937dcad33b94fe5203aaa6ea60515810747a84c27bf1074 |
# -*- coding: utf-8 -*-
import logging
import os
import bpy
from mathutils import Matrix
from mmd_tools import bpyutils
from mmd_tools.core import vmd
from mmd_tools.core import vpd
class VPDExporter:
def __init__(self):
self.__osm_name = None
self.__scale = 1
self.__bone_util_cls = vmd.importer.BoneConverter
def __exportVPDFile(self, filepath, bones=None, morphs=None):
vpd_file = vpd.File()
vpd_file.osm_name = self.__osm_name
if bones:
vpd_file.bones = bones
if morphs:
vpd_file.morphs = morphs
vpd_file.save(filepath=filepath)
logging.info('Exported %s', vpd_file)
def __getConverters(self, pose_bones):
return {b:self.__bone_util_cls(b, self.__scale, invert=True) for b in pose_bones}
def __exportBones(self, armObj, converters=None, matrix_basis_map=None):
if armObj is None:
return None
pose_bones = armObj.pose.bones
if converters is None:
converters = self.__getConverters(pose_bones)
if matrix_basis_map is None:
matrix_basis_map = {}
matrix_identity = Matrix.Identity(4)
vpd_bones = []
for b in pose_bones:
if b.is_mmd_shadow_bone:
continue
if b.matrix_basis == matrix_basis_map.get(b, matrix_identity):
continue
bone_name = b.mmd_bone.name_j or b.name
converter = converters[b]
location = converter.convert_location(b.location)
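            # Blender's mathutils quaternions are ordered (w, x, y, z); the VPD
            # bone entry stores (x, y, z, w), hence the reordering below.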
w, x, y, z = b.matrix_basis.to_quaternion()
w, x, y, z = converter.convert_rotation([x, y, z, w])
vpd_bones.append(vpd.VpdBone(bone_name, location, [x, y, z, w]))
return vpd_bones
def __exportPoseLib(self, armObj, pose_type, filepath, use_pose_mode=False):
if armObj is None:
return None
if armObj.pose_library is None:
return None
pose_bones = armObj.pose.bones
converters = self.__getConverters(pose_bones)
backup = {b:(b.matrix_basis.copy(), b.bone.select) for b in pose_bones}
for b in pose_bones:
b.bone.select = False
matrix_basis_map = {}
if use_pose_mode:
matrix_basis_map = {b:bak[0] for b, bak in backup.items()}
def __export_index(index, filepath):
for b in pose_bones:
b.matrix_basis = matrix_basis_map.get(b, None) or Matrix.Identity(4)
bpy.ops.poselib.apply_pose(pose_index=index)
vpd_bones = self.__exportBones(armObj, converters, matrix_basis_map)
self.__exportVPDFile(filepath, vpd_bones)
try:
pose_markers = armObj.pose_library.pose_markers
with bpyutils.select_object(armObj):
bpy.ops.object.mode_set(mode='POSE')
if pose_type == 'ACTIVE':
if 0 <= pose_markers.active_index < len(pose_markers):
__export_index(pose_markers.active_index, filepath)
else:
folder = os.path.dirname(filepath)
for i, m in enumerate(pose_markers):
__export_index(i, os.path.join(folder, m.name+'.vpd'))
finally:
for b, bak in backup.items():
b.matrix_basis, b.bone.select = bak
def __exportMorphs(self, meshObj):
if meshObj is None:
return None
if meshObj.data.shape_keys is None:
return None
vpd_morphs = []
key_blocks = meshObj.data.shape_keys.key_blocks
for i in key_blocks.values():
if i.value == 0:
continue
vpd_morphs.append(vpd.VpdMorph(i.name, i.value))
return vpd_morphs
def export(self, **args):
armature = args.get('armature', None)
mesh = args.get('mesh', None)
filepath = args.get('filepath', '')
self.__scale = args.get('scale', 1.0)
self.__osm_name = '%s.osm'%args.get('model_name', None)
pose_type = args.get('pose_type', 'CURRENT')
if pose_type == 'CURRENT':
vpd_bones = self.__exportBones(armature)
vpd_morphs = self.__exportMorphs(mesh)
self.__exportVPDFile(filepath, vpd_bones, vpd_morphs)
elif pose_type in {'ACTIVE', 'ALL'}:
use_pose_mode = args.get('use_pose_mode', False)
if use_pose_mode:
self.__bone_util_cls = vmd.importer.BoneConverterPoseMode
self.__exportPoseLib(armature, pose_type, filepath, use_pose_mode)
else:
            raise Exception('Unknown pose type "%s"' % pose_type)
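# Hypothetical usage sketch (not part of the original module): export the current
# pose of an armature/mesh pair to a VPD file. Object names, the file path and the
# scale are placeholders.
#
# VPDExporter().export(armature=bpy.data.objects['Armature'],
#                      mesh=bpy.data.objects['Body'],
#                      filepath='/tmp/pose.vpd',
#                      scale=1.0, model_name='model',
#                      pose_type='CURRENT')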
| powroupi/blender_mmd_tools | mmd_tools/core/vpd/exporter.py | Python | gpl-3.0 | 4,716 | [
"VMD"
] | 34fc317b1c3dbd29c8b4abaa016f6de6a7c531edea80ffb04ab70d5f9a262b0f |
##############################################################################
# MSIBI: A package for optimizing coarse-grained force fields using multistate
# iterative Boltzmann inversion.
# Copyright (c) 2017 Vanderbilt University and the Authors
#
# Authors: Christoph Klein, Timothy C. Moore
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files, to deal
# in MSIBI without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# # copies of MSIBI, and to permit persons to whom MSIBI is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of MSIBI.
#
# MSIBI IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH MSIBI OR THE USE OR OTHER DEALINGS ALONG WITH
# MSIBI.
#
# You should have received a copy of the MIT license.
# If not, see <https://opensource.org/licenses/MIT/>.
##############################################################################
import networkx as nx
from networkx import NetworkXNoPath
import numpy as np
def find_1_n_exclusions(top, pairs, n):
"""Find exclusions in a trajectory based on an exculsion principle
Parameters
----------
top : mdtraj.Topology
Topology object containing the types and list of bonds
pairs : array-like, shape=(n_pairs, 2), dtype=int
Each row gives the indices of two atoms.
n : int
Exclude particles in pairs separated by n or fewer bonds
"""
G = nx.Graph()
G.add_nodes_from([a.index for a in top.atoms])
bonds = [b for b in top.bonds]
bonds_by_index = [(b[0].index, b[1].index) for b in bonds]
G.add_edges_from(bonds_by_index)
to_exclude = []
# TODO: make faster by looping over bonds instead of pairs
for i, pair in enumerate(pairs):
if is_1_n(pair, n, G) == True:
to_exclude.append(i)
return np.asarray(to_exclude)
def is_1_n(pair, n, G):
"""Find if atoms in a pair are separated by n or less bonds
Parameters
----------
    n : int
        Separation threshold, in number of bonds
pair : [int, int]
Pair of atom indices
G : networkx.Graph
A graph with atoms and nodes and bonds as edges
Returns
-------
answer : bool
answer == True if atoms are separated by n or fewer bonds, False otherwise
The graph is expected to have atom indices as nodes, and tuples of atom indices as
edges. Ideally, the nodes would be MDTraj.Atom-s and edges a list of tuples of
    MDTraj.Atom-s.
    """
try:
return n > len(nx.shortest_path(G, pair[0], pair[1])) - 1
except NetworkXNoPath:
return False
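# Hypothetical usage sketch (not part of the original module): flag the rows of
# `pairs` to exclude for every atom pair in an MDTraj topology. The file name is
# a placeholder.
#
# import itertools
# import mdtraj as md
# top = md.load('system.pdb').topology
# pairs = np.asarray(list(itertools.combinations(range(top.n_atoms), 2)))
# excluded_rows = find_1_n_exclusions(top, pairs, n=3)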
| mosdef-hub/msibi | msibi/utils/find_exclusions.py | Python | mit | 3,343 | [
"MDTraj"
] | 056b53bb0a64c85df1644c877f21171555e34bf2d8826a45acd0ac9b6015523b |
"""
Generate coulomb matrices for molecules.
See Montavon et al., _New Journal of Physics_ __15__ (2013) 095003.
"""
import numpy as np
import deepchem as dc
from deepchem.feat.base_classes import MolecularFeaturizer
from deepchem.utils import pad_array
from deepchem.feat.atomic_coordinates import AtomicCoordinates
class BPSymmetryFunctionInput(MolecularFeaturizer):
"""Calculate Symmetry Function for each atom in the molecules
This method is described in [1]_
References
----------
.. [1] Behler, Jörg, and Michele Parrinello. "Generalized neural-network
representation of high-dimensional potential-energy surfaces." Physical
review letters 98.14 (2007): 146401.
Note
----
This class requires RDKit to be installed.
"""
def __init__(self, max_atoms):
"""Initialize this featurizer.
Parameters
----------
max_atoms: int
The maximum number of atoms expected for molecules this featurizer will
process.
"""
self.max_atoms = max_atoms
def _featurize(self, mol):
coordfeat = AtomicCoordinates()
coordinates = coordfeat._featurize(mol)[0]
atom_numbers = np.array([atom.GetAtomicNum() for atom in mol.GetAtoms()])
atom_numbers = np.expand_dims(atom_numbers, axis=1)
assert atom_numbers.shape[0] == coordinates.shape[0]
n_atoms = atom_numbers.shape[0]
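    # One row per atom, [atomic_number, x, y, z], zero-padded below to max_atoms rows.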
features = np.concatenate([atom_numbers, coordinates], axis=1)
return np.pad(features, ((0, self.max_atoms - n_atoms), (0, 0)), 'constant')
class CoulombMatrix(MolecularFeaturizer):
"""Calculate Coulomb matrices for molecules.
Coulomb matrices provide a representation of the electronic structure of a
molecule. This method is described in [1]_.
Parameters
----------
max_atoms : int
Maximum number of atoms for any molecule in the dataset. Used to
pad the Coulomb matrix.
remove_hydrogens : bool, optional (default False)
Whether to remove hydrogens before constructing Coulomb matrix.
randomize : bool, optional (default False)
Whether to randomize Coulomb matrices to remove dependence on atom
index order.
upper_tri : bool, optional (default False)
Whether to return the upper triangular portion of the Coulomb matrix.
n_samples : int, optional (default 1)
Number of random Coulomb matrices to generate if randomize is True.
seed : int, optional
Random seed.
Example
-------
>>> featurizers = dc.feat.CoulombMatrix(max_atoms=23)
>>> input_file = 'deepchem/feat/tests/data/water.sdf' # really backed by water.sdf.csv
>>> tasks = ["atomization_energy"]
>>> loader = dc.data.SDFLoader(tasks, featurizer=featurizers)
>>> dataset = loader.create_dataset(input_file) #doctest: +ELLIPSIS
Reading structures from deepchem/feat/tests/data/water.sdf.
References
----------
.. [1] Montavon, Grégoire, et al. "Learning invariant representations of
molecules for atomization energy prediction." Advances in neural information
processing systems. 2012.
Note
----
This class requires RDKit to be installed.
"""
conformers = True
name = 'coulomb_matrix'
def __init__(self,
max_atoms,
remove_hydrogens=False,
randomize=False,
upper_tri=False,
n_samples=1,
seed=None):
"""Initialize this featurizer.
Parameters
----------
max_atoms: int
The maximum number of atoms expected for molecules this featurizer will
process.
remove_hydrogens: bool, optional (default False)
If True, remove hydrogens before processing them.
randomize: bool, optional (default False)
If True, use method `randomize_coulomb_matrices` to randomize Coulomb matrices.
upper_tri: bool, optional (default False)
Generate only upper triangle part of Coulomb matrices.
n_samples: int, optional (default 1)
If `randomize` is set to True, the number of random samples to draw.
seed: int, optional (default None)
Random seed to use.
"""
try:
from rdkit import Chem
except ModuleNotFoundError:
raise ValueError("This class requires RDKit to be installed.")
self.max_atoms = int(max_atoms)
self.remove_hydrogens = remove_hydrogens
self.randomize = randomize
self.upper_tri = upper_tri
self.n_samples = n_samples
if seed is not None:
seed = int(seed)
self.seed = seed
def _featurize(self, mol):
"""
Calculate Coulomb matrices for molecules. If extra randomized
matrices are generated, they are treated as if they are features
for additional conformers.
Since Coulomb matrices are symmetric, only the (flattened) upper
triangular portion is returned.
Parameters
----------
mol : RDKit Mol
Molecule.
"""
features = self.coulomb_matrix(mol)
if self.upper_tri:
features = [f[np.triu_indices_from(f)] for f in features]
features = np.asarray(features)
return features
def coulomb_matrix(self, mol):
"""
Generate Coulomb matrices for each conformer of the given molecule.
Parameters
----------
mol : RDKit Mol
Molecule.
"""
from rdkit import Chem
if self.remove_hydrogens:
mol = Chem.RemoveHs(mol)
n_atoms = mol.GetNumAtoms()
z = [atom.GetAtomicNum() for atom in mol.GetAtoms()]
rval = []
for conf in mol.GetConformers():
d = self.get_interatomic_distances(conf)
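      # Coulomb matrix elements (atomic units): off-diagonal M_ij = Z_i * Z_j / |R_i - R_j|;
      # the diagonal is overwritten on the next lines with M_ii = 0.5 * Z_i ** 2.4.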
m = np.outer(z, z) / d
m[range(n_atoms), range(n_atoms)] = 0.5 * np.array(z)**2.4
if self.randomize:
for random_m in self.randomize_coulomb_matrix(m):
random_m = pad_array(random_m, self.max_atoms)
rval.append(random_m)
else:
m = pad_array(m, self.max_atoms)
rval.append(m)
rval = np.asarray(rval)
return rval
def randomize_coulomb_matrix(self, m):
"""Randomize a Coulomb matrix as decribed in [1]_:
1. Compute row norms for M in a vector row_norms.
2. Sample a zero-mean unit-variance noise vector e with dimension
equal to row_norms.
3. Permute the rows and columns of M with the permutation that
sorts row_norms + e.
Parameters
----------
m : ndarray
Coulomb matrix.
n_samples : int, optional (default 1)
Number of random matrices to generate.
seed : int, optional
Random seed.
References
----------
.. [1] Montavon et al., New Journal of Physics, 15, (2013), 095003
"""
rval = []
row_norms = np.asarray([np.linalg.norm(row) for row in m], dtype=float)
rng = np.random.RandomState(self.seed)
for i in range(self.n_samples):
e = rng.normal(size=row_norms.size)
p = np.argsort(row_norms + e)
new = m[p][:, p] # permute rows first, then columns
rval.append(new)
return rval
@staticmethod
def get_interatomic_distances(conf):
"""
Get interatomic distances for atoms in a molecular conformer.
Parameters
----------
conf : RDKit Conformer
Molecule conformer.
"""
n_atoms = conf.GetNumAtoms()
coords = [
conf.GetAtomPosition(i).__idiv__(0.52917721092) for i in range(n_atoms)
] # Convert AtomPositions from Angstrom to bohr (atomic units)
d = np.zeros((n_atoms, n_atoms), dtype=float)
for i in range(n_atoms):
for j in range(i):
d[i, j] = coords[i].Distance(coords[j])
d[j, i] = d[i, j]
return d
class CoulombMatrixEig(CoulombMatrix):
"""Calculate the eigenvalues of Coulomb matrices for molecules.
This featurizer computes the eigenvalues of the Coulomb matrices for provided
molecules. Coulomb matrices are described in [1]_.
Parameters
----------
max_atoms : int
Maximum number of atoms for any molecule in the dataset. Used to
pad the Coulomb matrix.
remove_hydrogens : bool, optional (default False)
Whether to remove hydrogens before constructing Coulomb matrix.
randomize : bool, optional (default False)
Whether to randomize Coulomb matrices to remove dependence on atom
index order.
n_samples : int, optional (default 1)
Number of random Coulomb matrices to generate if randomize is True.
seed : int, optional
Random seed.
Example
-------
>>> featurizers = dc.feat.CoulombMatrixEig(max_atoms=23)
>>> input_file = 'deepchem/feat/tests/data/water.sdf' # really backed by water.sdf.csv
>>> tasks = ["atomization_energy"]
>>> loader = dc.data.SDFLoader(tasks, featurizer=featurizers)
>>> dataset = loader.create_dataset(input_file) #doctest: +ELLIPSIS
Reading structures from deepchem/feat/tests/data/water.sdf.
References
----------
.. [1] Montavon, Grégoire, et al. "Learning invariant representations of
molecules for atomization energy prediction." Advances in neural information
processing systems. 2012.
"""
conformers = True
name = 'coulomb_matrix'
def __init__(self,
max_atoms,
remove_hydrogens=False,
randomize=False,
n_samples=1,
seed=None):
"""Initialize this featurizer.
Parameters
----------
max_atoms: int
The maximum number of atoms expected for molecules this featurizer will
process.
remove_hydrogens: bool, optional (default False)
If True, remove hydrogens before processing them.
randomize: bool, optional (default False)
If True, use method `randomize_coulomb_matrices` to randomize Coulomb matrices.
n_samples: int, optional (default 1)
If `randomize` is set to True, the number of random samples to draw.
seed: int, optional (default None)
Random seed to use.
"""
self.max_atoms = int(max_atoms)
self.remove_hydrogens = remove_hydrogens
self.randomize = randomize
self.n_samples = n_samples
if seed is not None:
seed = int(seed)
self.seed = seed
def _featurize(self, mol):
"""
Calculate eigenvalues of Coulomb matrix for molecules. Eigenvalues
are returned sorted by absolute value in descending order and padded
by max_atoms.
Parameters
----------
mol : RDKit Mol
Molecule.
"""
cmat = self.coulomb_matrix(mol)
features = []
for f in cmat:
w, v = np.linalg.eig(f)
w_abs = np.abs(w)
sortidx = np.argsort(w_abs)
sortidx = sortidx[::-1]
w = w[sortidx]
f = pad_array(w, self.max_atoms)
features.append(f)
features = np.asarray(features)
return features
| miaecle/deepchem | deepchem/feat/coulomb_matrices.py | Python | mit | 10,564 | [
"RDKit"
] | 02852b95779af99618e1bead9836ade225a5ae59b3578fcdcb5b1d3b504538d9 |
# -*- coding: utf-8 -*-
# Copyright (c) 2006, 2009-2010, 2012-2015 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2012, 2014 Google, Inc.
# Copyright (c) 2014-2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""check for signs of poor design"""
import re
from collections import defaultdict
from astroid import If, BoolOp
from pylint.interfaces import IAstroidChecker
from pylint.checkers import BaseChecker
from pylint.checkers.utils import check_messages
# regexp for ignored argument name
IGNORED_ARGUMENT_NAMES = re.compile('_.*')
MSGS = {
'R0901': ('Too many ancestors (%s/%s)',
'too-many-ancestors',
'Used when class has too many parent classes, try to reduce \
this to get a simpler (and so easier to use) class.'),
'R0902': ('Too many instance attributes (%s/%s)',
'too-many-instance-attributes',
'Used when class has too many instance attributes, try to reduce \
this to get a simpler (and so easier to use) class.'),
'R0903': ('Too few public methods (%s/%s)',
'too-few-public-methods',
'Used when class has too few public methods, so be sure it\'s \
really worth it.'),
'R0904': ('Too many public methods (%s/%s)',
'too-many-public-methods',
'Used when class has too many public methods, try to reduce \
this to get a simpler (and so easier to use) class.'),
'R0911': ('Too many return statements (%s/%s)',
'too-many-return-statements',
'Used when a function or method has too many return statement, \
making it hard to follow.'),
'R0912': ('Too many branches (%s/%s)',
'too-many-branches',
'Used when a function or method has too many branches, \
making it hard to follow.'),
'R0913': ('Too many arguments (%s/%s)',
'too-many-arguments',
'Used when a function or method takes too many arguments.'),
'R0914': ('Too many local variables (%s/%s)',
'too-many-locals',
'Used when a function or method has too many local variables.'),
'R0915': ('Too many statements (%s/%s)',
'too-many-statements',
'Used when a function or method has too many statements. You \
should then split it in smaller functions / methods.'),
'R0916': ('Too many boolean expressions in if statement (%s/%s)',
'too-many-boolean-expressions',
'Used when a if statement contains too many boolean '
'expressions'),
}
def _count_boolean_expressions(bool_op):
"""Counts the number of boolean expressions in BoolOp `bool_op` (recursive)
example: a and (b or c or (d and e)) ==> 5 boolean expressions
"""
nb_bool_expr = 0
for bool_expr in bool_op.get_children():
if isinstance(bool_expr, BoolOp):
nb_bool_expr += _count_boolean_expressions(bool_expr)
else:
nb_bool_expr += 1
return nb_bool_expr
class MisdesignChecker(BaseChecker):
"""checks for sign of poor/misdesign:
* number of methods, attributes, local variables...
* size, complexity of functions, methods
"""
__implements__ = (IAstroidChecker,)
# configuration section name
name = 'design'
# messages
msgs = MSGS
priority = -2
# configuration options
options = (('max-args',
{'default' : 5, 'type' : 'int', 'metavar' : '<int>',
'help': 'Maximum number of arguments for function / method'}
),
('ignored-argument-names',
{'default' : IGNORED_ARGUMENT_NAMES,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Argument names that match this expression will be '
'ignored. Default to name with leading underscore'}
),
('max-locals',
{'default' : 15, 'type' : 'int', 'metavar' : '<int>',
'help': 'Maximum number of locals for function / method body'}
),
('max-returns',
{'default' : 6, 'type' : 'int', 'metavar' : '<int>',
'help': 'Maximum number of return / yield for function / '
'method body'}
),
('max-branches',
{'default' : 12, 'type' : 'int', 'metavar' : '<int>',
'help': 'Maximum number of branch for function / method body'}
),
('max-statements',
{'default' : 50, 'type' : 'int', 'metavar' : '<int>',
'help': 'Maximum number of statements in function / method '
'body'}
),
('max-parents',
{'default' : 7,
'type' : 'int',
'metavar' : '<num>',
'help' : 'Maximum number of parents for a class (see R0901).'}
),
('max-attributes',
{'default' : 7,
'type' : 'int',
'metavar' : '<num>',
'help' : 'Maximum number of attributes for a class \
(see R0902).'}
),
('min-public-methods',
{'default' : 2,
'type' : 'int',
'metavar' : '<num>',
'help' : 'Minimum number of public methods for a class \
(see R0903).'}
),
('max-public-methods',
{'default' : 20,
'type' : 'int',
'metavar' : '<num>',
'help' : 'Maximum number of public methods for a class \
(see R0904).'}
),
('max-bool-expr',
{'default': 5,
'type': 'int',
'metavar': '<num>',
'help': 'Maximum number of boolean expressions in a if '
'statement'}
),
)
def __init__(self, linter=None):
BaseChecker.__init__(self, linter)
self.stats = None
self._returns = None
self._branches = None
self._stmts = 0
def open(self):
"""initialize visit variables"""
self.stats = self.linter.add_stats()
self._returns = []
self._branches = defaultdict(int)
@check_messages('too-many-ancestors', 'too-many-instance-attributes',
'too-few-public-methods', 'too-many-public-methods')
def visit_classdef(self, node):
"""check size of inheritance hierarchy and number of instance attributes
"""
        # Is the total inheritance hierarchy 7 or less?
nb_parents = len(list(node.ancestors()))
if nb_parents > self.config.max_parents:
self.add_message('too-many-ancestors', node=node,
args=(nb_parents, self.config.max_parents))
# Does the class contain less than 20 attributes for
# non-GUI classes (40 for GUI)?
# FIXME detect gui classes
if len(node.instance_attrs) > self.config.max_attributes:
self.add_message('too-many-instance-attributes', node=node,
args=(len(node.instance_attrs),
self.config.max_attributes))
@check_messages('too-few-public-methods', 'too-many-public-methods')
def leave_classdef(self, node):
"""check number of public methods"""
my_methods = sum(1 for method in node.mymethods()
if not method.name.startswith('_'))
all_methods = sum(1 for method in node.methods()
if not method.name.startswith('_'))
        # Does the class contain more than n public methods?
# This checks only the methods defined in the current class,
# since the user might not have control over the classes
# from the ancestors. It avoids some false positives
# for classes such as unittest.TestCase, which provides
# a lot of assert methods. It doesn't make sense to warn
# when the user subclasses TestCase to add his own tests.
if my_methods > self.config.max_public_methods:
self.add_message('too-many-public-methods', node=node,
args=(my_methods,
self.config.max_public_methods))
# stop here for exception, metaclass and interface classes
if node.type != 'class':
return
        # Does the class contain fewer than n public methods?
# This checks all the methods defined by ancestors and
# by the current class.
if all_methods < self.config.min_public_methods:
self.add_message('too-few-public-methods', node=node,
args=(all_methods,
self.config.min_public_methods))
@check_messages('too-many-return-statements', 'too-many-branches',
'too-many-arguments', 'too-many-locals',
'too-many-statements')
def visit_functiondef(self, node):
"""check function name, docstring, arguments, redefinition,
variable names, max locals
"""
# init branch and returns counters
self._returns.append(0)
# check number of arguments
args = node.args.args
if args is not None:
ignored_args_num = len(
[arg for arg in args
if self.config.ignored_argument_names.match(arg.name)])
argnum = len(args) - ignored_args_num
if argnum > self.config.max_args:
self.add_message('too-many-arguments', node=node,
args=(len(args), self.config.max_args))
else:
ignored_args_num = 0
# check number of local variables
locnum = len(node.locals) - ignored_args_num
if locnum > self.config.max_locals:
self.add_message('too-many-locals', node=node,
args=(locnum, self.config.max_locals))
# init statements counter
self._stmts = 1
visit_asyncfunctiondef = visit_functiondef
@check_messages('too-many-return-statements', 'too-many-branches',
'too-many-arguments', 'too-many-locals',
'too-many-statements')
def leave_functiondef(self, node):
"""most of the work is done here on close:
checks for max returns, branch, return in __init__
"""
returns = self._returns.pop()
if returns > self.config.max_returns:
self.add_message('too-many-return-statements', node=node,
args=(returns, self.config.max_returns))
branches = self._branches[node]
if branches > self.config.max_branches:
self.add_message('too-many-branches', node=node,
args=(branches, self.config.max_branches))
# check number of statements
if self._stmts > self.config.max_statements:
self.add_message('too-many-statements', node=node,
args=(self._stmts, self.config.max_statements))
leave_asyncfunctiondef = leave_functiondef
def visit_return(self, _):
"""count number of returns"""
if not self._returns:
return # return outside function, reported by the base checker
self._returns[-1] += 1
def visit_default(self, node):
"""default visit method -> increments the statements counter if
necessary
"""
if node.is_statement:
self._stmts += 1
def visit_tryexcept(self, node):
"""increments the branches counter"""
branches = len(node.handlers)
if node.orelse:
branches += 1
self._inc_branch(node, branches)
self._stmts += branches
def visit_tryfinally(self, node):
"""increments the branches counter"""
self._inc_branch(node, 2)
self._stmts += 2
@check_messages('too-many-boolean-expressions')
def visit_if(self, node):
"""increments the branches counter and checks boolean expressions"""
self._check_boolean_expressions(node)
branches = 1
# don't double count If nodes coming from some 'elif'
if node.orelse and (len(node.orelse) > 1 or
not isinstance(node.orelse[0], If)):
branches += 1
self._inc_branch(node, branches)
self._stmts += branches
def _check_boolean_expressions(self, node):
"""Go through "if" node `node` and counts its boolean expressions
if the "if" node test is a BoolOp node
"""
condition = node.test
if not isinstance(condition, BoolOp):
return
nb_bool_expr = _count_boolean_expressions(condition)
if nb_bool_expr > self.config.max_bool_expr:
self.add_message('too-many-boolean-expressions', node=condition,
args=(nb_bool_expr, self.config.max_bool_expr))
def visit_while(self, node):
"""increments the branches counter"""
branches = 1
if node.orelse:
branches += 1
self._inc_branch(node, branches)
visit_for = visit_while
def _inc_branch(self, node, branchesnum=1):
"""increments the branches counter"""
self._branches[node.scope()] += branchesnum
def register(linter):
"""required method to auto register this checker """
linter.register_checker(MisdesignChecker(linter))
| axbaretto/beam | sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/checkers/design_analysis.py | Python | apache-2.0 | 13,994 | [
"VisIt"
] | 147261389dc57affb5d456e6de53655d32000b3d5b9c265ccee70184c19b9691 |
# coding: utf-8
# Copyright (c) Materials Virtual Lab
# Distributed under the terms of the BSD License.
import warnings
from veidt.abstract import Model
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process import kernels
from sklearn.externals import joblib
class GaussianProcessRegressionModel(Model):
"""
Gaussian Process Regression Model
"""
def __init__(self, describer, kernel_category='RBF', restarts=10, **kwargs):
"""
Args:
describer (Describer): Describer to convert
input object to descriptors.
kernel_category (str): Name of kernel from
sklearn.gaussian_process.kernels. Default to 'RBF', i.e.,
squared exponential.
restarts (int): The number of restarts of the optimizer for
finding the kernel’s parameters which maximize the
log-marginal likelihood.
kwargs: kwargs to be passed to kernel object, e.g. length_scale,
length_scale_bounds.
"""
self.describer = describer
kernel = getattr(kernels, kernel_category)(**kwargs)
self.model = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=restarts)
self._xtrain = None
self._xtest = None
def fit(self, inputs, outputs, override=False):
"""
Args:
inputs (list): List of input training objects.
outputs (list): List/Array of output values
(supervisory signals).
override: (bool) Whether to calculate the feature
vectors from given inputs. Default to False. Set to True if
you want to retrain the model with a different set of
training inputs.
"""
if not self._xtrain or override:
xtrain = self.describer.describe_all(inputs)
else:
warnings.warn("Feature vectors retrieved from cache "
"and input training objects ignored. "
"To override the old cache with feature vectors "
"of new training objects, set override=True.")
xtrain = self._xtrain
self.model.fit(xtrain, outputs)
self._xtrain = xtrain
def predict(self, inputs, override=False, **kwargs):
"""
Args:
inputs (List): List of input testing objects.
override: (bool) Whether to calculate the feature
vectors from given inputs. Default to False. Set to True if
you want to test the model with a different set of testing inputs.
kwargs: kwargs to be passed to predict method, e.g.
return_std, return_cov.
Returns:
Predicted output array from inputs.
"""
if self._xtest is None or override:
xtest = self.describer.describe_all(inputs)
else:
warnings.warn("Feature vectors retrieved from cache "
"and input testing objects ignored. "
"To override the old cache with feature vectors "
"of new testing objects, set override=True.")
xtest = self._xtest
self._xtest = xtest
return self.model.predict(xtest, **kwargs)
@property
def params(self):
return self.model.get_params()
def save(self, model_fname):
joblib.dump(self.model, '%s.pkl' % model_fname)
def load(self, model_fname):
self.model = joblib.load(model_fname) | czhengsci/veidt | veidt/model/gaussian_process.py | Python | bsd-3-clause | 3,601 | [
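# Hypothetical usage sketch (not part of the original module): `describer` is any
# veidt Describer exposing describe_all(); the training/testing inputs and file
# name are placeholders.
#
# model = GaussianProcessRegressionModel(describer, kernel_category='RBF',
#                                        restarts=10, length_scale=1.0)
# model.fit(train_structures, train_energies)
# predictions, stds = model.predict(test_structures, return_std=True)
# model.save('gpr_model')  # writes gpr_model.pkl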
"Gaussian"
] | f5f0fb47d9e63d99428177988b5b65b4105f5437b716b2799ec6a25ad214c7a8 |
try: paraview.simple
except: from paraview.simple import *
from paraview import coprocessing
#--------------------------------------------------------------
# Code generated from cpstate.py to create the CoProcessor.
# ----------------------- CoProcessor definition -----------------------
def CreateCoProcessor():
def _CreatePipeline(coprocessor, datadescription):
class Pipeline:
a1_needsaname_PiecewiseFunction = CreatePiecewiseFunction( Points=[0.0, 0.0, 0.5, 0.0, 105.0, 1.0, 0.5, 0.0] )
a3_cellNormals_PiecewiseFunction = CreatePiecewiseFunction( Points=[0.999999982885729, 0.0, 0.5, 0.0, 0.9999999828857291, 1.0, 0.5, 0.0] )
a2_TextureCoordinates_PiecewiseFunction = CreatePiecewiseFunction( Points=[0.0, 0.0, 0.5, 0.0, 1.4142135623730951, 1.0, 0.5, 0.0] )
a3_Normals_PiecewiseFunction = CreatePiecewiseFunction( Points=[0.999999982885729, 0.0, 0.5, 0.0, 0.9999999828857291, 1.0, 0.5, 0.0] )
a1_needsaname_PVLookupTable = GetLookupTableForArray( "needsaname", 1, RGBPoints=[0.0, 0.23, 0.299, 0.754, 52.5, 0.865, 0.865, 0.865, 105.0, 0.706, 0.016, 0.15], VectorMode='Magnitude', NanColor=[0.25, 0.0, 0.0], ScalarOpacityFunction=a1_needsaname_PiecewiseFunction, ColorSpace='Diverging', ScalarRangeInitialized=1.0 )
a3_cellNormals_PVLookupTable = GetLookupTableForArray( "cellNormals", 3, RGBPoints=[0.999999982885729, 0.23, 0.299, 0.754, 0.9999999828857291, 0.865, 0.865, 0.865, 0.9999999828857291, 0.706, 0.016, 0.15], VectorMode='Magnitude', NanColor=[0.25, 0.0, 0.0], ScalarOpacityFunction=a3_cellNormals_PiecewiseFunction, ColorSpace='Diverging', ScalarRangeInitialized=1.0 )
a2_TextureCoordinates_PVLookupTable = GetLookupTableForArray( "TextureCoordinates", 2, RGBPoints=[0.0, 0.23, 0.299, 0.754, 0.7071067811865476, 0.865, 0.865, 0.865, 1.4142135623730951, 0.706, 0.016, 0.15], VectorMode='Magnitude', NanColor=[0.25, 0.0, 0.0], ScalarOpacityFunction=a2_TextureCoordinates_PiecewiseFunction, ColorSpace='Diverging', ScalarRangeInitialized=1.0 )
a3_Normals_PVLookupTable = GetLookupTableForArray( "Normals", 3, RGBPoints=[0.999999982885729, 0.23, 0.299, 0.754, 0.9999999828857291, 0.865, 0.865, 0.865, 0.9999999828857291, 0.706, 0.016, 0.15], VectorMode='Magnitude', NanColor=[0.25, 0.0, 0.0], ScalarOpacityFunction=a3_Normals_PiecewiseFunction, ColorSpace='Diverging', ScalarRangeInitialized=1.0 )
tmp_vti = coprocessor.CreateProducer( datadescription, "input" )
Histogram1 = Histogram( guiName="Histogram1", CustomBinRanges=[0.0, 105.0], SelectInputArray=['POINTS', 'needsaname'] )
SetActiveSource(tmp_vti)
RenderView1 = coprocessor.CreateView( CreateRenderView, "image_0_%t.png", 1, 0, 1, 420, 546 )
RenderView1.CameraViewUp = [-0.6001400955297163, -0.799702535069662, 0.017541982235173254]
RenderView1.CacheKey = 0.0
RenderView1.StereoType = 0
RenderView1.StereoRender = 0
RenderView1.CameraPosition = [-8.345336051586441, 18.37412911870887, -38.86607527525541]
RenderView1.StereoCapableWindow = 0
RenderView1.Background = [0.31999694819562063, 0.3400015259021897, 0.4299992370489052]
RenderView1.CameraFocalPoint = [7.5, 7.5000000000000036, 7.500000000000024]
RenderView1.CameraParallelScale = 12.99038105676658
RenderView1.CenterOfRotation = [7.5, 7.5, 7.5]
DataRepresentation1 = Show()
DataRepresentation1.EdgeColor = [0.0, 0.0, 0.5000076295109483]
DataRepresentation1.Slice = 7
DataRepresentation1.SelectionPointFieldDataArrayName = 'needsaname'
DataRepresentation1.ScalarOpacityFunction = a1_needsaname_PiecewiseFunction
DataRepresentation1.ColorArrayName = ('POINT_DATA', 'needsaname')
DataRepresentation1.ScalarOpacityUnitDistance = 1.7320508075688776
DataRepresentation1.LookupTable = a1_needsaname_PVLookupTable
DataRepresentation1.Representation = 'Points'
DataRepresentation1.ScaleFactor = 1.5
SetActiveSource(Histogram1)
XYBarChartView1 = coprocessor.CreateView( CreateBarChartView, "image_1_%t.png", 1, 0, 1, 419, 546 )
XYBarChartView1.CacheKey = 0.0
XYBarChartView1.BottomAxisRange = [-7.933884143829346, 112.06611585617065]
XYBarChartView1.TopAxisRange = [0.0, 6.66]
XYBarChartView1.ChartTitle = ''
XYBarChartView1.AxisTitle = ['', '', '', '']
XYBarChartView1.LeftAxisRange = [-2.9880478382110596, 747.0119521617889]
XYBarChartView1.RightAxisRange = [0.0, 6.66]
DataRepresentation3 = Show()
DataRepresentation3.XArrayName = 'bin_extents'
DataRepresentation3.SeriesVisibility = ['bin_extents', '0', 'vtkOriginalIndices', '0', 'bin_values', '1']
DataRepresentation3.AttributeType = 'Row Data'
DataRepresentation3.UseIndexForXAxis = 0
return Pipeline()
class CoProcessor(coprocessing.CoProcessor):
def CreatePipeline(self, datadescription):
self.Pipeline = _CreatePipeline(self, datadescription)
coprocessor = CoProcessor()
freqs = {'input': [1]}
coprocessor.SetUpdateFrequencies(freqs)
return coprocessor
#--------------------------------------------------------------
# Global variables that will hold the pipeline for each timestep
# Creating the CoProcessor object, doesn't actually create the ParaView pipeline.
# It will be automatically setup when coprocessor.UpdateProducers() is called the
# first time.
coprocessor = CreateCoProcessor()
#--------------------------------------------------------------
# Enable Live-Visualizaton with ParaView
coprocessor.EnableLiveVisualization(False)
# ---------------------- Data Selection method ----------------------
def RequestDataDescription(datadescription):
"Callback to populate the request for current timestep"
global coprocessor
if datadescription.GetForceOutput() == True:
# We are just going to request all fields and meshes from the simulation
# code/adaptor.
for i in range(datadescription.GetNumberOfInputDescriptions()):
datadescription.GetInputDescription(i).AllFieldsOn()
datadescription.GetInputDescription(i).GenerateMeshOn()
return
# setup requests for all inputs based on the requirements of the
# pipeline.
coprocessor.LoadRequestedData(datadescription)
# ------------------------ Processing method ------------------------
def DoCoProcessing(datadescription):
"Callback to do co-processing for current timestep"
global coprocessor
# Update the coprocessor by providing it the newly generated simulation data.
# If the pipeline hasn't been setup yet, this will setup the pipeline.
coprocessor.UpdateProducers(datadescription)
# Write output data, if appropriate.
coprocessor.WriteData(datadescription);
# Write image capture (Last arg: rescale lookup table), if appropriate.
coprocessor.WriteImages(datadescription, rescale_lookuptable=False)
# Live Visualization, if enabled.
coprocessor.DoLiveVisualization(datadescription, "localhost", 22222)
| tfogal/freeprocessing | pvsim/pvimage.py | Python | lgpl-3.0 | 7,091 | [
"ParaView"
] | fbdc279532d6a18f9893fd2f71935d610ba2ee76450050d2bf225f43aeb34fa4 |
# slicer imports
from __main__ import vtk, qt, ctk, slicer
# vmtk includes
import SlicerVmtkCommonLib
#
# Level Set Segmentation using VMTK based Tools
#
class LevelSetSegmentation:
def __init__( self, parent ):
parent.title = "Level Set Segmentation"
parent.categories = ["Vascular Modeling Toolkit", ]
parent.contributors = ["Daniel Haehn (Boston Children's Hospital)", "Luca Antiga (Orobix)", "Steve Pieper (Isomics)"]
parent.helpText = """dsfdsf"""
parent.acknowledgementText = """sdfsdfdsf"""
self.parent = parent
class LevelSetSegmentationWidget:
def __init__( self, parent=None ):
if not parent:
self.parent = slicer.qMRMLWidget()
self.parent.setLayout( qt.QVBoxLayout() )
self.parent.setMRMLScene( slicer.mrmlScene )
else:
self.parent = parent
self.layout = self.parent.layout()
# this flag is 1 if there is an update in progress
self.__updating = 1
# the pointer to the logic
self.__logic = None
if not parent:
self.setup()
self.__inputVolumeNodeSelector.setMRMLScene( slicer.mrmlScene )
self.__seedFiducialsNodeSelector.setMRMLScene( slicer.mrmlScene )
self.__vesselnessVolumeNodeSelector.setMRMLScene( slicer.mrmlScene )
self.__outputVolumeNodeSelector.setMRMLScene( slicer.mrmlScene )
self.__outputModelNodeSelector.setMRMLScene( slicer.mrmlScene )
self.__stopperFiducialsNodeSelector.setMRMLScene( slicer.mrmlScene )
# after setup, be ready for events
self.__updating = 0
self.parent.show()
# register default slots
self.parent.connect( 'mrmlSceneChanged(vtkMRMLScene*)', self.onMRMLSceneChanged )
def GetLogic( self ):
'''
'''
if not self.__logic:
self.__logic = SlicerVmtkCommonLib.LevelSetSegmentationLogic()
return self.__logic
def setup( self ):
# check if the SlicerVmtk module is installed properly
# self.__vmtkInstalled = SlicerVmtkCommonLib.Helper.CheckIfVmtkIsInstalled()
# Helper.Debug("VMTK found: " + self.__vmtkInstalled)
#
# the I/O panel
#
ioCollapsibleButton = ctk.ctkCollapsibleButton()
ioCollapsibleButton.text = "Input/Output"
self.layout.addWidget( ioCollapsibleButton )
ioFormLayout = qt.QFormLayout( ioCollapsibleButton )
# inputVolume selector
self.__inputVolumeNodeSelector = slicer.qMRMLNodeComboBox()
self.__inputVolumeNodeSelector.objectName = 'inputVolumeNodeSelector'
self.__inputVolumeNodeSelector.toolTip = "Select the input volume. This should always be the original image and not a vesselness image, if possible."
self.__inputVolumeNodeSelector.nodeTypes = ['vtkMRMLScalarVolumeNode']
self.__inputVolumeNodeSelector.noneEnabled = False
self.__inputVolumeNodeSelector.addEnabled = False
self.__inputVolumeNodeSelector.removeEnabled = False
self.__inputVolumeNodeSelector.addAttribute( "vtkMRMLScalarVolumeNode", "LabelMap", "0" )
ioFormLayout.addRow( "Input Volume:", self.__inputVolumeNodeSelector )
self.parent.connect( 'mrmlSceneChanged(vtkMRMLScene*)',
self.__inputVolumeNodeSelector, 'setMRMLScene(vtkMRMLScene*)' )
self.__inputVolumeNodeSelector.connect( 'currentNodeChanged(vtkMRMLNode*)', self.onInputVolumeChanged )
self.__inputVolumeNodeSelector.connect( 'nodeActivated(vtkMRMLNode*)', self.onInputVolumeChanged )
# seed selector
self.__seedFiducialsNodeSelector = slicer.qMRMLNodeComboBox()
self.__seedFiducialsNodeSelector.objectName = 'seedFiducialsNodeSelector'
self.__seedFiducialsNodeSelector.toolTip = "Select a hierarchy containing the fiducials to use as Seeds."
self.__seedFiducialsNodeSelector.nodeTypes = ['vtkMRMLMarkupsFiducialNode']
self.__seedFiducialsNodeSelector.baseName = "Seeds"
self.__seedFiducialsNodeSelector.noneEnabled = False
self.__seedFiducialsNodeSelector.addEnabled = False
self.__seedFiducialsNodeSelector.removeEnabled = False
ioFormLayout.addRow( "Seeds:", self.__seedFiducialsNodeSelector )
self.parent.connect( 'mrmlSceneChanged(vtkMRMLScene*)',
self.__seedFiducialsNodeSelector, 'setMRMLScene(vtkMRMLScene*)' )
self.__ioAdvancedToggle = qt.QCheckBox( "Show Advanced I/O Properties" )
self.__ioAdvancedToggle.setChecked( False )
ioFormLayout.addRow( self.__ioAdvancedToggle )
#
# I/O advanced panel
#
self.__ioAdvancedPanel = qt.QFrame( ioCollapsibleButton )
self.__ioAdvancedPanel.hide()
self.__ioAdvancedPanel.setFrameStyle( 6 )
ioFormLayout.addRow( self.__ioAdvancedPanel )
self.__ioAdvancedToggle.connect( "clicked()", self.onIOAdvancedToggle )
ioAdvancedFormLayout = qt.QFormLayout( self.__ioAdvancedPanel )
# inputVolume selector
self.__vesselnessVolumeNodeSelector = slicer.qMRMLNodeComboBox()
self.__vesselnessVolumeNodeSelector.objectName = 'vesselnessVolumeNodeSelector'
self.__vesselnessVolumeNodeSelector.toolTip = "Select the input vesselness volume. This is optional input."
self.__vesselnessVolumeNodeSelector.nodeTypes = ['vtkMRMLScalarVolumeNode']
self.__vesselnessVolumeNodeSelector.noneEnabled = True
self.__vesselnessVolumeNodeSelector.addEnabled = False
self.__vesselnessVolumeNodeSelector.removeEnabled = False
self.__vesselnessVolumeNodeSelector.addAttribute( "vtkMRMLScalarVolumeNode", "LabelMap", "0" )
ioAdvancedFormLayout.addRow( "Vesselness Volume:", self.__vesselnessVolumeNodeSelector )
self.parent.connect( 'mrmlSceneChanged(vtkMRMLScene*)',
self.__vesselnessVolumeNodeSelector, 'setMRMLScene(vtkMRMLScene*)' )
self.__vesselnessVolumeNodeSelector.setCurrentNode( None )
# stopper selector
self.__stopperFiducialsNodeSelector = slicer.qMRMLNodeComboBox()
self.__stopperFiducialsNodeSelector.objectName = 'stopperFiducialsNodeSelector'
self.__stopperFiducialsNodeSelector.toolTip = "Select a hierarchy containing the fiducials to use as Stoppers. Whenever one stopper is reached, the segmentation stops."
self.__stopperFiducialsNodeSelector.nodeTypes = ['vtkMRMLMarkupsFiducialNode']
self.__stopperFiducialsNodeSelector.baseName = "Stoppers"
self.__stopperFiducialsNodeSelector.noneEnabled = False
self.__stopperFiducialsNodeSelector.addEnabled = True
self.__stopperFiducialsNodeSelector.removeEnabled = False
ioAdvancedFormLayout.addRow( "Stoppers:", self.__stopperFiducialsNodeSelector )
self.parent.connect( 'mrmlSceneChanged(vtkMRMLScene*)',
self.__stopperFiducialsNodeSelector, 'setMRMLScene(vtkMRMLScene*)' )
# outputVolume selector
self.__outputVolumeNodeSelector = slicer.qMRMLNodeComboBox()
self.__outputVolumeNodeSelector.toolTip = "Select the output labelmap."
self.__outputVolumeNodeSelector.nodeTypes = ['vtkMRMLScalarVolumeNode']
self.__outputVolumeNodeSelector.baseName = "LevelSetSegmentation"
self.__outputVolumeNodeSelector.noneEnabled = False
self.__outputVolumeNodeSelector.addEnabled = True
self.__outputVolumeNodeSelector.selectNodeUponCreation = True
self.__outputVolumeNodeSelector.addAttribute( "vtkMRMLScalarVolumeNode", "LabelMap", "1" )
self.__outputVolumeNodeSelector.removeEnabled = True
ioAdvancedFormLayout.addRow( "Output Labelmap:", self.__outputVolumeNodeSelector )
self.parent.connect( 'mrmlSceneChanged(vtkMRMLScene*)',
self.__outputVolumeNodeSelector, 'setMRMLScene(vtkMRMLScene*)' )
# outputModel selector
self.__outputModelNodeSelector = slicer.qMRMLNodeComboBox()
self.__outputModelNodeSelector.objectName = 'outputModelNodeSelector'
self.__outputModelNodeSelector.toolTip = "Select the output model."
self.__outputModelNodeSelector.nodeTypes = ['vtkMRMLModelNode']
self.__outputModelNodeSelector.baseName = "LevelSetSegmentationModel"
self.__outputModelNodeSelector.hideChildNodeTypes = ['vtkMRMLMarkupsFiducialNode']# hide all annotation nodes
self.__outputModelNodeSelector.noneEnabled = False
self.__outputModelNodeSelector.addEnabled = True
self.__outputModelNodeSelector.selectNodeUponCreation = True
self.__outputModelNodeSelector.removeEnabled = True
ioAdvancedFormLayout.addRow( "Output Model:", self.__outputModelNodeSelector )
self.parent.connect( 'mrmlSceneChanged(vtkMRMLScene*)',
self.__outputModelNodeSelector, 'setMRMLScene(vtkMRMLScene*)' )
#
# the segmentation panel
#
segmentationCollapsibleButton = ctk.ctkCollapsibleButton()
segmentationCollapsibleButton.text = "Segmentation"
self.layout.addWidget( segmentationCollapsibleButton )
segmentationFormLayout = qt.QFormLayout( segmentationCollapsibleButton )
# Threshold slider
thresholdLabel = qt.QLabel()
thresholdLabel.text = "Thresholding" + SlicerVmtkCommonLib.Helper.CreateSpace( 7 )
thresholdLabel.toolTip = "Choose the intensity range to segment."
thresholdLabel.setAlignment( 4 )
segmentationFormLayout.addRow( thresholdLabel )
self.__thresholdSlider = slicer.qMRMLRangeWidget()
segmentationFormLayout.addRow( self.__thresholdSlider )
self.__thresholdSlider.connect( 'valuesChanged(double,double)', self.onThresholdSliderChanged )
self.__segmentationAdvancedToggle = qt.QCheckBox( "Show Advanced Segmentation Properties" )
self.__segmentationAdvancedToggle.setChecked( False )
segmentationFormLayout.addRow( self.__segmentationAdvancedToggle )
#
# segmentation advanced panel
#
self.__segmentationAdvancedPanel = qt.QFrame( segmentationCollapsibleButton )
self.__segmentationAdvancedPanel.hide()
self.__segmentationAdvancedPanel.setFrameStyle( 6 )
segmentationFormLayout.addRow( self.__segmentationAdvancedPanel )
self.__segmentationAdvancedToggle.connect( "clicked()", self.onSegmentationAdvancedToggle )
segmentationAdvancedFormLayout = qt.QFormLayout( self.__segmentationAdvancedPanel )
# inflation slider
inflationLabel = qt.QLabel()
inflationLabel.text = "less inflation <-> more inflation" + SlicerVmtkCommonLib.Helper.CreateSpace( 14 )
inflationLabel.setAlignment( 4 )
inflationLabel.toolTip = "Define how fast the segmentation expands."
segmentationAdvancedFormLayout.addRow( inflationLabel )
self.__inflationSlider = ctk.ctkSliderWidget()
self.__inflationSlider.decimals = 0
self.__inflationSlider.minimum = -100
self.__inflationSlider.maximum = 100
self.__inflationSlider.singleStep = 10
self.__inflationSlider.toolTip = inflationLabel.toolTip
segmentationAdvancedFormLayout.addRow( self.__inflationSlider )
# curvature slider
curvatureLabel = qt.QLabel()
curvatureLabel.text = "less curvature <-> more curvature" + SlicerVmtkCommonLib.Helper.CreateSpace( 14 )
curvatureLabel.setAlignment( 4 )
curvatureLabel.toolTip = "Choose a high curvature to generate a smooth segmentation."
segmentationAdvancedFormLayout.addRow( curvatureLabel )
self.__curvatureSlider = ctk.ctkSliderWidget()
self.__curvatureSlider.decimals = 0
self.__curvatureSlider.minimum = -100
self.__curvatureSlider.maximum = 100
self.__curvatureSlider.singleStep = 10
self.__curvatureSlider.toolTip = curvatureLabel.toolTip
segmentationAdvancedFormLayout.addRow( self.__curvatureSlider )
# attraction slider
attractionLabel = qt.QLabel()
attractionLabel.text = "less attraction to gradient <-> more attraction to gradient" + SlicerVmtkCommonLib.Helper.CreateSpace( 14 )
attractionLabel.setAlignment( 4 )
attractionLabel.toolTip = "Configure how the segmentation travels towards gradient ridges (vessel lumen wall)."
segmentationAdvancedFormLayout.addRow( attractionLabel )
self.__attractionSlider = ctk.ctkSliderWidget()
self.__attractionSlider.decimals = 0
self.__attractionSlider.minimum = -100
self.__attractionSlider.maximum = 100
self.__attractionSlider.singleStep = 10
self.__attractionSlider.toolTip = attractionLabel.toolTip
segmentationAdvancedFormLayout.addRow( self.__attractionSlider )
# iteration spinbox
self.__iterationSpinBox = qt.QSpinBox()
self.__iterationSpinBox.minimum = 0
self.__iterationSpinBox.maximum = 5000
self.__iterationSpinBox.singleStep = 10
self.__iterationSpinBox.toolTip = "Choose the number of evolution iterations."
segmentationAdvancedFormLayout.addRow( SlicerVmtkCommonLib.Helper.CreateSpace( 100 ) + "Iterations:", self.__iterationSpinBox )
#
# Reset, preview and apply buttons
#
self.__buttonBox = qt.QDialogButtonBox()
self.__resetButton = self.__buttonBox.addButton( self.__buttonBox.RestoreDefaults )
self.__resetButton.toolTip = "Click to reset all input elements to default."
self.__previewButton = self.__buttonBox.addButton( self.__buttonBox.Discard )
self.__previewButton.setIcon( qt.QIcon() )
self.__previewButton.text = "Preview.."
self.__previewButton.toolTip = "Click to refresh the preview."
self.__startButton = self.__buttonBox.addButton( self.__buttonBox.Apply )
self.__startButton.setIcon( qt.QIcon() )
self.__startButton.text = "Start!"
self.__startButton.enabled = False
self.__startButton.toolTip = "Click to start the filtering."
self.layout.addWidget( self.__buttonBox )
self.__resetButton.connect( "clicked()", self.restoreDefaults )
self.__previewButton.connect( "clicked()", self.onRefreshButtonClicked )
self.__startButton.connect( "clicked()", self.onStartButtonClicked )
# be ready for events
self.__updating = 0
# set default values
self.restoreDefaults()
# compress the layout
self.layout.addStretch( 1 )
def onStartButtonClicked( self ):
'''
'''
# this is no preview
self.start( False )
def onRefreshButtonClicked( self ):
'''
'''
# perform the preview
self.start( True )
# activate startButton
self.__startButton.enabled = True
def onMRMLSceneChanged( self ):
'''
'''
SlicerVmtkCommonLib.Helper.Debug( "onMRMLSceneChanged" )
self.restoreDefaults()
def selectVesselnessVolume( self ):
'''Return the selected vesselness volume, or look for a "VesselnessFiltered" volume in the scene with the same dimensions, spacing and origin as the input and select it.'''
currentNode = self.__inputVolumeNodeSelector.currentNode()
currentVesselnessNode = self.__vesselnessVolumeNodeSelector.currentNode()
if currentVesselnessNode:
return currentVesselnessNode
# check if we have a corresponding vesselness node in the scene and set it then
v = None
vesselnessCollection = slicer.mrmlScene.GetNodesByClassByName( "vtkMRMLScalarVolumeNode", "VesselnessFiltered" )
numberOfVesselnessNodes = vesselnessCollection.GetNumberOfItems()
SlicerVmtkCommonLib.Helper.Debug( "Found " + str( numberOfVesselnessNodes ) + " Vesselness node(s).." )
for i in xrange( numberOfVesselnessNodes ):
v = vesselnessCollection.GetItemAsObject( i )
if ( v.GetImageData().GetDimensions() == currentNode.GetImageData().GetDimensions() and
v.GetSpacing() == currentNode.GetSpacing() and
v.GetOrigin() == currentNode.GetOrigin() ):
# this is likely the corresponding vesselness node
SlicerVmtkCommonLib.Helper.Debug( "Configuring vesselnessVolumeNodeSelector to use: " + str( v.GetName() ) + " id: " + str( v.GetID() ) )
self.__vesselnessVolumeNodeSelector.setCurrentNode( v )
# jump out of loop
break
return v
def onInputVolumeChanged( self ):
'''Reconfigure the threshold slider from the scalar range (or existing display threshold) of the newly selected input or matching vesselness volume.'''
if not self.__updating:
self.__updating = 1
SlicerVmtkCommonLib.Helper.Debug( "onInputVolumeChanged" )
# reset the thresholdSlider
self.__thresholdSlider.minimum = 0
self.__thresholdSlider.maximum = 100
self.__thresholdSlider.minimumValue = 0
self.__thresholdSlider.maximumValue = 100
currentNode = self.__inputVolumeNodeSelector.currentNode()
if currentNode:
v = self.selectVesselnessVolume()
# if we have a vesselnessNode, we will configure the threshold slider for it instead of the original image
# if not, the currentNode is the input volume
if v:
SlicerVmtkCommonLib.Helper.Debug( "Using Vesselness volume to configure thresholdSlider.." )
currentNode = v
currentImageData = currentNode.GetImageData()
currentDisplayNode = currentNode.GetDisplayNode()
if currentImageData:
currentScalarRange = currentImageData.GetScalarRange()
minimumScalarValue = round( currentScalarRange[0], 0 )
maximumScalarValue = round( currentScalarRange[1], 0 )
self.__thresholdSlider.minimum = minimumScalarValue
self.__thresholdSlider.maximum = maximumScalarValue
# if the image has a small scalarRange, we have to adjust the singleStep
if maximumScalarValue <= 10:
self.__thresholdSlider.singleStep = 0.1
if currentDisplayNode:
if currentDisplayNode.GetApplyThreshold():
# if a threshold is already applied, use it!
self.__thresholdSlider.minimumValue = currentDisplayNode.GetLowerThreshold()
self.__thresholdSlider.maximumValue = currentDisplayNode.GetUpperThreshold()
else:
# don't use a threshold, use the scalar range
SlicerVmtkCommonLib.Helper.Debug( "Reset thresholdSlider's values." )
self.__thresholdSlider.minimumValue = minimumScalarValue
self.__thresholdSlider.maximumValue = maximumScalarValue
self.__updating = 0
def resetThresholdOnDisplayNode( self ):
'''
'''
if not self.__updating:
self.__updating = 1
SlicerVmtkCommonLib.Helper.Debug( "resetThresholdOnDisplayNode" )
currentNode = self.__inputVolumeNodeSelector.currentNode()
if currentNode:
currentDisplayNode = currentNode.GetDisplayNode()
if currentDisplayNode:
currentDisplayNode.SetApplyThreshold( 0 )
self.__updating = 0
def onThresholdSliderChanged( self ):
'''Push the slider values to the display node of the vesselness (or input) volume as lower/upper display thresholds.'''
if not self.__updating:
self.__updating = 1
# first, check if we have a vesselness node
currentNode = self.selectVesselnessVolume()
if currentNode:
SlicerVmtkCommonLib.Helper.Debug( "There was a vesselness node: " + str( currentNode.GetName() ) )
else:
SlicerVmtkCommonLib.Helper.Debug( "There was no vesselness node.." )
# if we don't have a vesselness node, check if we have an original input node
currentNode = self.__inputVolumeNodeSelector.currentNode()
if currentNode:
currentDisplayNode = currentNode.GetDisplayNode()
if currentDisplayNode:
currentDisplayNode.SetLowerThreshold( self.__thresholdSlider.minimumValue )
currentDisplayNode.SetUpperThreshold( self.__thresholdSlider.maximumValue )
currentDisplayNode.SetApplyThreshold( 1 )
self.__updating = 0
def onIOAdvancedToggle( self ):
'''
Show the I/O Advanced panel
'''
if self.__ioAdvancedToggle.checked:
self.__ioAdvancedPanel.show()
else:
self.__ioAdvancedPanel.hide()
def onSegmentationAdvancedToggle( self ):
'''
Show the Segmentation Advanced panel
'''
if self.__segmentationAdvancedToggle.checked:
self.__segmentationAdvancedPanel.show()
else:
self.__segmentationAdvancedPanel.hide()
def restoreDefaults( self ):
'''Reset all GUI elements (threshold slider, advanced toggles, evolution parameters) to their default values.'''
if not self.__updating:
self.__updating = 1
SlicerVmtkCommonLib.Helper.Debug( "restoreDefaults" )
self.__thresholdSlider.minimum = 0
self.__thresholdSlider.maximum = 100
self.__thresholdSlider.minimumValue = 0
self.__thresholdSlider.maximumValue = 100
self.__thresholdSlider.singleStep = 1
self.__ioAdvancedToggle.setChecked( False )
self.__segmentationAdvancedToggle.setChecked( False )
self.__ioAdvancedPanel.hide()
self.__segmentationAdvancedPanel.hide()
self.__inflationSlider.value = 0
self.__curvatureSlider.value = 70
self.__attractionSlider.value = 50
self.__iterationSpinBox.value = 10
self.__updating = 0
# reset threshold on display node
self.resetThresholdOnDisplayNode()
# if a volume is selected, the threshold slider values have to match it
self.onInputVolumeChanged()
def start( self, preview=False ):
'''Run the level set segmentation: create missing output nodes, initialize from the seeds and threshold, optionally evolve, then build the output label map and 3D model. If preview is True, the evolution step is skipped.'''
SlicerVmtkCommonLib.Helper.Debug( "Starting Level Set Segmentation.." )
# first we need the nodes
currentVolumeNode = self.__inputVolumeNodeSelector.currentNode()
currentSeedsNode = self.__seedFiducialsNodeSelector.currentNode()
currentVesselnessNode = self.__vesselnessVolumeNodeSelector.currentNode()
currentStoppersNode = self.__stopperFiducialsNodeSelector.currentNode()
currentLabelMapNode = self.__outputVolumeNodeSelector.currentNode()
currentModelNode = self.__outputModelNodeSelector.currentNode()
if not currentVolumeNode:
# we need a input volume node
return 0
if not currentSeedsNode:
# we need a seeds node
return 0
if not currentStoppersNode or currentStoppersNode.GetID() == currentSeedsNode.GetID():
# we need a current stopper node
# self.__stopperFiducialsNodeSelector.addNode()
pass
if not currentLabelMapNode or currentLabelMapNode.GetID() == currentVolumeNode.GetID():
# we need a current labelMap node
newLabelMapDisplayNode = slicer.mrmlScene.CreateNodeByClass( "vtkMRMLLabelMapVolumeDisplayNode" )
newLabelMapDisplayNode.SetScene( slicer.mrmlScene )
newLabelMapDisplayNode.SetDefaultColorMap()
slicer.mrmlScene.AddNode( newLabelMapDisplayNode )
newLabelMapNode = slicer.mrmlScene.CreateNodeByClass( "vtkMRMLScalarVolumeNode" )
newLabelMapNode.CopyOrientation( currentVolumeNode )
newLabelMapNode.SetScene( slicer.mrmlScene )
newLabelMapNode.SetName( slicer.mrmlScene.GetUniqueNameByString( self.__outputVolumeNodeSelector.baseName ) )
newLabelMapNode.LabelMapOn()
newLabelMapNode.SetAndObserveDisplayNodeID( newLabelMapDisplayNode.GetID() )
slicer.mrmlScene.AddNode( newLabelMapNode )
currentLabelMapNode = newLabelMapNode
self.__outputVolumeNodeSelector.setCurrentNode( currentLabelMapNode )
if not currentModelNode:
# we need a current model node, the display node is created later
newModelNode = slicer.mrmlScene.CreateNodeByClass( "vtkMRMLModelNode" )
newModelNode.SetScene( slicer.mrmlScene )
newModelNode.SetName( slicer.mrmlScene.GetUniqueNameByString( self.__outputModelNodeSelector.baseName ) )
slicer.mrmlScene.AddNode( newModelNode )
currentModelNode = newModelNode
self.__outputModelNodeSelector.setCurrentNode( currentModelNode )
# now we need to convert the fiducials to vtkIdLists
seeds = SlicerVmtkCommonLib.Helper.convertFiducialHierarchyToVtkIdList( currentSeedsNode, currentVolumeNode )
# stoppers = SlicerVmtkCommonLib.Helper.convertFiducialHierarchyToVtkIdList(currentStoppersNode, currentVolumeNode)
stoppers = vtk.vtkIdList() # TODO
# the input image for the initialization
inputImage = vtk.vtkImageData()
# check if we have a vesselnessNode - this will be our input for the initialization then
if currentVesselnessNode:
# yes, there is one
inputImage.DeepCopy( currentVesselnessNode.GetImageData() )
else:
# no, there is none - we use the original image
inputImage.DeepCopy( currentVolumeNode.GetImageData() )
inputImage.Update()
# initialization
initImageData = vtk.vtkImageData()
# evolution
evolImageData = vtk.vtkImageData()
# perform the initialization
initImageData.DeepCopy( self.GetLogic().performInitialization( inputImage,
self.__thresholdSlider.minimumValue,
self.__thresholdSlider.maximumValue,
seeds,
stoppers,
0 ) ) # TODO sidebranch ignore feature
initImageData.Update()
if not initImageData.GetPointData().GetScalars():
# something went wrong, the image is empty
SlicerVmtkCommonLib.Helper.Info( "Segmentation failed - the output was empty.." )
return -1
# check if it is a preview call
if preview:
# if this is a preview call, we want to skip the evolution
evolImageData.DeepCopy( initImageData )
else:
# no preview, run the whole thing! we never use the vesselness node here, just the original one
evolImageData.DeepCopy( self.GetLogic().performEvolution( currentVolumeNode.GetImageData(),
initImageData,
self.__iterationSpinBox.value,
self.__inflationSlider.value,
self.__curvatureSlider.value,
self.__attractionSlider.value,
'geodesic' ) )
evolImageData.Update()
# create segmentation labelMap
labelMap = vtk.vtkImageData()
labelMap.DeepCopy( self.GetLogic().buildSimpleLabelMap( evolImageData, 0, 5 ) )
labelMap.Update()
currentLabelMapNode.CopyOrientation( currentVolumeNode )
# propagate the label map to the node
currentLabelMapNode.SetAndObserveImageData( labelMap )
currentLabelMapNode.Modified()
# deactivate the threshold in the GUI
self.resetThresholdOnDisplayNode()
# self.onInputVolumeChanged()
# show the segmentation results in the GUI
selectionNode = slicer.app.applicationLogic().GetSelectionNode()
if preview and currentVesselnessNode:
# if preview and a vesselnessNode was configured, show it
selectionNode.SetReferenceActiveVolumeID( currentVesselnessNode.GetID() )
else:
# if not preview, show the original volume
if currentVesselnessNode:
selectionNode.SetReferenceSecondaryVolumeID( currentVesselnessNode.GetID() )
selectionNode.SetReferenceActiveVolumeID( currentVolumeNode.GetID() )
selectionNode.SetReferenceActiveLabelVolumeID( currentLabelMapNode.GetID() )
slicer.app.applicationLogic().PropagateVolumeSelection()
# generate 3D model
model = vtk.vtkPolyData()
# we need the ijkToRas transform for the marching cubes call
ijkToRasMatrix = vtk.vtkMatrix4x4()
currentLabelMapNode.GetIJKToRASMatrix( ijkToRasMatrix )
# call marching cubes
model.DeepCopy( self.GetLogic().marchingCubes( evolImageData, ijkToRasMatrix, 0.0 ) )
model.Update()
# propagate model to nodes
currentModelNode.SetAndObservePolyData( model )
currentModelNode.Modified()
currentModelDisplayNode = currentModelNode.GetDisplayNode()
if not currentModelDisplayNode:
# create new displayNode
currentModelDisplayNode = slicer.mrmlScene.CreateNodeByClass( "vtkMRMLModelDisplayNode" )
slicer.mrmlScene.AddNode( currentModelDisplayNode )
# always configure the displayNode to show the model
currentModelDisplayNode.SetInputPolyData( currentModelNode.GetPolyData() )
currentModelDisplayNode.SetColor( 1.0, 0.55, 0.4 ) # salmon
currentModelDisplayNode.SetBackfaceCulling( 0 )
currentModelDisplayNode.SetSliceIntersectionVisibility( 0 )
currentModelDisplayNode.SetVisibility( 1 )
currentModelDisplayNode.SetOpacity( 1.0 )
currentModelDisplayNode.Modified()
# update the reference between the model node and its display node
currentModelNode.SetAndObserveDisplayNodeID( currentModelDisplayNode.GetID() )
currentModelNode.Modified()
# fit slice to all sliceviewers
slicer.app.applicationLogic().FitSliceToAll()
# jump all sliceViewers to the first fiducial point, if one was used
if currentSeedsNode:
currentCoordinatesRAS = [0, 0, 0]
if isinstance( currentSeedsNode, slicer.vtkMRMLMarkupsFiducialNode ):
# let's get the first children
currentSeedsNode.GetNthFiducialPosition(0,currentCoordinatesRAS)
numberOfSliceNodes = slicer.mrmlScene.GetNumberOfNodesByClass( 'vtkMRMLSliceNode' )
for n in xrange( numberOfSliceNodes ):
sliceNode = slicer.mrmlScene.GetNthNodeByClass( n, "vtkMRMLSliceNode" )
if sliceNode:
sliceNode.JumpSliceByOffsetting( currentCoordinatesRAS[0], currentCoordinatesRAS[1], currentCoordinatesRAS[2] )
# center 3D view(s) on the new model
if currentCoordinatesRAS:
for d in range( slicer.app.layoutManager().threeDViewCount ):
threeDView = slicer.app.layoutManager().threeDWidget( d ).threeDView()
# reset the focal point
threeDView.resetFocalPoint()
# and fly to our seed point
interactor = threeDView.interactor()
renderer = threeDView.renderWindow().GetRenderers().GetItemAsObject( 0 )
interactor.FlyTo( renderer, currentCoordinatesRAS[0], currentCoordinatesRAS[1], currentCoordinatesRAS[2] )
SlicerVmtkCommonLib.Helper.Debug( "End of Level Set Segmentation.." )
class Slicelet( object ):
"""A slicer slicelet is a module widget that comes up in stand alone mode
implemented as a python class.
This class provides common wrapper functionality used by all slicer slicelets.
"""
# TODO: put this in a SliceletLib
# TODO: parse command line args
def __init__( self, widgetClass=None ):
self.parent = qt.QFrame()
self.parent.setLayout( qt.QVBoxLayout() )
# TODO: should have way to pop up python interactor
self.buttons = qt.QFrame()
self.buttons.setLayout( qt.QHBoxLayout() )
self.parent.layout().addWidget( self.buttons )
self.addDataButton = qt.QPushButton( "Add Data" )
self.buttons.layout().addWidget( self.addDataButton )
self.addDataButton.connect( "clicked()", slicer.app.ioManager().openAddDataDialog )
self.loadSceneButton = qt.QPushButton( "Load Scene" )
self.buttons.layout().addWidget( self.loadSceneButton )
self.loadSceneButton.connect( "clicked()", slicer.app.ioManager().openLoadSceneDialog )
if widgetClass:
self.widget = widgetClass( self.parent )
self.widget.setup()
self.parent.show()
class LevelSetSegmentationSlicelet( Slicelet ):
""" Creates the interface when module is run as a stand alone gui app.
"""
def __init__( self ):
super( LevelSetSegmentationSlicelet, self ).__init__( LevelSetSegmentationWidget )
if __name__ == "__main__":
# TODO: need a way to access and parse command line arguments
# TODO: ideally command line args should handle --xml
import sys
print( sys.argv )
slicelet = LevelSetSegmentationSlicelet()
| jcfr/SlicerExtension-VMTK | PythonModules/LevelSetSegmentation.py | Python | apache-2.0 | 31,688 | ["VTK"] | 23fbbe4d9f0d875db55a8dd0e5022b081d01d086e81b7c5d0c508002674ec903 |
"""
This migration script adds the request_event table and
removes the state field in the request table
"""
# Need our custom types, but don't import anything else from model
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.exc import *
from migrate import *
from migrate.changeset import *
import datetime
import sys
import logging
now = datetime.datetime.utcnow
from galaxy.model.custom_types import TrimmedString
log = logging.getLogger( __name__ )
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler( sys.stdout )
format = "%(name)s %(levelname)s %(asctime)s %(message)s"
formatter = logging.Formatter( format )
handler.setFormatter( formatter )
log.addHandler( handler )
metadata = MetaData()
def display_migration_details():
print "========================================"
print "This migration script adds the request_event table and"
print "removes the state field in the request table"
print "========================================"
RequestEvent_table = Table('request_event', metadata,
Column( "id", Integer, primary_key=True),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "request_id", Integer, ForeignKey( "request.id" ), index=True ),
Column( "state", TrimmedString( 255 ), index=True ),
Column( "comment", TEXT ) )
def upgrade(migrate_engine):
metadata.bind = migrate_engine
display_migration_details()
def localtimestamp():
if migrate_engine.name == 'postgresql' or migrate_engine.name == 'mysql':
return "LOCALTIMESTAMP"
elif migrate_engine.name == 'sqlite':
return "current_date || ' ' || current_time"
else:
raise Exception( 'Unable to convert data for unknown database type: %s' % migrate_engine.name )
def nextval( table, col='id' ):
if migrate_engine.name == 'postgresql':
return "nextval('%s_%s_seq')" % ( table, col )
elif migrate_engine.name == 'mysql' or migrate_engine.name == 'sqlite':
return "null"
else:
raise Exception( 'Unable to convert data for unknown database type: %s' % migrate_engine.name )
# Load existing tables
metadata.reflect()
# Add new request_event table
try:
RequestEvent_table.create()
except Exception, e:
log.debug( "Creating request_event table failed: %s" % str( e ) )
# move the current state of all existing requests to the request_event table
cmd = \
"INSERT INTO request_event " + \
"SELECT %s AS id," + \
"%s AS create_time," + \
"%s AS update_time," + \
"request.id AS request_id," + \
"request.state AS state," + \
"'%s' AS comment " + \
"FROM request;"
cmd = cmd % ( nextval('request_event'), localtimestamp(), localtimestamp(), 'Imported from request table')
migrate_engine.execute( cmd )
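# For illustration only: on PostgreSQL the statement rendered above is
# (given the nextval()/localtimestamp() helpers defined earlier):
#   INSERT INTO request_event
#   SELECT nextval('request_event_id_seq') AS id,
#          LOCALTIMESTAMP AS create_time,
#          LOCALTIMESTAMP AS update_time,
#          request.id AS request_id,
#          request.state AS state,
#          'Imported from request table' AS comment
#   FROM request;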
if migrate_engine.name != 'sqlite':
# Delete the state column
try:
Request_table = Table( "request", metadata, autoload=True )
except NoSuchTableError:
Request_table = None
log.debug( "Failed loading table request" )
if Request_table is not None:
try:
Request_table.c.state.drop()
except Exception, e:
log.debug( "Deleting column 'state' to request table failed: %s" % ( str( e ) ) )
def downgrade(migrate_engine):
metadata.bind = migrate_engine
pass
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/model/migrate/versions/0027_request_events.py | Python | gpl-3.0 | 3,542 | ["Galaxy"] | fc22011f0884fd57c5aa6002983eb1851623c7ddf9bfdb52ba2e16569f9ae246 |
# Copyright 2013-2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file contains the detection logic for miscellaneous external dependencies.
from pathlib import Path
import functools
import re
import sysconfig
import typing as T
from .. import mlog
from .. import mesonlib
from ..environment import detect_cpu_family
from .base import (
DependencyException, DependencyMethods, ExternalDependency,
PkgConfigDependency, CMakeDependency, ConfigToolDependency,
factory_methods, DependencyFactory,
)
if T.TYPE_CHECKING:
from ..environment import Environment, MachineChoice
from .base import DependencyType, Dependency # noqa: F401
@factory_methods({DependencyMethods.PKGCONFIG, DependencyMethods.CMAKE})
def netcdf_factory(env: 'Environment', for_machine: 'MachineChoice',
kwargs: T.Dict[str, T.Any], methods: T.List[DependencyMethods]) -> T.List['DependencyType']:
language = kwargs.get('language', 'c')
if language not in ('c', 'cpp', 'fortran'):
raise DependencyException(f'Language {language} is not supported with NetCDF.')
candidates = [] # type: T.List['DependencyType']
if DependencyMethods.PKGCONFIG in methods:
if language == 'fortran':
pkg = 'netcdf-fortran'
else:
pkg = 'netcdf'
candidates.append(functools.partial(PkgConfigDependency, pkg, env, kwargs, language=language))
if DependencyMethods.CMAKE in methods:
candidates.append(functools.partial(CMakeDependency, 'NetCDF', env, kwargs, language=language))
return candidates
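# Example (illustrative): dependency('netcdf', language : 'fortran') in a
# meson.build file reaches this factory with kwargs={'language': 'fortran'},
# so the candidates tried are the 'netcdf-fortran' pkg-config package and the
# CMake 'NetCDF' package, in that order.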
class OpenMPDependency(ExternalDependency):
# Map date of specification release (which is the macro value) to a version.
VERSIONS = {
'201811': '5.0',
'201611': '5.0-revision1', # This is supported by ICC 19.x
'201511': '4.5',
'201307': '4.0',
'201107': '3.1',
'200805': '3.0',
'200505': '2.5',
'200203': '2.0',
'199810': '1.0',
}
def __init__(self, environment, kwargs):
language = kwargs.get('language')
super().__init__('openmp', environment, kwargs, language=language)
self.is_found = False
if self.clib_compiler.get_id() == 'pgi':
# through at least PGI 19.4, there is no macro defined for OpenMP, but OpenMP 3.1 is supported.
self.version = '3.1'
self.is_found = True
self.compile_args = self.link_args = self.clib_compiler.openmp_flags()
return
try:
openmp_date = self.clib_compiler.get_define(
'_OPENMP', '', self.env, self.clib_compiler.openmp_flags(), [self], disable_cache=True)[0]
except mesonlib.EnvironmentException as e:
mlog.debug('OpenMP support not available in the compiler')
mlog.debug(e)
openmp_date = None
if openmp_date:
try:
self.version = self.VERSIONS[openmp_date]
except KeyError:
mlog.debug(f'Could not find an OpenMP version matching {openmp_date}')
if openmp_date == '_OPENMP':
mlog.debug('This can be caused by flags such as gcc\'s `-fdirectives-only`, which affect preprocessor behavior.')
return
# Flang has omp_lib.h
header_names = ('omp.h', 'omp_lib.h')
for name in header_names:
if self.clib_compiler.has_header(name, '', self.env, dependencies=[self], disable_cache=True)[0]:
self.is_found = True
self.compile_args = self.clib_compiler.openmp_flags()
self.link_args = self.clib_compiler.openmp_link_flags()
break
if not self.is_found:
mlog.log(mlog.yellow('WARNING:'), 'OpenMP found but omp.h missing.')
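# Worked example (illustrative) of the mapping used above:
#   OpenMPDependency.VERSIONS['201511']  ->  '4.5'
# i.e. a compiler whose openmp_flags() build reports _OPENMP == 201511 is
# treated as OpenMP 4.5, provided omp.h (or Flang's omp_lib.h) is also found.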
class ThreadDependency(ExternalDependency):
def __init__(self, name: str, environment, kwargs):
super().__init__(name, environment, kwargs)
self.is_found = True
# This happens when using a language that has a threads concept
# but no C-family compiler attached, such as plain CUDA.
if self.clib_compiler is None:
self.compile_args = []
self.link_args = []
else:
self.compile_args = self.clib_compiler.thread_flags(environment)
self.link_args = self.clib_compiler.thread_link_flags(environment)
@staticmethod
def get_methods():
return [DependencyMethods.AUTO, DependencyMethods.CMAKE]
class BlocksDependency(ExternalDependency):
def __init__(self, environment, kwargs):
super().__init__('blocks', environment, kwargs)
self.name = 'blocks'
self.is_found = False
if self.env.machines[self.for_machine].is_darwin():
self.compile_args = []
self.link_args = []
else:
self.compile_args = ['-fblocks']
self.link_args = ['-lBlocksRuntime']
if not self.clib_compiler.has_header('Block.h', '', environment, disable_cache=True) or \
not self.clib_compiler.find_library('BlocksRuntime', environment, []):
mlog.log(mlog.red('ERROR:'), 'BlocksRuntime not found.')
return
source = '''
int main(int argc, char **argv)
{
int (^callback)(void) = ^ int (void) { return 0; };
return callback();
}'''
with self.clib_compiler.compile(source, extra_args=self.compile_args + self.link_args) as p:
if p.returncode != 0:
mlog.log(mlog.red('ERROR:'), 'Compiler does not support blocks extension.')
return
self.is_found = True
class Python3DependencySystem(ExternalDependency):
def __init__(self, name, environment, kwargs):
super().__init__(name, environment, kwargs)
if not environment.machines.matches_build_machine(self.for_machine):
return
if not environment.machines[self.for_machine].is_windows():
return
self.name = 'python3'
self.static = kwargs.get('static', False)
# We can only be sure that it is Python 3 at this point
self.version = '3'
self._find_libpy3_windows(environment)
@staticmethod
def get_windows_python_arch():
pyplat = sysconfig.get_platform()
if pyplat == 'mingw':
pycc = sysconfig.get_config_var('CC')
if pycc.startswith('x86_64'):
return '64'
elif pycc.startswith(('i686', 'i386')):
return '32'
else:
mlog.log('MinGW Python built with unknown CC {!r}, please file a bug'.format(pycc))
return None
elif pyplat == 'win32':
return '32'
elif pyplat in ('win64', 'win-amd64'):
return '64'
mlog.log(f'Unknown Windows Python platform {pyplat!r}')
return None
def get_windows_link_args(self):
pyplat = sysconfig.get_platform()
if pyplat.startswith('win'):
vernum = sysconfig.get_config_var('py_version_nodot')
if self.static:
libpath = Path('libs') / f'libpython{vernum}.a'
else:
comp = self.get_compiler()
if comp.id == "gcc":
libpath = f'python{vernum}.dll'
else:
libpath = Path('libs') / f'python{vernum}.lib'
lib = Path(sysconfig.get_config_var('base')) / libpath
elif pyplat == 'mingw':
if self.static:
libname = sysconfig.get_config_var('LIBRARY')
else:
libname = sysconfig.get_config_var('LDLIBRARY')
lib = Path(sysconfig.get_config_var('LIBDIR')) / libname
if not lib.exists():
mlog.log('Could not find Python3 library {!r}'.format(str(lib)))
return None
return [str(lib)]
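# Illustrative example (assuming CPython 3.8 on win-amd64 built with MSVC,
# static=False): sysconfig.get_platform() == 'win-amd64' and
# py_version_nodot == '38', so the method above returns
# [<base>/libs/python38.lib].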
def _find_libpy3_windows(self, env):
'''
Find python3 libraries on Windows and also verify that the arch matches
what we are building for.
'''
pyarch = self.get_windows_python_arch()
if pyarch is None:
self.is_found = False
return
arch = detect_cpu_family(env.coredata.compilers.host)
if arch == 'x86':
arch = '32'
elif arch == 'x86_64':
arch = '64'
else:
# We can't cross-compile Python 3 dependencies on Windows yet
mlog.log(f'Unknown architecture {arch!r} for',
mlog.bold(self.name))
self.is_found = False
return
# Pyarch ends in '32' or '64'
if arch != pyarch:
mlog.log('Need', mlog.bold(self.name), 'for {}-bit, but '
'found {}-bit'.format(arch, pyarch))
self.is_found = False
return
# This can fail if the library is not found
largs = self.get_windows_link_args()
if largs is None:
self.is_found = False
return
self.link_args = largs
# Compile args
inc = sysconfig.get_path('include')
platinc = sysconfig.get_path('platinclude')
self.compile_args = ['-I' + inc]
if inc != platinc:
self.compile_args.append('-I' + platinc)
self.version = sysconfig.get_config_var('py_version')
self.is_found = True
@staticmethod
def get_methods():
if mesonlib.is_windows():
return [DependencyMethods.PKGCONFIG, DependencyMethods.SYSCONFIG]
elif mesonlib.is_osx():
return [DependencyMethods.PKGCONFIG, DependencyMethods.EXTRAFRAMEWORK]
else:
return [DependencyMethods.PKGCONFIG]
def log_tried(self):
return 'sysconfig'
class PcapDependencyConfigTool(ConfigToolDependency):
tools = ['pcap-config']
tool_name = 'pcap-config'
def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]):
super().__init__(name, environment, kwargs)
if not self.is_found:
return
self.compile_args = self.get_config_value(['--cflags'], 'compile_args')
self.link_args = self.get_config_value(['--libs'], 'link_args')
self.version = self.get_pcap_lib_version()
@staticmethod
def get_methods():
return [DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL]
def get_pcap_lib_version(self):
# Since we seem to need to run a program to discover the pcap version,
# we can't do that when cross-compiling
# FIXME: this should be handled if we have an exe_wrapper
if not self.env.machines.matches_build_machine(self.for_machine):
return None
v = self.clib_compiler.get_return_value('pcap_lib_version', 'string',
'#include <pcap.h>', self.env, [], [self])
v = re.sub(r'libpcap version ', '', v)
v = re.sub(r' -- Apple version.*$', '', v)
return v
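# Illustrative example of the clean-up above: a string such as
# 'libpcap version 1.9.1 -- Apple version 98.100.1.1' (macOS) reduces to '1.9.1':
#   re.sub(r'libpcap version ', '', s)      # -> '1.9.1 -- Apple version 98.100.1.1'
#   re.sub(r' -- Apple version.*$', '', s)  # -> '1.9.1'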
class CupsDependencyConfigTool(ConfigToolDependency):
tools = ['cups-config']
tool_name = 'cups-config'
def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]):
super().__init__(name, environment, kwargs)
if not self.is_found:
return
self.compile_args = self.get_config_value(['--cflags'], 'compile_args')
self.link_args = self.get_config_value(['--ldflags', '--libs'], 'link_args')
@staticmethod
def get_methods():
if mesonlib.is_osx():
return [DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL, DependencyMethods.EXTRAFRAMEWORK, DependencyMethods.CMAKE]
else:
return [DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL, DependencyMethods.CMAKE]
class LibWmfDependencyConfigTool(ConfigToolDependency):
tools = ['libwmf-config']
tool_name = 'libwmf-config'
def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]):
super().__init__(name, environment, kwargs)
if not self.is_found:
return
self.compile_args = self.get_config_value(['--cflags'], 'compile_args')
self.link_args = self.get_config_value(['--libs'], 'link_args')
@staticmethod
def get_methods():
return [DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL]
class LibGCryptDependencyConfigTool(ConfigToolDependency):
tools = ['libgcrypt-config']
tool_name = 'libgcrypt-config'
def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]):
super().__init__(name, environment, kwargs)
if not self.is_found:
return
self.compile_args = self.get_config_value(['--cflags'], 'compile_args')
self.link_args = self.get_config_value(['--libs'], 'link_args')
self.version = self.get_config_value(['--version'], 'version')[0]
@staticmethod
def get_methods():
return [DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL]
class GpgmeDependencyConfigTool(ConfigToolDependency):
tools = ['gpgme-config']
tool_name = 'gpgme-config'
def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]):
super().__init__(name, environment, kwargs)
if not self.is_found:
return
self.compile_args = self.get_config_value(['--cflags'], 'compile_args')
self.link_args = self.get_config_value(['--libs'], 'link_args')
self.version = self.get_config_value(['--version'], 'version')[0]
@staticmethod
def get_methods():
return [DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL]
class ShadercDependency(ExternalDependency):
def __init__(self, environment, kwargs):
super().__init__('shaderc', environment, kwargs)
static_lib = 'shaderc_combined'
shared_lib = 'shaderc_shared'
libs = [shared_lib, static_lib]
if self.static:
libs.reverse()
cc = self.get_compiler()
for lib in libs:
self.link_args = cc.find_library(lib, environment, [])
if self.link_args is not None:
self.is_found = True
if self.static and lib != static_lib:
mlog.warning('Static library {!r} not found for dependency {!r}, may '
'not be statically linked'.format(static_lib, self.name))
break
def log_tried(self):
return 'system'
@staticmethod
def get_methods():
return [DependencyMethods.SYSTEM, DependencyMethods.PKGCONFIG]
class CursesConfigToolDependency(ConfigToolDependency):
"""Use the curses config tools."""
tool = 'curses-config'
# ncurses5.4-config is for macOS Catalina
tools = ['ncursesw6-config', 'ncursesw5-config', 'ncurses6-config', 'ncurses5-config', 'ncurses5.4-config']
def __init__(self, name: str, env: 'Environment', kwargs: T.Dict[str, T.Any], language: T.Optional[str] = None):
super().__init__(name, env, kwargs, language)
if not self.is_found:
return
self.compile_args = self.get_config_value(['--cflags'], 'compile_args')
self.link_args = self.get_config_value(['--libs'], 'link_args')
class CursesSystemDependency(ExternalDependency):
"""Curses dependency the hard way.
This replaces hand-rolled find_library() and has_header() calls. We
provide this for portability reasons; there are a large number of curses
implementations, and the differences between them can be very annoying.
"""
def __init__(self, name: str, env: 'Environment', kwargs: T.Dict[str, T.Any]):
super().__init__(name, env, kwargs)
candidates = [
('pdcurses', ['pdcurses/curses.h']),
('ncursesw', ['ncursesw/ncurses.h', 'ncurses.h']),
('ncurses', ['ncurses/ncurses.h', 'ncurses/curses.h', 'ncurses.h']),
('curses', ['curses.h']),
]
# Not sure how else to elegantly break out of both loops
for lib, headers in candidates:
l = self.clib_compiler.find_library(lib, env, [])
if l:
for header in headers:
h = self.clib_compiler.has_header(header, '', env)
if h[0]:
self.is_found = True
self.link_args = l
# Not sure how to find version for non-ncurses curses
# implementations. The one in illumos/OpenIndiana
# doesn't seem to have a version defined in the header.
if lib.startswith('ncurses'):
v, _ = self.clib_compiler.get_define('NCURSES_VERSION', f'#include <{header}>', env, [], [self])
self.version = v.strip('"')
if lib.startswith('pdcurses'):
v_major, _ = self.clib_compiler.get_define('PDC_VER_MAJOR', f'#include <{header}>', env, [], [self])
v_minor, _ = self.clib_compiler.get_define('PDC_VER_MINOR', f'#include <{header}>', env, [], [self])
self.version = f'{v_major}.{v_minor}'
# Check the version if possible, emit a warning if we can't
req = kwargs.get('version')
if req:
if self.version:
self.is_found = mesonlib.version_compare(self.version, req)
else:
mlog.warning('Cannot determine version of curses to compare against.')
if self.is_found:
mlog.debug('Curses library:', l)
mlog.debug('Curses header:', header)
break
if self.is_found:
break
@staticmethod
def get_methods() -> T.List[DependencyMethods]:
return [DependencyMethods.SYSTEM]
@factory_methods({DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL, DependencyMethods.SYSTEM})
def curses_factory(env: 'Environment', for_machine: 'MachineChoice',
kwargs: T.Dict[str, T.Any], methods: T.List[DependencyMethods]) -> T.List[T.Callable[[], 'Dependency']]:
candidates = [] # type: T.List[T.Callable[[], Dependency]]
if DependencyMethods.PKGCONFIG in methods:
pkgconfig_files = ['pdcurses', 'ncursesw', 'ncurses', 'curses']
for pkg in pkgconfig_files:
candidates.append(functools.partial(PkgConfigDependency, pkg, env, kwargs))
# There are path handling problems with these methods on msys, and they
# don't apply to windows otherwise (cygwin is handled separately from
# windows)
if not env.machines[for_machine].is_windows():
if DependencyMethods.CONFIG_TOOL in methods:
candidates.append(functools.partial(CursesConfigToolDependency, 'curses', env, kwargs))
if DependencyMethods.SYSTEM in methods:
candidates.append(functools.partial(CursesSystemDependency, 'curses', env, kwargs))
return candidates
@factory_methods({DependencyMethods.PKGCONFIG, DependencyMethods.SYSTEM})
def shaderc_factory(env: 'Environment', for_machine: 'MachineChoice',
kwargs: T.Dict[str, T.Any], methods: T.List[DependencyMethods]) -> T.List['DependencyType']:
"""Custom DependencyFactory for ShaderC.
ShaderC is odd: you can get three different libraries from the same
build, so it is easier to represent it as a separate function than to
twist DependencyFactory even more.
"""
candidates = [] # type: T.List['DependencyType']
if DependencyMethods.PKGCONFIG in methods:
# ShaderC packages their shared and static libs together
# and provides different pkg-config files for each one. We
# smooth over this difference by handling the static
# keyword before handing off to the pkg-config handler.
shared_libs = ['shaderc']
static_libs = ['shaderc_combined', 'shaderc_static']
if kwargs.get('static', False):
c = [functools.partial(PkgConfigDependency, name, env, kwargs)
for name in static_libs + shared_libs]
else:
c = [functools.partial(PkgConfigDependency, name, env, kwargs)
for name in shared_libs + static_libs]
candidates.extend(c)
if DependencyMethods.SYSTEM in methods:
candidates.append(functools.partial(ShadercDependency, env, kwargs))
return candidates
cups_factory = DependencyFactory(
'cups',
[DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL, DependencyMethods.EXTRAFRAMEWORK, DependencyMethods.CMAKE],
configtool_class=CupsDependencyConfigTool,
cmake_name='Cups',
)
gpgme_factory = DependencyFactory(
'gpgme',
[DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL],
configtool_class=GpgmeDependencyConfigTool,
)
libgcrypt_factory = DependencyFactory(
'libgcrypt',
[DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL],
configtool_class=LibGCryptDependencyConfigTool,
)
libwmf_factory = DependencyFactory(
'libwmf',
[DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL],
configtool_class=LibWmfDependencyConfigTool,
)
pcap_factory = DependencyFactory(
'pcap',
[DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL],
configtool_class=PcapDependencyConfigTool,
pkgconfig_name='libpcap',
)
python3_factory = DependencyFactory(
'python3',
[DependencyMethods.PKGCONFIG, DependencyMethods.SYSTEM, DependencyMethods.EXTRAFRAMEWORK],
system_class=Python3DependencySystem,
# There is no version number in the macOS framework name
framework_name='Python',
# There is a python in /System/Library/Frameworks, but that's python 2.x,
# Python 3 will always be in /Library
extra_kwargs={'paths': ['/Library/Frameworks']},
)
threads_factory = DependencyFactory(
'threads',
[DependencyMethods.SYSTEM, DependencyMethods.CMAKE],
cmake_name='Threads',
system_class=ThreadDependency,
)
| QuLogic/meson | mesonbuild/dependencies/misc.py | Python | apache-2.0 | 22,949 | ["NetCDF"] | aebc96e3a7e40fd3cb9a7c604128893ca5db20fa852416c4d0e3486e294cf849 |
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
import os
import numpy as np
import montage_wrapper as montage
import shutil
import sys
import glob
import time
from matplotlib.path import Path
from scipy.ndimage import zoom
from pdb import set_trace
_TOP_DIR = '/data/tycho/0/leroy.42/allsky/'
_INDEX_DIR = os.path.join(_TOP_DIR, 'code/')
_HOME_DIR = '/n/home00/lewis.1590/research/galbase_allsky/'
_MOSAIC_DIR = os.path.join(_HOME_DIR, 'cutouts')
def calc_tile_overlap(ra_ctr, dec_ctr, pad=0.0, min_ra=0., max_ra=180., min_dec=-90., max_dec=90.):
overlap = ((min_dec - pad) < dec_ctr) & ((max_dec + pad) > dec_ctr)
#TRAP HIGH LATITUDE CASE AND (I GUESS) TOSS BACK ALL TILES. DO BETTER LATER
mean_dec = (min_dec + max_dec) * 0.5
if np.abs(dec_ctr) + pad > 88.0:
return overlap
ra_pad = pad / np.cos(np.radians(mean_dec))
# MERIDIAN CASES
merid = np.where(max_ra < min_ra)
overlap[merid] = overlap[merid] & ( ((min_ra-ra_pad) < ra_ctr) | ((max_ra+ra_pad) > ra_ctr) )[merid]
# BORING CASE
normal = np.where(max_ra > min_ra)
overlap[normal] = overlap[normal] & ((((min_ra-ra_pad) < ra_ctr) & ((max_ra+ra_pad) > ra_ctr)))[normal]
return overlap
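# Hedged usage sketch (synthetic tile bounds; real bounds come from the survey
# index files read below). The second tile spans the RA = 0/360 meridian
# (max_ra < min_ra), which exercises the meridian branch above:
#   hit = calc_tile_overlap(2.0, 0.0, pad=0.5,
#                           min_ra=np.array([10., 350.]), max_ra=np.array([20., 5.]),
#                           min_dec=np.array([-5., -5.]), max_dec=np.array([5., 5.]))
#   # -> array([False,  True])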
def make_axes(hdr, quiet=False, novec=False, vonly=False, simple=False):
# PULL THE IMAGE/CUBE SIZES FROM THE HEADER
naxis = hdr['NAXIS']
naxis1 = hdr['NAXIS1']
naxis2 = hdr['NAXIS2']
if naxis > 2:
naxis3 = hdr['NAXIS3']
## EXTRACT FITS ASTROMETRY STRUCTURE
ww = pywcs.WCS(hdr)
#IF DATASET IS A CUBE THEN WE MAKE THE THIRD AXIS IN THE SIMPLEST WAY POSSIBLE (NO COMPLICATED ASTROMETRY WORRIES FOR FREQUENCY INFORMATION)
if naxis > 3:
#GRAB THE RELEVANT INFORMATION FROM THE ASTROMETRY HEADER
cd = ww.wcs.cd
crpix = ww.wcs.crpix
cdelt = ww.wcs.cdelt
crval = ww.wcs.crval
if naxis > 2:
# MAKE THE VELOCITY AXIS (WILL BE M/S)
v = np.arange(naxis3) * 1.0
vdif = v - (hdr['CRPIX3']-1)
vaxis = (vdif * hdr['CDELT3'] + hdr['CRVAL3'])
# CUT OUT HERE IF WE ONLY WANT VELOCITY INFO
if vonly:
return vaxis
#IF 'SIMPLE' IS CALLED THEN DO THE REALLY TRIVIAL THING:
if simple:
print('Using simple approach to make axes.')
print('BE SURE THIS IS WHAT YOU WANT! It probably is not.')
raxis = np.arange(naxis1) * 1.0
rdif = raxis - (hdr['CRPIX1'] - 1)
raxis = (rdif * hdr['CDELT1'] + hdr['CRVAL1'])
daxis = np.arange(naxis2) * 1.0
ddif = daxis - (hdr['CRPIX2'] - 1)
daxis = (ddif * hdr['CDELT2'] + hdr['CRVAL2'])
# broadcast the 1-d axes into 2-d (naxis1, naxis2) images of RA and Dec
rimg = np.outer(raxis, np.ones(naxis2))
dimg = np.outer(np.ones(naxis1), daxis)
return rimg, dimg
# OBNOXIOUS SFL/GLS THING
glspos = ww.wcs.ctype[0].find('GLS')
if glspos != -1:
ctstr = ww.wcs.ctype[0]
newtype = 'SFL'
ctstr.replace('GLS', 'SFL')
ww.wcs.ctype[0] = ctstr
print('Replaced GLS with SFL; CTYPE1 now =' + ww.wcs.ctype[0])
glspos = ww.wcs.ctype[1].find('GLS')
if glspos != -1:
ctstr = ww.wcs.ctype[1]
newtype = 'SFL'
ctstr.replace('GLS', 'SFL')
ww.wcs.ctype[1] = ctstr
print('Replaced GLS with SFL; CTYPE2 now = ' + ww.wcs.ctype[1])
# CALL 'xy2ad' TO FIND THE RA AND DEC FOR EVERY POINT IN THE IMAGE
if novec:
rimg = np.zeros((naxis1, naxis2))
dimg = np.zeros((naxis1, naxis2))
for i in range(naxis1):
# one call per image column: x is fixed at i, y runs over all rows
# (0-based pixel origin, matching the vectorised branch below)
j = np.arange(naxis2) * 1.0
pixcrd = np.array(zip([float(i)] * naxis2, j), np.float_)
world = ww.all_pix2world(pixcrd, 0)
rimg[i, :] = world[:, 0]
dimg[i, :] = world[:, 1]
else:
ximg = np.arange(naxis1) * 1.0
yimg = np.arange(naxis2) * 1.0
# 'ij' indexing keeps the (x, y) ordering so the output images have shape (naxis1, naxis2)
X, Y = np.meshgrid(ximg, yimg, indexing='ij')
ss = X.shape
xx, yy = X.flatten(), Y.flatten()
pixcrd = np.array(zip(xx, yy), np.float_)
img_new = ww.all_pix2world(pixcrd, 0)
rimg_new, dimg_new = img_new[:,0], img_new[:,1]
rimg = rimg_new.reshape(ss)
dimg = dimg_new.reshape(ss)
# GET AXES FROM THE IMAGES. USE THE CENTRAL COLUMN AND CENTRAL ROW
raxis = np.squeeze(rimg[:, naxis2/2])
daxis = np.squeeze(dimg[naxis1/2, :])
return rimg, dimg
def write_headerfile(header_file, header):
f = open(header_file, 'w')
for iii in range(len(header)):
outline = str(header[iii:iii+1]).strip().rstrip('END').strip()+'\n'
f.write(outline)
f.close()
def create_hdr(ra_ctr, dec_ctr, pix_len, pix_scale):
hdr = pyfits.Header()
hdr['NAXIS'] = 2
hdr['NAXIS1'] = pix_len
hdr['NAXIS2'] = pix_len
hdr['CTYPE1'] = 'RA---TAN'
hdr['CRVAL1'] = float(ra_ctr)
hdr['CRPIX1'] = (pix_len / 2.) * 1.
hdr['CDELT1'] = -1.0 * pix_scale
hdr['CTYPE2'] = 'DEC--TAN'
hdr['CRVAL2'] = float(dec_ctr)
hdr['CRPIX2'] = (pix_len / 2.) * 1.
hdr['CDELT2'] = pix_scale
hdr['EQUINOX'] = 2000
return hdr
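# Hedged usage sketch: a 0.5 deg square TAN header sampled at 2 arcsec/pixel
# (900 x 900 pixels), centred on an arbitrary example position:
#   pix_scale = 2.0 / 3600.
#   hdr = create_hdr(180.0, 30.0, 0.5 / pix_scale, pix_scale)
#   hdr['NAXIS1'], hdr['CDELT1']   # -> 900.0, -0.000555...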
def unwise(band=None, ra_ctr=None, dec_ctr=None, size_deg=None, index=None, name=None):
tel = 'unwise'
data_dir = os.path.join(_TOP_DIR, tel, 'sorted_tiles')
# READ THE INDEX FILE (IF NOT PASSED IN)
if index is None:
indexfile = os.path.join(_INDEX_DIR, tel + '_index_file.fits')
ext = 1
index, hdr = pyfits.getdata(indexfile, ext, header=True)
# CALIBRATION TO GO FROM VEGA TO AB MAGNITUDES
w1_vtoab = 2.683
w2_vtoab = 3.319
w3_vtoab = 5.242
w4_vtoab = 6.604
# NORMALIZATION OF UNITY IN VEGA MAG
norm_mag = 22.5
pix_as = 2.75 # arcseconds - native unWISE pixel size (from the WISE docs)
# COUNTS TO JY CONVERSION
w1_to_mjysr = counts2jy(norm_mag, w1_vtoab, pix_as)
w2_to_mjysr = counts2jy(norm_mag, w2_vtoab, pix_as)
w3_to_mjysr = counts2jy(norm_mag, w3_vtoab, pix_as)
w4_to_mjysr = counts2jy(norm_mag, w4_vtoab, pix_as)
# MAKE A HEADER
pix_scale = 2.0 / 3600. # 2.0 arbitrary
pix_len = size_deg / pix_scale
# this should automatically populate SIMPLE and NAXIS keywords
target_hdr = create_hdr(ra_ctr, dec_ctr, pix_len, pix_scale)
# CALCULATE TILE OVERLAP
tile_overlaps = calc_tile_overlap(ra_ctr, dec_ctr, pad=size_deg,
min_ra=index['MIN_RA'],
max_ra=index['MAX_RA'],
min_dec=index['MIN_DEC'],
max_dec=index['MAX_DEC'])
# FIND OVERLAPPING TILES WITH RIGHT BAND
# index file set up such that index['BAND'] = 1, 2, 3, 4 depending on wise band
ind = np.where((index['BAND'] == band) & tile_overlaps)
ct_overlap = len(ind[0])
# SET UP THE OUTPUT
ri_targ, di_targ = make_axes(target_hdr)
sz_out = ri_targ.shape
outim = ri_targ * np.nan
# LOOP OVER OVERLAPPING TILES AND STITCH ONTO TARGET HEADER
for ii in range(0, ct_overlap):
infile = os.path.join(data_dir, index[ind[ii]]['FNAME'])
im, hdr = pyfits.getdata(infile, header=True)
ri, di = make_axes(hdr)
hh = pywcs.WCS(target_hdr)
xy = hh.all_world2pix(zip(ri.flatten(), di.flatten()), 1)
x, y = xy[:, 0], xy[:, 1]
in_image = (x > 0) & (x < (sz_out[0] - 1)) & (y > 0) & (y < (sz_out[1] - 1))
if np.sum(in_image) == 0:
print("No overlap. Proceeding.")
continue
if band == 1:
im *= w1_to_mjysr
if band == 2:
im *= w2_to_mjysr
if band == 3:
im *= w3_to_mjysr
if band == 4:
im *= w4_to_mjysr
target_hdr['BUNIT'] = 'MJY/SR'
newimfile = reprojection(infile, im, hdr, target_hdr, data_dir)
im, new_hdr = pyfits.getdata(newimfile, header=True)
useful = np.where(np.isfinite(im))
outim[useful] = im[useful]
return outim, target_hdr
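# Hedged usage sketch (coordinates and size are arbitrary examples):
#   im, hdr = unwise(band=1, ra_ctr=10.68, dec_ctr=41.27, size_deg=1.0, name='TEST')
#   pyfits.writeto('TEST_W1.FITS', im, hdr)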
def counts2jy(norm_mag, calibration_value, pix_as):
# convert counts to Jy
val = 10.**((norm_mag + calibration_value) / -2.5)
val *= 3631.0
# then to MJy
val /= 1e6
# then to MJy/sr
val /= np.radians(pix_as / 3600.)**2
return val
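# Worked example: for W1, counts2jy(22.5, 2.683, 2.75) is roughly 1.7e-3, i.e.
# one unWISE W1 count corresponds to about 1.7e-3 MJy/sr (the w1_to_mjysr
# factor used above).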
def galex(band='fuv', ra_ctr=None, dec_ctr=None, size_deg=None, index=None, name=None, write_info=True):
tel = 'galex'
data_dir = os.path.join(_TOP_DIR, tel, 'sorted_tiles')
problem_file = os.path.join(_HOME_DIR, 'problem_galaxies.txt')
#numbers_file = os.path.join(_HOME_DIR, 'number_of_tiles_per_galaxy.dat')
bg_reg_file = os.path.join(_HOME_DIR, 'galex_reprojected_bg.reg')
numbers_file = os.path.join(_HOME_DIR, 'gal_reproj_info.dat')
galaxy_mosaic_file = os.path.join(_MOSAIC_DIR, '_'.join([name, band]).upper() + '.FITS')
start_time = time.time()
if not os.path.exists(galaxy_mosaic_file):
#if name == 'NGC2976':
print name
# READ THE INDEX FILE (IF NOT PASSED IN)
if index is None:
indexfile = os.path.join(_INDEX_DIR, tel + '_index_file.fits')
ext = 1
index, hdr = pyfits.getdata(indexfile, ext, header=True)
# CALIBRATION FROM COUNTS TO ABMAG
fuv_toab = 18.82
nuv_toab = 20.08
# PIXEL SCALE IN ARCSECONDS
pix_as = 1.5 # galex pixel scale -- from galex docs
# MAKE A HEADER
pix_scale = 1.5 / 3600. # 1.5 arbitrary: how should I set it?
pix_len = size_deg / pix_scale
target_hdr = create_hdr(ra_ctr, dec_ctr, pix_len, pix_scale)
# CALCULATE TILE OVERLAP
tile_overlaps = calc_tile_overlap(ra_ctr, dec_ctr, pad=size_deg,
min_ra=index['MIN_RA'],
max_ra=index['MAX_RA'],
min_dec=index['MIN_DEC'],
max_dec=index['MAX_DEC'])
# FIND OVERLAPPING TILES WITH RIGHT BAND
# index file set up such that index['fuv'] = 1 where fuv and
# index['nuv'] = 1 where nuv
ind = np.where((index[band]) & tile_overlaps)
ct_overlap = len(ind[0])
# MAKE SURE THERE ARE OVERLAPPING TILES
if ct_overlap == 0:
if write_info:
with open(problem_file, 'a') as myfile:
myfile.write(name + ': ' + 'No overlapping tiles\n')
return
# SET UP THE OUTPUT
ri_targ, di_targ = make_axes(target_hdr)
sz_out = ri_targ.shape
outim = ri_targ * np.nan
prihdu = pyfits.PrimaryHDU(data=outim, header=target_hdr)
target_hdr = prihdu.header
# GATHER THE INPUT FILES
infiles = index[ind[0]]['fname']
wtfiles = index[ind[0]]['rrhrfile']
flgfiles = index[ind[0]]['flagfile']
infiles = [os.path.join(data_dir, f) for f in infiles]
wtfiles = [os.path.join(data_dir, f) for f in wtfiles]
flgfiles = [os.path.join(data_dir, f) for f in flgfiles]
# CREATE NEW TEMP DIRECTORY TO STORE TEMPORARY FILES
gal_dir = os.path.join(_HOME_DIR, name)
os.makedirs(gal_dir)
# CREATE SUBDIRECTORIES INSIDE TEMP DIRECTORY FOR ALL TEMP FILES
input_dir = os.path.join(gal_dir, 'input')
reprojected_dir = os.path.join(gal_dir, 'reprojected')
weights_dir = os.path.join(gal_dir, 'weights')
weighted_dir = os.path.join(gal_dir, 'weighted')
final_dir = os.path.join(gal_dir, 'mosaic')
for indir in [input_dir, reprojected_dir, weights_dir, weighted_dir, final_dir]:
os.makedirs(indir)
# SYMLINK ORIGINAL RRHR FILES TO TEMPORARY INPUT DIRECTORY
for wtfile in wtfiles:
basename = os.path.basename(wtfile)
new_wt_file = os.path.join(input_dir, basename)
os.symlink(wtfile, new_wt_file)
for flgfile in flgfiles:
basename = os.path.basename(flgfile)
new_flg_file = os.path.join(input_dir, basename)
os.symlink(flgfile, new_flg_file)
# CONVERT INT FILES TO MJY/SR AND WRITE NEW FILES INTO TEMP DIR
# CONVERT WT FILES TO WT/SR AND WRITE NEW FILES INTO TEMP DIR
int_outfiles = [os.path.join(input_dir, f.split('/')[-1].replace('.fits', '_mjysr.fits')) for f in infiles]
wt_outfiles = [os.path.join(input_dir, f.split('/')[-1].replace('.fits', '_sr.fits')) for f in wtfiles]
for i in range(len(infiles)):
im, hdr = pyfits.getdata(infiles[i], header=True)
wt, whdr = pyfits.getdata(wtfiles[i], header=True)
wt = wtpersr(wt, pix_as)
if band.lower() == 'fuv':
im = counts2jy_galex(im, fuv_toab, pix_as)
if band.lower() == 'nuv':
im = counts2jy_galex(im, nuv_toab, pix_as)
if not os.path.exists(int_outfiles[i]):
pyfits.writeto(int_outfiles[i], im, hdr)
pyfits.writeto(wt_outfiles[i], wt, whdr)
# APPEND UNIT INFORMATION TO THE NEW HEADER
target_hdr['BUNIT'] = 'MJY/SR'
# WRITE OUT A HEADER FILE
hdr_file = os.path.join(gal_dir, name + '_template.hdr')
write_headerfile(hdr_file, target_hdr)
# PERFORM THE REPROJECTION, WEIGHTING, AND EXTRACTION
#try:
# REPROJECT INPUT IMAGES (-int and -rrhr)
int_suff, rrhr_suff, flag_suff = '*_mjysr.fits', '*-rrhr_sr.fits', '*-flags.fits'
int_images = sorted(glob.glob(os.path.join(input_dir, int_suff)))
rrhr_images = sorted(glob.glob(os.path.join(input_dir, rrhr_suff)))
flag_images = sorted(glob.glob(os.path.join(input_dir, flag_suff)))
reproject_images(hdr_file, int_images, rrhr_images, flag_images, input_dir, reprojected_dir)
# WEIGHT IMAGES
im_suff, wt_suff = '*_mjysr_masked.fits', '*-rrhr_sr_masked.fits'
imfiles = sorted(glob.glob(os.path.join(reprojected_dir, im_suff)))
wtfiles = sorted(glob.glob(os.path.join(reprojected_dir, wt_suff)))
weight_images(imfiles, wtfiles, weighted_dir, weights_dir)
# CREATE THE METADATA TABLES NEEDED FOR COADDITION
tables = create_tables(weights_dir, weighted_dir)
# COADD THE REPROJECTED, WEIGHTED IMAGES AND THE WEIGHT IMAGES
coadd(hdr_file, final_dir, weights_dir, weighted_dir)
# DIVIDE OUT THE WEIGHTS
imagefile = finish_weight(final_dir)
# SUBTRACT OUT THE BACKGROUND
remove_background(final_dir, imagefile, bg_reg_file)
# COPY MOSAIC FILE TO CUTOUTS DIRECTORY
mosaic_file = os.path.join(final_dir, 'final_mosaic.fits')
newfile = '_'.join([name, band]).upper() + '.FITS'
new_mosaic_file = os.path.join(_MOSAIC_DIR, newfile)
shutil.copy(mosaic_file, new_mosaic_file)
# REMOVE GALAXY DIRECTORY AND EXTRA FILES
#shutil.rmtree(gal_dir)
stop_time = time.time()
total_time = (stop_time - start_time) / 60.
# WRITE OUT THE NUMBER OF TILES THAT OVERLAP THE GIVEN GALAXY
if write_info:
out_arr = [name, len(infiles), np.around(total_time,2)]
with open(numbers_file, 'a') as nfile:
nfile.write('{0: >10}'.format(out_arr[0]))
nfile.write('{0: >6}'.format(out_arr[1]))
nfile.write('{0: >6}'.format(out_arr[2]) + '\n')
#nfile.write(name + ': ' + str(len(infiles)) + '\n')
# SOMETHING WENT WRONG
#except Exception as inst:
# me = sys.exc_info()[0]
# if write_info:
# with open(problem_file, 'a') as myfile:
# myfile.write(name + ': ' + str(me) + ': '+str(inst)+'\n')
# shutil.rmtree(gal_dir)
return
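# Illustrative call (not part of the original script; the coordinates and galaxy name
# are hypothetical): build a half-degree NUV mosaic for one target, assuming the index
# files and the directory constants used above are in place.
#   galex(band='nuv', ra_ctr=150.0, dec_ctr=30.0, size_deg=0.5, name='EXAMPLE_GAL')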
def counts2jy_galex(counts, cal, pix_as):
# first convert to abmag
abmag = -2.5 * np.log10(counts) + cal
# then convert to Jy
f_nu = 10**(abmag/-2.5) * 3631.
# then to MJy
f_nu *= 1e-6
# then to MJy/sr
val = f_nu / (np.radians(pix_as/3600))**2
return val
#val = flux / MJYSR2JYARCSEC / pixel_area / 1e-23 / C * FUV_LAMBDA**2
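# Worked example for counts2jy_galex (illustrative only): with the FUV zero point used
# above (fuv_toab = 18.82) and 1.5 arcsec pixels, a rate of 1 count/s gives
#   abmag = -2.5*log10(1) + 18.82 = 18.82
#   f_nu  = 10**(18.82 / -2.5) * 3631 Jy ~ 1.08e-4 Jy ~ 1.08e-10 MJy
#   val   = f_nu / (1.5 arcsec in radians)**2 ~ 2.0 MJy/sr
# i.e. the function carries counts -> AB mag -> Jy -> MJy -> MJy/sr.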
def wtpersr(wt, pix_as):
return wt / (np.radians(pix_as/3600))**2
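# The rrhr maps (GALEX high-resolution relative-response / effective-exposure images)
# are used as weights; wtpersr above rescales them to a per-steradian basis so they
# remain consistent with the MJy/sr intensity images in the weighting and coaddition
# steps below.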
def mask_galex_edges(infile, flagfile, outfile=None, chip_rad = 1400, chip_x0=1920, chip_y0=1920):
if outfile is None:
outfile = infile.replace('.fits', '_masked.fits')
if not os.path.exists(outfile):
data, hdr = pyfits.getdata(infile, header=True)
flag, fhdr = pyfits.getdata(flagfile, header=True)
factor = float(len(data)) / len(flag)
upflag = zoom(flag, factor, order=0)
# chip_x0, chip_y0 = hdr['CRPIX1'], hdr['CRPIX2']
x = np.arange(data.shape[1]).reshape(1, -1) + 1
y = np.arange(data.shape[0]).reshape(-1, 1) + 1
r = np.sqrt((x - chip_x0)**2 + (y - chip_y0)**2)
i = (r > chip_rad) #& (upflag > 0)#(data == 0)# & (upflag > 0)
newdata = np.where(i, np.nan, data)
#data = np.where(i, 0.0, data)
        pyfits.writeto(outfile, newdata, hdr)
def mask_galex(intfile, wtfile, flagfile, outfile=None, chip_rad = 1400, chip_x0=1920, chip_y0=1920, out_intfile=None, out_wtfile=None):
if out_intfile is None:
out_intfile = intfile.replace('.fits', '_masked.fits')
if out_wtfile is None:
out_wtfile = wtfile.replace('.fits', '_masked.fits')
if not os.path.exists(out_intfile):
data, hdr = pyfits.getdata(intfile, header=True)
wt, whdr = pyfits.getdata(wtfile, header=True)
flag, fhdr = pyfits.getdata(flagfile, header=True)
factor = float(len(data)) / len(flag)
upflag = zoom(flag, factor, order=0)
# chip_x0, chip_y0 = hdr['CRPIX1'], hdr['CRPIX2']
x = np.arange(data.shape[1]).reshape(1, -1) + 1
y = np.arange(data.shape[0]).reshape(-1, 1) + 1
r = np.sqrt((x - chip_x0)**2 + (y - chip_y0)**2)
i = (r > chip_rad) | (data == 0)
data = np.where(i, 0, data)
wt = np.where(i, 1e-20, wt)
pyfits.writeto(out_intfile, data, hdr)
pyfits.writeto(out_wtfile, wt, whdr)
def reproject_images(template_header, int_images, rrhr_images, flag_images, input_dir, reprojected_dir, whole=True, exact=True):
# MASK IMAGES
for i in range(len(int_images)):
image_infile = int_images[i]
wt_infile = rrhr_images[i]
flg_infile = flag_images[i]
image_outfile = os.path.join(input_dir, os.path.basename(image_infile).replace('.fits', '_masked.fits'))
wt_outfile = os.path.join(input_dir, os.path.basename(wt_infile).replace('.fits', '_masked.fits'))
#mask_galex_edges(image_infile, flg_infile, outfile=image_outfile)
#mask_galex_edges(wt_infile, flg_infile, outfile=wt_outfile)
mask_galex(image_infile, wt_infile, flg_infile, out_intfile=image_outfile, out_wtfile=wt_outfile)
# REPROJECT IMAGES
input_table = os.path.join(input_dir, 'input.tbl')
montage.mImgtbl(input_dir, input_table, corners=True)
# Create reprojection directory, reproject, and get image metadata
#whole = True #if background_match else False
stats_table = os.path.join(reprojected_dir, 'mProjExec_stats.log')
montage.mProjExec(input_table, template_header, reprojected_dir, stats_table, raw_dir=input_dir, whole=whole, exact=exact)
reprojected_table = os.path.join(reprojected_dir, 'reprojected.tbl')
montage.mImgtbl(reprojected_dir, reprojected_table, corners=True)
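# Summary of the Montage calls above: mImgtbl builds a metadata table of the masked
# input images, mProjExec reprojects each of them onto the template header written by
# write_headerfile, and a second mImgtbl catalogues the reprojected frames so they can
# later be coadded with mAdd.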
def weight_images(imfiles, wtfiles, weighted_dir, weights_dir):
for i in range(len(imfiles)):
imfile = imfiles[i]
wtfile = wtfiles[i]
im, hdr = pyfits.getdata(imfile, header=True)
rrhr, rrhrhdr = pyfits.getdata(wtfile, header=True)
wt = rrhr
newim = im * wt
nf = imfiles[i].split('/')[-1].replace('.fits', '_weighted.fits')
newfile = os.path.join(weighted_dir, nf)
pyfits.writeto(newfile, newim, hdr)
old_area_file = imfiles[i].replace('.fits', '_area.fits')
new_area_file = newfile.replace('.fits', '_area.fits')
shutil.copy(old_area_file, new_area_file)
nf = wtfiles[i].split('/')[-1].replace('.fits', '_weights.fits')
weightfile = os.path.join(weights_dir, nf)
pyfits.writeto(weightfile, wt, rrhrhdr)
old_area_file = wtfiles[i].replace('.fits', '_area.fits')
new_area_file = weightfile.replace('.fits', '_area.fits')
shutil.copy(old_area_file, new_area_file)
def create_tables(weights_dir, weighted_dir):
return_tables = []
in_dir = weights_dir
reprojected_table = os.path.join(in_dir, 'weights_reprojected.tbl')
montage.mImgtbl(in_dir, reprojected_table, corners=True)
return_tables.append(reprojected_table)
in_dir = weighted_dir
reprojected_table = os.path.join(in_dir, 'int_reprojected.tbl')
montage.mImgtbl(in_dir, reprojected_table, corners=True)
return_tables.append(reprojected_table)
return return_tables
def coadd(template_header, output_dir, weights_dir, weighted_dir):
img_dirs = [weights_dir, weighted_dir]
outputs = ['weights', 'int']
for img_dir, output in zip(img_dirs, outputs):
reprojected_table = os.path.join(img_dir, output + '_reprojected.tbl')
out_image = os.path.join(output_dir, output + '_mosaic.fits')
montage.mAdd(reprojected_table, template_header, out_image, img_dir=img_dir, exact=True)
def finish_weight(output_dir):
image_file = os.path.join(output_dir, 'int_mosaic.fits')
wt_file = os.path.join(output_dir, 'weights_mosaic.fits')
im, hdr = pyfits.getdata(image_file, header=True)
wt, wthdr = pyfits.getdata(wt_file, header=True)
newim = im / wt
newfile = os.path.join(output_dir, 'image_mosaic.fits')
pyfits.writeto(newfile, newim, hdr)
return newfile
def remove_background(final_dir, imfile, bgfile):
data, hdr = pyfits.getdata(imfile, header=True)
box_inds = read_bg_regfile(bgfile)
allvals = []
sample_means = []
for box in box_inds:
rectangle = zip(box[0::2], box[1::2])
sample = get_bg_sample(data, hdr, rectangle)
for s in sample:
allvals.append(s)
sample_mean = np.nanmean(sample)
sample_means.append(sample_mean)
this_mean = np.around(np.nanmean(sample_means), 8)
final_data = data - this_mean
hdr['BG'] = this_mean
hdr['comment'] = 'Background has been subtracted.'
outfile = os.path.join(final_dir, 'final_mosaic.fits')
pyfits.writeto(outfile, final_data, hdr)
def read_bg_regfile(regfile):
f = open(regfile, 'r')
boxes = f.readlines()
f.close()
box_list = []
for b in boxes:
        box = b.strip('polygon()\n').split(',')
        this_box = [int(np.around(float(bb), 0)) for bb in box]
box_list.append(this_box)
return box_list
def get_bg_sample(data, hdr, box):
wcs = pywcs.WCS(hdr, naxis=2)
x, y = np.arange(data.shape[0]), np.arange(data.shape[1])
X, Y = np.meshgrid(x, y, indexing='ij')
xx, yy = X.flatten(), Y.flatten()
pixels = np.array(zip(yy, xx))
box_coords = box
sel = Path(box_coords).contains_points(pixels)
sample = data.flatten()[sel]
return sample
| arlewis/galaxy_cutouts | versions/extract_stamp_v2.py | Python | mit | 23,234 | [
"Galaxy"
] | 88546be8312873a514343f2bfc848a9a5ebdc129d10ec988d377c272e723fff3 |
########################################################################
# File: Operation.py
# Date: 2012/07/24 12:12:05
########################################################################
"""
:mod:`Operation`

.. module:: Operation
   :synopsis: Operation implementation

Operation implementation
"""
# Disable invalid names warning
# pylint: disable=invalid-name
__RCSID__ = "$Id$"
import datetime
from types import StringTypes
import json
# # from DIRAC
from DIRAC import S_OK, S_ERROR
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.RequestManagementSystem.private.JSONUtils import RMSEncoder
########################################################################
class Operation( object ):
"""
:param long OperationID: OperationID as read from DB backend
:param long RequestID: parent RequestID
:param str Status: execution status
:param str Type: operation to perform
:param str Arguments: additional arguments
:param str SourceSE: source SE name
:param str TargetSE: target SE names as comma separated list
:param str Catalog: catalog to use as comma separated list
:param str Error: error string if any
:param Request.Request parent: parent Request instance
It is managed by SQLAlchemy, so the RequestID, OperationID should never be set by hand
(except when constructed from JSON of course...)
  In principle, the _parent attribute could be managed entirely by SQLAlchemy. However,
  SQLAlchemy only sets it when the object is inserted into the DB, so it is also set
  manually in Request._notify.
"""
# # max files in a single operation
MAX_FILES = 10000
# # all states
ALL_STATES = ( "Queued", "Waiting", "Scheduled", "Assigned", "Failed", "Done", "Canceled" )
# # final states
FINAL_STATES = ( "Failed", "Done", "Canceled" )
# # valid attributes
ATTRIBUTE_NAMES = ['OperationID', 'RequestID', "Type", "Status", "Arguments",
"Order", "SourceSE", "TargetSE", "Catalog", "Error",
"CreationTime", "SubmitTime", "LastUpdate"]
_datetimeFormat = '%Y-%m-%d %H:%M:%S'
def __init__( self, fromDict = None ):
""" c'tor
:param self: self reference
:param dict fromDict: attributes dictionary
"""
self._parent = None
now = datetime.datetime.utcnow().replace( microsecond = 0 )
self._SubmitTime = now
self._LastUpdate = now
self._CreationTime = now
self._Status = "Queued"
self._Order = 0
self.__files__ = []
self.TargetSE = None
self.SourceSE = None
self.Arguments = None
self.Error = None
self.Type = None
self._Catalog = None
fromDict = fromDict if isinstance( fromDict, dict )\
else json.loads( fromDict ) if isinstance( fromDict, StringTypes )\
else {}
if "Files" in fromDict:
for fileDict in fromDict.get( "Files", [] ):
self.addFile( File( fileDict ) )
del fromDict["Files"]
for key, value in fromDict.items():
      # The JSON module decodes all strings as unicode, which is not handled properly
      # everywhere in DIRAC, so string values are re-encoded to str here.
      # A cleaner fix would be to replace the '== str' checks with 'in StringTypes'.
if type( value ) in StringTypes:
value = value.encode()
if value:
setattr( self, key, value )
# # protected methods for parent only
def _notify( self ):
""" notify self about file status change """
fStatus = set( self.fileStatusList() )
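    # Derive the Operation status from its files' statuses, in order of precedence:
    # all files Failed -> Failed; any Scheduled -> Scheduled; any Waiting -> Queued;
    # any Failed among otherwise finished files -> Failed; otherwise -> Done.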
if fStatus == set( ['Failed'] ):
# All files Failed -> Failed
newStatus = 'Failed'
elif 'Scheduled' in fStatus:
newStatus = 'Scheduled'
elif "Waiting" in fStatus:
newStatus = 'Queued'
elif 'Failed' in fStatus:
newStatus = 'Failed'
else:
self.Error = ''
newStatus = 'Done'
    # If the status moved to Failed, Done or Scheduled, update the lastUpdate time
if newStatus in ('Failed', 'Done', 'Scheduled'):
if self._Status != newStatus:
self._LastUpdate = datetime.datetime.utcnow().replace( microsecond = 0 )
self._Status = newStatus
if self._parent:
self._parent._notify()
def _setQueued( self, caller ):
""" don't touch """
if caller == self._parent:
self._Status = "Queued"
def _setWaiting( self, caller ):
""" don't touch as well """
if caller == self._parent:
self._Status = "Waiting"
# # Files arithmetics
def __contains__( self, opFile ):
""" in operator """
return opFile in self.__files__
def __iadd__( self, opFile ):
""" += operator """
if len( self ) >= Operation.MAX_FILES:
raise RuntimeError( "too many Files in a single Operation" )
self.addFile( opFile )
return self
def addFile( self, opFile ):
""" add :opFile: to operation
.. warning::
You cannot add a File object that has already been added to another operation. They must be different objects
"""
if len( self ) >= Operation.MAX_FILES:
raise RuntimeError( "too many Files in a single Operation" )
if opFile not in self:
self.__files__.append( opFile )
opFile._parent = self
self._notify()
# # helpers for looping
def __iter__( self ):
""" files iterator """
return self.__files__.__iter__()
def __getitem__( self, i ):
""" [] op for opFiles """
return self.__files__.__getitem__( i )
def __delitem__( self, i ):
""" remove file from op, only if OperationID is NOT set """
self.__files__.__delitem__( i )
self._notify()
def __setitem__( self, i, opFile ):
""" overwrite opFile """
self.__files__.__setitem__( i, opFile )
opFile._parent = self
self._notify()
def fileStatusList( self ):
""" get list of files statuses """
return [ subFile.Status for subFile in self ]
def __nonzero__( self ):
""" for comparisons
"""
return True
def __len__( self ):
""" nb of subFiles """
return len( self.__files__ )
@property
def sourceSEList( self ):
""" helper property returning source SEs as a list"""
return self.SourceSE.split( "," ) if self.SourceSE else ['']
@property
def targetSEList( self ):
""" helper property returning target SEs as a list"""
return self.TargetSE.split( "," ) if self.TargetSE else ['']
@property
def Catalog( self ):
""" catalog prop """
return self._Catalog
@Catalog.setter
def Catalog( self, value ):
""" catalog setter """
if type( value ) not in ( str, unicode, list ):
raise TypeError( "wrong type for value" )
if type( value ) in ( str, unicode ):
value = value.split( ',' )
value = ",".join( list ( set ( [ str( item ).strip() for item in value if str( item ).strip() ] ) ) )
if len( value ) > 255:
raise ValueError( "Catalog list too long" )
self._Catalog = value.encode() if value else ""
@property
def catalogList( self ):
""" helper property returning catalogs as list """
return self._Catalog.split( "," ) if self._Catalog else []
@property
def Status( self ):
""" Status prop """
return self._Status
@Status.setter
def Status( self, value ):
""" Status setter """
if value not in Operation.ALL_STATES:
raise ValueError( "unknown Status '%s'" % str( value ) )
if self.__files__:
self._notify()
else:
# If the status moved to Failed or Done, update the lastUpdate time
if value in ( 'Failed', 'Done' ):
if self._Status != value:
self._LastUpdate = datetime.datetime.utcnow().replace( microsecond = 0 )
self._Status = value
if self._parent:
self._parent._notify()
if self._Status == 'Done':
self.Error = ''
@property
def Order( self ):
""" order prop """
if self._parent:
self._Order = self._parent.indexOf( self ) if self._parent else -1
return self._Order
@Order.setter
def Order( self, value ):
""" order prop """
self._Order = value
@property
def CreationTime( self ):
""" operation creation time prop """
return self._CreationTime
@CreationTime.setter
def CreationTime( self, value = None ):
""" creation time setter """
if type( value ) not in ( [datetime.datetime] + list( StringTypes ) ):
raise TypeError( "CreationTime should be a datetime.datetime!" )
if type( value ) in StringTypes:
value = datetime.datetime.strptime( value.split( "." )[0], self._datetimeFormat )
self._CreationTime = value
@property
def SubmitTime( self ):
""" subrequest's submit time prop """
return self._SubmitTime
@SubmitTime.setter
def SubmitTime( self, value = None ):
""" submit time setter """
if type( value ) not in ( [datetime.datetime] + list( StringTypes ) ):
raise TypeError( "SubmitTime should be a datetime.datetime!" )
if type( value ) in StringTypes:
value = datetime.datetime.strptime( value.split( "." )[0], self._datetimeFormat )
self._SubmitTime = value
@property
def LastUpdate( self ):
""" last update prop """
return self._LastUpdate
@LastUpdate.setter
def LastUpdate( self, value = None ):
""" last update setter """
if type( value ) not in ( [datetime.datetime] + list( StringTypes ) ):
raise TypeError( "LastUpdate should be a datetime.datetime!" )
if type( value ) in StringTypes:
value = datetime.datetime.strptime( value.split( "." )[0], self._datetimeFormat )
self._LastUpdate = value
if self._parent:
self._parent.LastUpdate = value
def __str__( self ):
""" str operator """
return self.toJSON()['Value']
def toJSON( self ):
""" Returns the JSON description string of the Operation """
try:
jsonStr = json.dumps( self, cls = RMSEncoder )
return S_OK( jsonStr )
except Exception as e:
return S_ERROR( str( e ) )
def _getJSONData( self ):
""" Returns the data that have to be serialized by JSON """
jsonData = {}
for attrName in Operation.ATTRIBUTE_NAMES:
# RequestID and OperationID might not be set since they are managed by SQLAlchemy
if not hasattr(self, attrName):
continue
value = getattr(self, attrName)
if isinstance(value, datetime.datetime):
# We convert date time to a string
jsonData[attrName] = value.strftime(self._datetimeFormat) # pylint: disable=no-member
else:
jsonData[attrName] = value
jsonData['Files'] = self.__files__
return jsonData
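# Illustrative usage sketch (not part of DIRAC itself): it assumes a working DIRAC
# installation, and the SE names and LFN below are hypothetical. It only shows the
# construction pattern described in the class docstring -- plain attribute assignment,
# files attached with addFile(), JSON serialisation with toJSON().
if __name__ == "__main__":
  exampleOp = Operation()
  exampleOp.Type = "ReplicateAndRegister"
  exampleOp.TargetSE = "CERN-USER,RAL-USER"
  exampleOp.addFile( File( { "LFN": "/some/vo/user/e/example/file.txt" } ) )
  print exampleOp.toJSON()["Value"]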
| fstagni/DIRAC | RequestManagementSystem/Client/Operation.py | Python | gpl-3.0 | 10,442 | [
"DIRAC"
] | 3d7f83c6e66ad9c7ef4ba25562cb4f635ef10460e361a5e54192a6a7101b7489 |
import lb_loader
import numpy as np
import pandas as pd
import simtk.openmm as mm
from simtk import unit as u
from openmmtools import hmc_integrators, testsystems
collision_rate = 1.0 / u.picoseconds
n_steps = 1500
temperature = 300. * u.kelvin
system, positions = lb_loader.load_lb()
hmc_integrators.guess_force_groups(system)
positions = lb_loader.pre_equil(system, positions, temperature)
def test_hmc(timestep, steps_per_hmc):
timestep = timestep * u.femtoseconds
integrator = hmc_integrators.GHMC2(temperature, steps_per_hmc, timestep)
context = mm.Context(system, integrator)
context.setPositions(positions)
context.setVelocitiesToTemperature(temperature)
integrator.step(n_steps)
return integrator.acceptance_rate
timestep_list = np.linspace(1.5, 2.25, 3)
steps_per_hmc_list = np.array([5, 10, 25])
#steps_per_hmc_list = np.array([10, 5, 25])
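# Sweep the (timestep, steps_per_hmc) grid: "acceptance" is the GHMC acceptance rate
# reported by the integrator, and "normalized" = timestep * acceptance is used as a
# rough proxy for the effective timestep of the sampler.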
data = []
for i, timestep in enumerate(timestep_list):
for j, steps_per_hmc in enumerate(steps_per_hmc_list):
print(i, j, timestep, steps_per_hmc)
acceptance = test_hmc(timestep, steps_per_hmc)
data.append(dict(acceptance=acceptance, timestep=timestep, steps_per_hmc=steps_per_hmc, normalized=timestep * acceptance))
print(data[-1])
data = pd.DataFrame(data)
acceptance = data.pivot("timestep", "steps_per_hmc", "acceptance")
normalized = data.pivot("timestep", "steps_per_hmc", "normalized")
acceptance
normalized
"""
In [25]: acceptance
Out[25]:
steps_per_hmc 5 10 25
timestep
1.500 0.791333 0.760 0.664000
1.875 0.694000 0.724 0.666667
2.250 0.601333 0.680 0.634000
In [26]: normalized
Out[26]:
steps_per_hmc 5 10 25
timestep
1.500 1.18700 1.1400 0.9960
1.875 1.30125 1.3575 1.2500
2.250 1.35300 1.5300 1.4265
"""
| kyleabeauchamp/HMCNotes | code/old/test_ghmc.py | Python | gpl-2.0 | 1,915 | [
"OpenMM"
] | 8bd607eb8279999a3b9f886c5d343a0437148d3295c820eb23bacfd6e435ad92 |
'''
Created on Jul 31, 2012
@author: arenduchintala
'''
import math
import pickle
import numpy
from scipy import stats
def dbgprint(*args):
return
class GaussianNaiveBayes(object):
'''
classdocs
gaussian naive bayes classifier
    a single training instance tuple:
    (label, [value, value, value, ... ])
    classify() takes just the feature vector:
    [value, value, value, ... ]
    *feature indices are the positions in the vector; entries equal to the
    configured null_value are ignored in training and in classification
    data types:
    label preferred type int
    value (weight of a feature) preferred type int/float
'''
def __init__(self, null_value):
"""
Constructor
"""
self.classmodels_count = {}
self.classmodels = {}
self.classmodelsMeanAndVariance = {}
self.featureTokenCount = 0
self.featureTypeCount = 0
self.null_value = null_value
def train(self, training_vecs):
for item in training_vecs:
current_class = item[0]
feature_vector = item[1]
dbgprint("Current class: ", current_class, "feature_vector: ", feature_vector)
if current_class in self.classmodels:
current_class_model = self.classmodels[current_class]
self.classmodels_count[current_class] += 1
else:
current_class_model = {}
self.classmodels_count[current_class] = 1
for feature, value in enumerate(feature_vector):
if float(value) == float(self.null_value): # TODO
continue
if feature in current_class_model:
list_of_values = current_class_model[feature]
else:
list_of_values = []
list_of_values.append(value)
current_class_model[feature] = list_of_values
self.classmodels[current_class] = current_class_model
for a_class in self.classmodels.keys():
a_class_model = self.classmodels[a_class]
a_class_model_mean_and_variance = {}
for feature in a_class_model.keys():
# mean = numpy.array(a_class_model[feature]).mean()
# std = numpy.array(a_class_model[feature]).std()
classmodelfeatures = map(float, a_class_model[feature])
mean = numpy.mean(classmodelfeatures)
std = numpy.std(classmodelfeatures)
                #limit the standard deviation to a minimum value to avoid degenerate pdfs
                minimum = 1
                std = max(std, minimum)
a_class_model_mean_and_variance[feature] = (mean, std)
self.classmodelsMeanAndVariance[a_class] = a_class_model_mean_and_variance
def classify(self, feature_vec):
outputs = []
class_model_output_prob = {}
for a_class in self.classmodelsMeanAndVariance.keys():
a_class_output_prob = 0.0
a_class_model_mean_and_variance = self.classmodelsMeanAndVariance[a_class]
for feature, value in enumerate(feature_vec):
#simply ignore a feature if its not seen in training
if float(value) == float(self.null_value): # TODO
continue
if feature in a_class_model_mean_and_variance:
feature_mean = a_class_model_mean_and_variance[feature][0]
feature_std = a_class_model_mean_and_variance[feature][1]
dbgprint("value:", float(value), "mean:", feature_mean, "std:", feature_std)
dbgprint("pdf:", stats.norm.pdf(float(value), feature_mean, feature_std))
prob = max(0.0001, stats.norm.pdf(float(value), feature_mean, feature_std))
a_class_output_prob += math.log10(prob)
#ignoring P(class) prior.. assuming equal priors
class_model_output_prob[a_class_output_prob] = a_class
probs = class_model_output_prob.keys()
probs.sort(reverse=True)
for prob in probs:
outputs.append(class_model_output_prob[prob])
return outputs
def saveModel(self, filename):
output_file = open(filename, 'wb')
pickle.dump((self.classmodels_count, self.classmodelsMeanAndVariance), output_file)
output_file.flush()
output_file.close()
def loadModel(self, filename):
input_file = open(filename, 'rb')
items = pickle.load(input_file)
self.classmodels_count = items[0]
self.classmodelsMeanAndVariance = items[1]
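# Illustrative usage sketch (not part of the original module): the labels and feature
# values below are made up; entries equal to null_value (here 0) would be treated as
# missing in both training and classification.
if __name__ == '__main__':
    training = [
        (0, [1.0, 2.1, 0.9]),
        (0, [1.2, 1.9, 1.1]),
        (1, [3.0, 0.5, 2.8]),
        (1, [2.7, 0.4, 3.1]),
    ]
    gnb = GaussianNaiveBayes(null_value=0)
    gnb.train(training)
    # classify() returns the candidate labels ranked from most to least likely
    print(gnb.classify([2.9, 0.6, 3.0]))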
| alexf101/indoor-tracking | FingerprintsREST/views/MatchLocation/GaussianNB.py | Python | gpl-2.0 | 4,630 | [
"Gaussian"
] | be46ab4d000478ee61dbd09fb02ba7071973508fd8c301c73bd65f05863073d3 |
import numpy as np
import cvxopt as co
def load_mnist_dataset():
import torchvision.datasets as datasets
mnist_train = datasets.MNIST(root='../data/mnist', train=True, download=True, transform=None)
mnist_test = datasets.MNIST(root='../data/mnist', train=False, download=True, transform=None)
test_labels = np.array([mnist_test[i][1].numpy() for i in range(len(mnist_test))], dtype=np.int)
train_labels = np.array([mnist_train[i][1].numpy() for i in range(len(mnist_train))], dtype=np.int)
test = np.array([np.asarray(mnist_test[i][0]).reshape(28*28) for i in range(len(mnist_test))], dtype=np.float)
train = np.array([np.asarray(mnist_train[i][0]).reshape(28*28) for i in range(len(mnist_train))], dtype=np.float)
train /= 255. # normalize data to be in range [0,1]
test /= 255.
return train, train_labels, test, test_labels, [28, 28]
def load_fashion_mnist_dataset():
import torchvision.datasets as datasets
mnist_train = datasets.FashionMNIST(root='../data/fashion-mnist', train=True, download=True, transform=None)
mnist_test = datasets.FashionMNIST(root='../data/fashion-mnist', train=False, download=True, transform=None)
test_labels = np.array([mnist_test[i][1].numpy() for i in range(len(mnist_test))], dtype=np.int)
train_labels = np.array([mnist_train[i][1].numpy() for i in range(len(mnist_train))], dtype=np.int)
test = np.array([np.asarray(mnist_test[i][0]).reshape(28*28) for i in range(len(mnist_test))], dtype=np.float)
train = np.array([np.asarray(mnist_train[i][0]).reshape(28*28) for i in range(len(mnist_train))], dtype=np.float)
train /= 255. # normalize data to be in range [0,1]
test /= 255.
return train, train_labels, test, test_labels, [28, 28]
def load_emnist_dataset():
import torchvision.datasets as datasets
mnist_train = datasets.EMNIST(root='../data/emnist', split='balanced', train=True, download=True, transform=None)
mnist_test = datasets.EMNIST(root='../data/emnist', split='balanced', train=False, download=True, transform=None)
test_labels = np.array([mnist_test[i][1].numpy() for i in range(len(mnist_test))], dtype=np.int)
train_labels = np.array([mnist_train[i][1].numpy() for i in range(len(mnist_train))], dtype=np.int)
test = np.array([np.asarray(mnist_test[i][0]).reshape(28*28) for i in range(len(mnist_test))], dtype=np.float)
train = np.array([np.asarray(mnist_train[i][0]).reshape(28*28) for i in range(len(mnist_train))], dtype=np.float)
train /= 255. # normalize data to be in range [0,1]
test /= 255.
return train, train_labels, test, test_labels, [28, 28]
def load_cifar10_dataset():
import torchvision.datasets as datasets
cifar_train = datasets.CIFAR10(root='../data/cifar10', train=True, download=True, transform=None)
cifar_test = datasets.CIFAR10(root='../data/cifar10', train=False, download=True, transform=None)
test_labels = np.array([cifar_test[i][1] for i in range(len(cifar_test))], dtype=np.int)
train_labels = np.array([cifar_train[i][1] for i in range(len(cifar_train))], dtype=np.int)
test = np.array([np.asarray(cifar_test[i][0].convert('F')).reshape(32*32) for i in range(len(cifar_test))], dtype=np.float)
train = np.array([np.asarray(cifar_train[i][0].convert('F')).reshape(32*32) for i in range(len(cifar_train))], dtype=np.float)
train /= 255. # normalize data to be in range [0,1]
test /= 255.
return train, train_labels, test, test_labels, [32, 32]
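# All loaders above return (train, train_labels, test, test_labels, image_shape), with
# the images flattened to 1-d float vectors and pixel values scaled to [0, 1].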
def get_gaussian(num, dims=2, means=[0,0], vars=[1,1]):
    data = np.random.multivariate_normal(means, np.eye(dims), num)  # note: 'vars' is not used, unit variances are assumed
return data
def get_2state_gaussian_seq(lens,dims=2,means1=[2,2,2,2],means2=[5,5,5,5],vars1=[1,1,1,1],vars2=[1,1,1,1],anom_prob=1.0):
seqs = np.zeros((dims, lens))
lbls = np.zeros((1, lens), dtype=np.int8)
marker = 0
# generate first state sequence
for d in range(dims):
seqs[d, :] = np.random.randn(lens)*vars1[d] + means1[d]
prob = np.random.uniform()
if prob < anom_prob:
# add second state blocks
while True:
max_block_len = 0.6*lens
min_block_len = 0.1*lens
block_len = np.int(max_block_len*np.random.uniform()+3)
block_start = np.int(lens*np.random.uniform())
if block_len - (block_start+block_len-lens)-3 > min_block_len:
break
block_len = min( [block_len, block_len - (block_start+block_len-lens)-3] )
        lbls[0, block_start:block_start+block_len-1] = 1
marker = 1
for d in range(dims):
            seqs[d, block_start:block_start+block_len-1] = np.random.randn(block_len-1)*vars2[d] + means2[d]
return seqs, lbls, marker
def get_2state_anom_seq(lens, comb_block_len, anom_prob=1.0, num_blocks=1):
seqs = co.matrix(0.0, (1, lens))
lbls = co.matrix(0, (1, lens))
marker = 0
# generate first state sequence, gaussian noise 0=mean, 1=variance
seqs = np.zeros((1, lens))
lbls = np.zeros((1, lens))
bak = seqs.copy()
prob = np.random.uniform()
if prob < anom_prob:
# add second state blocks
block_len = np.int(np.floor(comb_block_len / float(num_blocks)))
marker = 1
# add a single block
blen = 0
for b in range(np.int(num_blocks)):
if (b==num_blocks-1 and b>1):
block_len = np.round(comb_block_len-blen)
isDone = False
while isDone == False:
start = np.int(np.random.uniform()*float(lens-block_len+1))
if np.sum(lbls[0,start:start+block_len]) == 0:
lbls[0, start:start+block_len] = 1
seqs[0, start:start+block_len] = bak[0, start:start+block_len]+4.0
isDone = True
break
blen += block_len
return seqs, lbls, marker
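# Illustrative usage sketch (not part of the original module): generate one toy anomaly
# sequence and report how much of it was flagged as the anomalous state.
if __name__ == '__main__':
    seqs, lbls, marker = get_2state_anom_seq(lens=200, comb_block_len=40, anom_prob=1.0, num_blocks=2)
    print('anomalous fraction: %.2f' % np.mean(lbls))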
| nicococo/tilitools | tilitools/utils_data.py | Python | mit | 5,881 | [
"Gaussian"
] | 2756fc10b985b244ecb6c0364438df37acbacd9ebb2911705234501461f1e30a |
# -------------------------------------------------------------------------
# Name: Land Cover Type module
# Purpose:
#
# Author: PB
#
# Created: 15/07/2016
# Copyright: (c) PB 2016
# -------------------------------------------------------------------------
from cwatm.management_modules.data_handling import *
class landcoverType(object):
"""
LAND COVER TYPE
runs the 6 land cover types through soil procedures
This routine calls the soil routine for each land cover type
**Global variables**
==================== ================================================================================ =========
Variable [self.var] Description Unit
==================== ================================================================================ =========
load_initial
sum_gwRecharge groundwater recharge m
modflow Flag: True if modflow_coupling = True in settings file --
modflow_timestep Chosen ModFlow model timestep (1day, 7days, 30days…)
sumed_sum_gwRecharge
GWVolumeVariation
snowEvap total evaporation from snow for a snow layers m
maxGWCapRise influence of capillary rise above groundwater level m
minInterceptCap Maximum interception read from file for forest and grassland land cover m
interceptStor simulated vegetation interception storage m
dynamicLandcover
landcoverSum
act_SurfaceWaterAbst
sum_interceptStor Total of simulated vegetation interception storage including all landcover types m
fracVegCover Fraction of area covered by the corresponding landcover type
minCropKC minimum crop factor (default 0.2) --
minTopWaterLayer
rootFraction1
maxRootDepth
rootDepth
KSat1
KSat2
KSat3
alpha1
alpha2
alpha3
lambda1
lambda2
lambda3
thetas1
thetas2
thetas3
thetar1
thetar2
thetar3
genuM1
genuM2
genuM3
genuInvM1
genuInvM2
genuInvM3
genuInvN1
genuInvN2
genuInvN3
invAlpha1
invAlpha2
invAlpha3
ws1 Maximum storage capacity in layer 1 m
ws2 Maximum storage capacity in layer 2 m
ws3 Maximum storage capacity in layer 3 m
wres1 Residual storage capacity in layer 1 m
wres2 Residual storage capacity in layer 2 m
wres3 Residual storage capacity in layer 3 m
wrange1
wrange2
wrange3
wfc1 Soil moisture at field capacity in layer 1
wfc2 Soil moisture at field capacity in layer 2
wfc3 Soil moisture at field capacity in layer 3
wwp1 Soil moisture at wilting point in layer 1
wwp2 Soil moisture at wilting point in layer 2
wwp3 Soil moisture at wilting point in layer 3
kUnSat3FC
kunSatFC12
kunSatFC23
cropCoefficientNC_fi
interceptCapNC_filen
coverFractionNC_file
sum_topwater quantity of water on the soil (flooding) (weighted sum for all landcover types) m
sum_soil
sum_w1
sum_w2
sum_w3
totalSto Total soil,snow and vegetation storage for each cell including all landcover typ m
arnoBetaOro
arnoBeta
adjRoot
maxtopwater maximum heigth of topwater m
totAvlWater
presumed_sum_gwRecha Previous groundwater recharge [m/timestep] (used for the ModFlow version) m
pretotalSto Previous totalSto m
sum_actBareSoilEvap
sum_openWaterEvap
addtoevapotrans
sum_runoff Runoff above the soil, more interflow, including all landcover types m
sum_directRunoff
sum_interflow
sum_availWaterInfilt
sum_capRiseFromGW capillar rise from groundwater to 3rd soil layer (summed up for all land cover c m
sum_act_irrConsumpti
sum_perc3toGW percolation from 3rd soil layer to groundwater (summed up for all land cover cla m
sum_prefFlow preferential flow from soil to groundwater (summed up for all land cover classes m
act_irrWithdrawal
act_nonIrrConsumptio
returnFlow
cellArea Cell area [m²] of each simulated mesh
baseflow simulated baseflow (= groundwater discharge to river) m
Precipitation Precipitation (input for the model) m
coverTypes land cover types - forest - grassland - irrPaddy - irrNonPaddy - water - sealed --
Rain Precipitation less snow m
SnowMelt total snow melt from all layers m
SnowCover snow cover (sum over all layers) m
ElevationStD
prevSnowCover snow cover of previous day (only for water balance) m
soilLayers Number of soil layers --
soildepth Thickness of the first soil layer m
soildepth12 Total thickness of layer 2 and 3 m
w1 Simulated water storage in the layer 1 m
w2 Simulated water storage in the layer 2 m
w3 Simulated water storage in the layer 3 m
topwater quantity of water above the soil (flooding) m
totalET Total evapotranspiration for each cell including all landcover types m
sum_actTransTotal
sum_interceptEvap
==================== ================================================================================ =========
**Functions**
"""
def __init__(self, model):
self.var = model.var
self.model = model
# noinspection PyTypeChecker
def initial(self):
"""
Initial part of the land cover type module
Initialise the six land cover types
* Forest No.0
* Grasland/non irrigated land No.1
* Paddy irrigation No.2
* non-Paddy irrigation No.3
* Sealed area No.4
* Water covered area No.5
And initialize the soil variables
"""
# make land cover change from year to year or fix it to 1 year
if returnBool('dynamicLandcover'):
self.var.dynamicLandcover = True
else:
self.var.dynamicLandcover = False
self.var.coverTypes= list(map(str.strip, cbinding("coverTypes").split(",")))
landcoverAll = ['fracVegCover','interceptStor','interceptCap','availWaterInfiltration','interceptEvap',
'directRunoff', 'openWaterEvap']
for variable in landcoverAll: vars(self.var)[variable] = np.tile(globals.inZero, (6, 1))
landcoverPara = ['minInterceptCap','cropDeplFactor','rootFraction1',
'maxRootDepth', 'topWaterLayer','interflow',
'cropCoefficientNC_filename', 'interceptCapNC_filename','coverFractionNC_filename']
# arrays stored as list not as numpy, because it can contain strings, single parameters or arrays
# list is filled with append afterwards
for variable in landcoverPara: vars(self.var)[variable] = []
# fraction (m2) of a certain irrigation type over (only) total irrigation area ; will be assigned by the landSurface module
# output variable per land cover class
landcoverVars = ['irrTypeFracOverIrr','fractionArea','totAvlWater','cropKC',
'effSatAt50', 'effPoreSizeBetaAt50', 'rootZoneWaterStorageMin','rootZoneWaterStorageRange',
'totalPotET','potTranspiration','soilWaterStorage',
'infiltration','actBareSoilEvap','landSurfaceRunoff','actTransTotal',
'gwRecharge','interflow','actualET','pot_irrConsumption','act_irrConsumption','irrDemand',
'topWaterLayer',
'perc3toGW','capRiseFromGW','netPercUpper','netPerc','prefFlow']
# for 6 landcover types
for variable in landcoverVars: vars(self.var)[variable] = np.tile(globals.inZero,(6,1))
#for 4 landcover types with soil underneath
landcoverVarsSoil = ['arnoBeta','rootZoneWaterStorageCap','rootZoneWaterStorageCap12','perc1to2','perc2to3','theta1','theta2','theta3']
for variable in landcoverVarsSoil: vars(self.var)[variable] = np.tile(globals.inZero,(4,1))
soilVars = ['adjRoot','perc','capRise','rootDepth','storCap']
# For 3 soil layers and 4 landcover types
for variable in soilVars: vars(self.var)[variable]= np.tile(globals.inZero,(self.var.soilLayers,4,1))
# set aggregated storages to zero
self.var.landcoverSum = ['interceptStor', 'interflow',
'directRunoff', 'totalPotET', 'potTranspiration', 'availWaterInfiltration',
'interceptEvap', 'infiltration', 'actBareSoilEvap', 'landSurfaceRunoff', 'actTransTotal', 'gwRecharge',
'openWaterEvap','capRiseFromGW','perc3toGW','prefFlow', 'actualET', 'act_irrConsumption']
for variable in self.var.landcoverSum: vars(self.var)["sum_"+variable] = globals.inZero.copy()
# for three soil layers
soilVars = ['w1','w2','w3']
for variable in soilVars: vars(self.var)[variable] = np.tile(globals.inZero,(4,1))
for variable in soilVars: vars(self.var)["sum_" + variable] = globals.inZero.copy()
self.var.totalET = globals.inZero.copy()
self.var.act_SurfaceWaterAbstract = globals.inZero.copy()
# ----------------------------------------------------------
# Load initial values and calculate basic soil parameters which are not changed in time
self.dynamic_fracIrrigation(init=True, dynamic = True)
i = 0
for coverType in self.var.coverTypes:
self.var.minInterceptCap.append(loadmap(coverType + "_minInterceptCap"))
# init values
if coverType in ['forest', 'grassland', 'irrPaddy', 'irrNonPaddy','sealed']:
self.var.interceptStor[i] = self.var.load_initial(coverType + "_interceptStor")
# summarize the following initial storages:
self.var.sum_interceptStor += self.var.fracVegCover[i] * self.var.interceptStor[i]
i += 1
self.var.minCropKC= loadmap('minCropKC')
self.var.minTopWaterLayer = loadmap("minTopWaterLayer")
self.var.maxGWCapRise = loadmap("maxGWCapRise")
i = 0
for coverType in self.var.coverTypes[:4]:
# other paramater values
# b coefficient of soil water storage capacity distribution
#self.var.minTopWaterLayer.append(loadmap(coverType + "_minTopWaterLayer"))
#self.var.minCropKC.append(loadmap(coverType + "_minCropKC"))
#self.var.minInterceptCap.append(loadmap(coverType + "_minInterceptCap"))
#self.var.cropDeplFactor.append(loadmap(coverType + "_cropDeplFactor"))
# parameter values
self.var.rootFraction1.append(loadmap(coverType + "_rootFraction1"))
#self.var.rootFraction2.append(loadmap(coverType + "_rootFraction2"))
soildepth_factor = loadmap('soildepth_factor')
self.var.maxRootDepth.append(loadmap(coverType + "_maxRootDepth")* soildepth_factor)
i += 1
i = 0
for coverType in self.var.coverTypes[:4]:
# calculate rootdepth for each soillayer and each land cover class
# self.var.rootDepth[0][i] = np.minimum(self.var.soildepth[0], self.var.maxRootDepth[i])
self.var.rootDepth[0][i] = self.var.soildepth[0].copy() # 0.05 m
# if land cover = forest
if coverType != 'grassland':
# soil layer 1 = root max of land cover - first soil layer
h1 = np.maximum(self.var.soildepth[1], self.var.maxRootDepth[i] - self.var.soildepth[0])
#
self.var.rootDepth[1][i] = np.minimum(self.var.soildepth12 - 0.05, h1)
# soil layer is minimim 0.05 m
self.var.rootDepth[2][i] = np.maximum(0.05, self.var.soildepth12 - self.var.rootDepth[1][i])
else:
self.var.rootDepth[1][i] = self.var.soildepth[1].copy()
self.var.rootDepth[2][i] = self.var.soildepth[2].copy()
i += 1
soilVars1 = ['KSat1','KSat2','KSat3','alpha1','alpha2','alpha3', 'lambda1','lambda2','lambda3','thetas1','thetas2','thetas3','thetar1','thetar2','thetar3']
for variable in soilVars1: vars(self.var)[variable] = []
i = 0
for coverType in self.var.coverTypes[:2]:
if i==0:
pre = coverType + "_"
else:
pre = ""
# ksat in cm/d-1 -> m/dm
self.var.KSat1.append((loadmap(pre + "KSat1"))/100)
self.var.KSat2.append((loadmap(pre + "KSat2"))/100)
self.var.KSat3.append((loadmap(pre + "KSat3"))/100)
self.var.alpha1.append((loadmap(pre + "alpha1")))
self.var.alpha2.append((loadmap(pre + "alpha2")))
self.var.alpha3.append((loadmap(pre + "alpha3")))
self.var.lambda1.append((loadmap(pre + "lambda1")))
self.var.lambda2.append((loadmap(pre + "lambda2")))
self.var.lambda3.append((loadmap(pre + "lambda3")))
self.var.thetas1.append((loadmap(pre + "thetas1")))
self.var.thetas2.append((loadmap(pre + "thetas2")))
self.var.thetas3.append((loadmap(pre + "thetas3")))
self.var.thetar1.append((loadmap(pre + "thetar1")))
self.var.thetar2.append((loadmap(pre + "thetar2")))
self.var.thetar3.append((loadmap(pre + "thetar3")))
i += 1
# Van Genuchten n and m coefficients
# GenuN1=Lambda+1
with np.errstate(invalid='ignore', divide='ignore'):
genuN1 = [x + 1 for x in self.var.lambda1] # unit [-]
genuN2 = [x + 1 for x in self.var.lambda2]
genuN3 = [x + 1 for x in self.var.lambda3]
# self.var.GenuM1=Lambda1/GenuN1
self.var.genuM1 = [x / y for x, y in zip(self.var.lambda1, genuN1)]
self.var.genuM2 = [x / y for x, y in zip(self.var.lambda2, genuN2)]
self.var.genuM3 = [x / y for x, y in zip(self.var.lambda3, genuN3)]
# self.var.GenuInvM1=1/self.var.GenuM1
self.var.genuInvM1 = [1 / x for x in self.var.genuM1]
self.var.genuInvM2 = [1 / x for x in self.var.genuM2]
self.var.genuInvM3 = [1 / x for x in self.var.genuM3]
# self.var.GenuInvN1=1/GenuN1
self.var.genuInvN1 = [1 / x for x in genuN1]
self.var.genuInvN2 = [1 / x for x in genuN2]
self.var.genuInvN3 = [1 / x for x in genuN3]
soilVars2 = ['ws1','ws2','ws3','wres1','wres2','wres3','wrange1','wrange2','wrange3','wfc1','wfc2','wfc3','wwp1','wwp2','wwp3','kunSatFC12','kunSatFC23']
for variable in soilVars2: vars(self.var)[variable] = []
i = 0
for coverType in self.var.coverTypes[:4]:
j = 0
if coverType != "forest": j = 1
self.var.ws1.append(self.var.thetas1[j] * self.var.rootDepth[0][i]) # unit [m]
self.var.ws2.append(self.var.thetas2[j] * self.var.rootDepth[1][i])
self.var.ws3.append(self.var.thetas3[j] * self.var.rootDepth[2][i])
self.var.wres1.append(self.var.thetar1[j] * self.var.rootDepth[0][i]) # unit [m] because of rootDepth [m]
self.var.wres2.append(self.var.thetar2[j] * self.var.rootDepth[1][i])
self.var.wres3.append(self.var.thetar3[j] * self.var.rootDepth[2][i])
self.var.wrange1.append(self.var.ws1[i] - self.var.wres1[i]) # unit [m]
self.var.wrange2.append(self.var.ws2[i] - self.var.wres2[i])
self.var.wrange3.append(self.var.ws3[i] - self.var.wres3[i])
# Soil moisture at field capacity (pF2, 100 cm) [cm water slice] # Mualem equation (van Genuchten, 1980)
# see https://en.wikipedia.org/wiki/Water_retention_curve
# alpha in 1/cm * cm water slice e.g. 10**4.2 around 15000 cm water slice for wilting point
self.var.wfc1.append(self.var.wres1[i] + self.var.wrange1[i] / ((1 + (self.var.alpha1[j] * 100) ** genuN1[j]) ** self.var.genuM1[j]))
self.var.wfc2.append(self.var.wres2[i] + self.var.wrange2[i] / ((1 + (self.var.alpha2[j] * 100) ** genuN2[j]) ** self.var.genuM2[j]))
self.var.wfc3.append(self.var.wres3[i] + self.var.wrange3[i] / ((1 + (self.var.alpha3[j] * 100) ** genuN3[j]) ** self.var.genuM3[j]))
# Soil moisture at wilting point (pF4.2, 10**4.2 cm) [cm water slice] # Mualem equation (van Genuchten, 1980)
self.var.wwp1.append(self.var.wres1[i] + self.var.wrange1[i] / ((1 + (self.var.alpha1[j] * (10**4.2)) ** genuN1[j]) ** self.var.genuM1[j])) # unit [m]
self.var.wwp2.append(self.var.wres2[i] + self.var.wrange2[i] / ((1 + (self.var.alpha2[j] * (10**4.2)) ** genuN2[j]) ** self.var.genuM2[j]))
self.var.wwp3.append(self.var.wres3[i] + self.var.wrange3[i] / ((1 + (self.var.alpha3[j] * (10**4.2)) ** genuN3[j]) ** self.var.genuM3[j]))
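            # van Genuchten retention curve used above (water amounts as slices, in m):
            #   w(h) = wres + (ws - wres) / (1 + (alpha*h)**n)**m,  with m = lambda/(lambda+1) = 1 - 1/n
            # evaluated at h = 100 cm (field capacity, pF 2) and h = 10**4.2 cm (wilting point, pF 4.2)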
satTerm1FC = np.maximum(0., self.var.wfc1[i] - self.var.wres1[i]) / self.var.wrange1[i] # unit [-]
satTerm2FC = np.maximum(0., self.var.wfc2[i] - self.var.wres2[i]) / self.var.wrange2[i]
satTerm3FC = np.maximum(0., self.var.wfc3[i] - self.var.wres3[i]) / self.var.wrange3[i]
# van Genuchten, Mualem equation see https://acsess.onlinelibrary.wiley.com/doi/epdf/10.2136/sssaj2000.643843x
# with Mualem (1976) L = 0.5 -> np.sqrt(satTerm2FC)
kUnSat1FC = self.var.KSat1[j] * np.sqrt(satTerm1FC) * np.square(1 - (1 - satTerm1FC ** self.var.genuInvM1[j]) ** self.var.genuM1[j])
kUnSat2FC = self.var.KSat2[j] * np.sqrt(satTerm2FC) * np.square(1 - (1 - satTerm2FC ** self.var.genuInvM2[j]) ** self.var.genuM2[j])
self.var.kUnSat3FC = self.var.KSat3[j] * np.sqrt(satTerm3FC) * np.square(1 - (1 - satTerm3FC ** self.var.genuInvM3[j]) ** self.var.genuM3[j])
self.var.kunSatFC12.append(np.sqrt(kUnSat1FC * kUnSat2FC))
self.var.kunSatFC23.append(np.sqrt(kUnSat2FC * self.var.kUnSat3FC))
i += 1
i = 0
for coverType in self.var.coverTypes[:4]:
# other paramater values
# b coefficient of soil water storage capacity distribution
#self.var.minTopWaterLayer.append(loadmap(coverType + "_minTopWaterLayer"))
#self.var.minCropKC.append(loadmap(coverType + "_minCropKC"))
#self.var.minInterceptCap.append(loadmap(coverType + "_minInterceptCap"))
#self.var.cropDeplFactor.append(loadmap(coverType + "_cropDeplFactor"))
# parameter values
self.var.rootFraction1.append(loadmap(coverType + "_rootFraction1"))
#self.var.rootFraction2 = self.var.rootFraction1
self.var.maxRootDepth.append(loadmap(coverType + "_maxRootDepth"))
# store filenames
self.var.cropCoefficientNC_filename.append(coverType + "_cropCoefficientNC")
self.var.interceptCapNC_filename.append(coverType + "_interceptCapNC")
self.var.coverFractionNC_filename.append(coverType + "_coverFractionNC")
# init values
#self.var.interflow[i] = self.var.load_initial(coverType + "_interflow")
self.var.w1[i] = self.var.load_initial(coverType + "_w1",default = self.var.wwp1[i])
self.var.w2[i] = self.var.load_initial(coverType + "_w2",default = self.var.wwp2[i])
self.var.w3[i] = self.var.load_initial(coverType + "_w3",default = self.var.wwp3[i])
soilVars = ['w1', 'w2', 'w3']
for variable in soilVars:
vars(self.var)["sum_" + variable] = globals.inZero.copy()
for No in range(4):
vars(self.var)["sum_" + variable] += self.var.fracVegCover[No] * vars(self.var)[variable][No]
# for paddy irrigation flooded paddy fields
self.var.topwater = self.var.load_initial("topwater", default= 0.) * globals.inZero.copy()
self.var.sum_topwater = self.var.fracVegCover[2] * self.var.topwater
self.var.sum_soil = self.var.sum_w1 + self.var.sum_w2 + self.var.sum_w3 + self.var.sum_topwater
self.var.totalSto = self.var.SnowCover + self.var.sum_interceptStor + self.var.sum_soil
# Improved Arno's scheme parameters: Hageman and Gates 2003
# arnoBeta defines the shape of soil water capacity distribution curve as a function of topographic variability
# b = max( (oh - o0)/(oh + omax), 0.01)
# oh: the standard deviation of orography, o0: minimum std dev, omax: max std dev
self.var.arnoBetaOro = (self.var.ElevationStD - 10.0) / (self.var.ElevationStD + 1500.0)
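            # with the minimum and maximum standard deviations of orography set here to o0 = 10 m and omax = 1500 m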
# for CALIBRATION
self.var.arnoBetaOro = self.var.arnoBetaOro + loadmap('arnoBeta_add')
self.var.arnoBetaOro = np.minimum(1.2, np.maximum(0.01, self.var.arnoBetaOro))
self.var.arnoBeta[i] = self.var.arnoBetaOro + loadmap(coverType + "_arnoBeta")
self.var.arnoBeta[i] = np.minimum(1.2, np.maximum(0.01, self.var.arnoBeta[i]))
# Due to large rooting depths, the third (final) soil layer may be pushed to its minimum of 0.05 m.
# In such a case, it may be better to turn off the root fractioning feature, as there is limited depth
            # in the third soil layer to hold water, while having a significant fraction of the roots.
# TODO: Extend soil depths to match maximum root depths
rootFrac = np.tile(globals.inZero,(self.var.soilLayers,1))
fractionroot12 = self.var.rootDepth[0][i] / (self.var.rootDepth[0][i] + self.var.rootDepth[1][i] )
rootFrac[0] = fractionroot12 * self.var.rootFraction1[i]
rootFrac[1] = (1 - fractionroot12) * self.var.rootFraction1[i]
rootFrac[2] = 1.0 - self.var.rootFraction1[i]
if 'rootFrac' in binding:
if not checkOption('rootFrac'):
root_depth_sum = self.var.rootDepth[0][i] + self.var.rootDepth[1][i] + self.var.rootDepth[2][i]
for layer in range(3):
rootFrac[layer] = self.var.rootDepth[layer][i] / root_depth_sum
rootFracSum = np.sum(rootFrac,axis=0)
for soilLayer in range(self.var.soilLayers):
self.var.adjRoot[soilLayer][i] = rootFrac[soilLayer] / rootFracSum
i += 1
# for maximum of topwater flooding (default = 0.05m)
self.var.maxtopwater = 0.05
if "irrPaddy_maxtopwater" in binding:
self.var.maxtopwater = loadmap('irrPaddy_maxtopwater')
#self.var.landcoverSumSum = ['directRunoff', 'totalPotET', 'potTranspiration', "Precipitation", 'ETRef','gwRecharge','Runoff']
#for variable in self.var.landcoverSumSum:
# vars(self.var)["sumsum_" + variable] = globals.inZero.copy()
# for irrigation of non paddy -> No =3
totalWaterPlant1 = np.maximum(0., self.var.wfc1[3] - self.var.wwp1[3]) #* self.var.rootDepth[0][3]
totalWaterPlant2 = np.maximum(0., self.var.wfc2[3] - self.var.wwp2[3]) #* self.var.rootDepth[1][3]
#totalWaterPlant3 = np.maximum(0., self.var.wfc3[3] - self.var.wwp3[3]) * self.var.rootDepth[2][3]
self.var.totAvlWater = totalWaterPlant1 + totalWaterPlant2 #+ totalWaterPlant3
# --------------------------------------------------------------------------
def dynamic_fracIrrigation(self, init = False, dynamic = True):
"""
Dynamic part of the land cover type module
Calculating fraction of land cover
* loads the fraction of landcover for each year from netcdf maps
* calculate the fraction of 6 land cover types based on the maps
:param init: (optional) True: set for the first time of a run
:param dynamic: used in the dynmic run not in the initial phase
:return: -
"""
#if checkOption('includeIrrigation') and checkOption('dynamicIrrigationArea'):
# updating fracVegCover of landCover (for historical irrigation areas, done at yearly basis)
# if first day of the year or first day of run
if init and dynamic:
if self.var.dynamicLandcover:
landcoverYear = dateVar['currDate']
else:
landcoverYear = datetime.datetime(int(binding['fixLandcoverYear']), 1, 1)
i = 0
for coverType in self.var.coverTypes:
self.var.fracVegCover[i] = readnetcdf2('fractionLandcover', landcoverYear, useDaily="yearly", value= 'frac'+coverType)
i += 1
# for Xiaogang's agent model
if "paddyfraction" in binding:
self.var.fracVegCover[2] = loadmap('paddyfraction')
self.var.fracVegCover[3] = loadmap('nonpaddyfraction')
# correction of grassland if sum is not 1.0
sum = np.sum(self.var.fracVegCover,axis=0)
self.var.fracVegCover[1] = np.maximum(0.,self.var.fracVegCover[1] + 1.0 - sum)
sum = np.sum(self.var.fracVegCover, axis=0)
self.var.fracVegCover[0] = np.maximum(0., self.var.fracVegCover[0] + 1.0 - sum)
sum = np.sum(self.var.fracVegCover,axis=0)
# sum of landcover without water and sealed
# self.var.sum_fracVegCover = np.sum(self.var.fracVegCover[0:4], axis=0)
            # if irrigation is off, the fractions of paddy and non-paddy irrigation are added to the land cover 'grassland'
if not(checkOption('includeIrrigation')):
self.var.fracVegCover[1] = self.var.fracVegCover[1] + self.var.fracVegCover[2] + self.var.fracVegCover[3]
self.var.fracVegCover[2] = 0.0
self.var.fracVegCover[3] = 0.0
#self.var.fracVegCover[0] = self.var.fracVegCover[0] + self.var.fracVegCover[4]
#self.var.fracVegCover[1] = self.var.fracVegCover[1] + self.var.fracVegCover[5]
"""
self.var.fracVegCover[0] = 0.2 # forest
self.var.fracVegCover[1] = 0.2 # others (grassland)
self.var.fracVegCover[2] = 0.2 # paddy irrigation
self.var.fracVegCover[3] = 0.2 # non paddy irrigation
self.var.fracVegCover[4] = 0.1
self.var.fracVegCover[5] = 0.1
"""
# --------------------------------------------------------------------------
def dynamic(self):
"""
Dynamic part of the land cover type module
Calculating soil for each of the 6 land cover class
* calls evaporation_module.dynamic
* calls interception_module.dynamic
* calls soil_module.dynamic
* calls sealed_water_module.dynamic
And sums every thing up depending on the land cover type fraction
"""
#if (dateVar['curr'] == 15):
# ii=1
if checkOption('calcWaterBalance'):
preIntStor = self.var.sum_interceptStor.copy()
preStor1 = self.var.sum_w1.copy()
preStor2 = self.var.sum_w2.copy()
preStor3 = self.var.sum_w3.copy()
pretop = self.var.sum_topwater
### To compute water balance for modflow
if self.var.modflow:
if (dateVar['curr'] - int(dateVar['curr'] / self.var.modflow_timestep) * self.var.modflow_timestep) == 1 and \
dateVar['curr'] > self.var.modflow_timestep: # if it is the first step of the week
self.var.presumed_sum_gwRecharge = self.var.sumed_sum_gwRecharge.copy()
# stormodf = np.nansum((self.var.presumed_sum_gwRecharge/self.var.modflow_timestep-self.var.capillar-self.var.baseflow) * self.var.cellArea) # From ModFlow during the previous step
# stormodf = self.var.GWVolumeVariation / self.var.modflow_timestep # GW volume change from the previous ModFlow run (difference betwwen water levels times porosity)
self.var.pretotalSto = self.var.totalSto.copy()
coverNo = 0
# update soil (loop per each land cover type):
for coverType in self.var.coverTypes:
if checkOption('includeIrrigation'):
usecovertype = 4 # include paddy and non paddy irrigation
else:
usecovertype = 2 # exclude irrigation
# calculate evaporation and transpiration for soil land cover types (not for sealed and water covered areas)
if coverNo < usecovertype:
self.model.evaporation_module.dynamic(coverType, coverNo)
self.model.interception_module.dynamic(coverType, coverNo)
coverNo += 1
# -----------------------------------------------------------
# Calculate water available for infiltration
# ********* WATER Demand *************************
self.model.waterdemand_module.dynamic()
# Calculate soil
coverNo = 0
for coverType in self.var.coverTypes:
if checkOption('includeIrrigation'):
usecovertype = 4 # include paddy and non paddy irrigation
else:
                usecovertype = 2  # exclude irrigation
if coverNo < usecovertype:
self.model.soil_module.dynamic(coverType, coverNo)
if coverNo > 3:
# calculate for openwater and sealed area
self.model.sealed_water_module.dynamic(coverType, coverNo)
coverNo += 1
# aggregated variables by fraction of land cover
for variable in self.var.landcoverSum:
vars(self.var)["sum_" + variable] = globals.inZero.copy()
for No in range(6):
vars(self.var)["sum_" + variable] += self.var.fracVegCover[No] * vars(self.var)[variable][No]
#print "--", self.var.sum_directRunoff
soilVars = ['w1','w2','w3']
for variable in soilVars:
vars(self.var)["sum_" + variable] = globals.inZero.copy()
for No in range(4):
vars(self.var)["sum_" + variable] += self.var.fracVegCover[No] * vars(self.var)[variable][No]
self.var.sum_topwater = self.var.fracVegCover[2] * self.var.topwater
self.var.totalET = self.var.sum_actTransTotal + self.var.sum_actBareSoilEvap + self.var.sum_openWaterEvap + self.var.sum_interceptEvap + self.var.snowEvap + self.var.addtoevapotrans
# addtoevapotrans: part of water demand which is lost due to evaporation
self.var.sum_soil = self.var.sum_w1 + self.var.sum_w2 + self.var.sum_w3 + self.var.sum_topwater
self.var.totalSto = self.var.SnowCover + self.var.sum_interceptStor + self.var.sum_soil
self.var.sum_runoff = self.var.sum_directRunoff + self.var.sum_interflow
### Printing the soil+GW water balance (considering no pumping), without the surface part
#print('Date : ', dateVar['currDatestr'])
if checkOption('calcWaterBalance'):
if self.var.modflow:
if dateVar['curr'] > self.var.modflow_timestep: # from the second step
storcwat = np.sum((self.var.totalSto - self.var.pretotalSto) * self.var.cellArea) # Daily CWAT storage variations
cwatbudg = np.sum((self.var.Precipitation - self.var.sum_runoff - self.var.totalET + self.var.presumed_sum_gwRecharge / self.var.modflow_timestep - self.var.sum_gwRecharge - self.var.baseflow) * self.var.cellArea) # Inputs-Outputs (baseflow comes from the previous ModFlow model)
print('CWatM-ModFlow water balance error [%]: ',
round(100 * (cwatbudg - storcwat - self.var.GWVolumeVariation / self.var.modflow_timestep) /
(0.5 * cwatbudg + 0.5 * storcwat + 0.5 * self.var.GWVolumeVariation / self.var.modflow_timestep) * 100) / 100)
# --------------------------------------------------------------------
#if (dateVar['curr'] == 104):
# ii=1
if checkOption('calcWaterBalance'):
self.model.waterbalance_module.waterBalanceCheck(
[self.var.Rain,self.var.SnowMelt], # In
[self.var.sum_availWaterInfiltration,self.var.sum_interceptEvap], # Out
[preIntStor], # prev storage
[self.var.sum_interceptStor],
"InterAll", False)
if checkOption('calcWaterBalance'):
self.model.waterbalance_module.waterBalanceCheck(
[self.var.sum_availWaterInfiltration,self.var.sum_capRiseFromGW, self.var.sum_act_irrConsumption], # In
[self.var.sum_directRunoff,self.var.sum_perc3toGW, self.var.sum_prefFlow,
self.var.sum_actTransTotal, self.var.sum_actBareSoilEvap,self.var.sum_openWaterEvap], # Out
[pretop,preStor1,preStor2,preStor3], # prev storage
[self.var.sum_w1, self.var.sum_w2, self.var.sum_w3,self.var.sum_topwater],
"Soil_sum1", False)
if checkOption('calcWaterBalance'):
self.model.waterbalance_module.waterBalanceCheck(
[self.var.Rain,self.var.SnowMelt, self.var.sum_act_irrConsumption], # In
[self.var.sum_directRunoff,self.var.sum_interflow,self.var.sum_gwRecharge,
self.var.sum_actTransTotal, self.var.sum_actBareSoilEvap,self.var.sum_openWaterEvap,self.var.sum_interceptEvap], # Out
[pretop,preStor1,preStor2,preStor3,preIntStor], # prev storage
[self.var.sum_w1, self.var.sum_w2, self.var.sum_w3,self.var.sum_interceptStor,self.var.sum_topwater],
"Soil_sum111", False)
if checkOption('calcWaterBalance'):
self.model.waterbalance_module.waterBalanceCheck(
[self.var.Precipitation, self.var.sum_act_irrConsumption], # In
[self.var.sum_directRunoff,self.var.sum_interflow,self.var.sum_gwRecharge,
self.var.sum_actTransTotal, self.var.sum_actBareSoilEvap,self.var.sum_openWaterEvap,self.var.sum_interceptEvap,self.var.snowEvap], # Out
[pretop,preStor1,preStor2,preStor3,preIntStor,self.var.prevSnowCover], # prev storage
[self.var.sum_w1, self.var.sum_w2, self.var.sum_w3,self.var.sum_interceptStor,self.var.SnowCover,self.var.sum_topwater],
"Soil_sum2", False)
if checkOption('calcWaterBalance'):
self.model.waterbalance_module.waterBalanceCheck(
[self.var.Precipitation, self.var.sum_act_irrConsumption], # In
[self.var.sum_directRunoff,self.var.sum_interflow,self.var.sum_gwRecharge,
self.var.sum_actTransTotal, self.var.sum_actBareSoilEvap, self.var.sum_openWaterEvap, self.var.sum_interceptEvap, self.var.snowEvap], # Out
[self.var.pretotalSto], # prev storage
[self.var.totalSto],
"Soil_sum2b", False)
if checkOption('calcWaterBalance'):
self.model.waterbalance_module.waterBalanceCheck(
[self.var.Precipitation,self.var.act_irrWithdrawal], # In
[self.var.sum_directRunoff,self.var.sum_interflow,self.var.sum_gwRecharge,
self.var.totalET,self.var.act_nonIrrConsumption,self.var.returnFlow ], # Out
[self.var.pretotalSto], # prev storage
[self.var.totalSto],
"Soil_sum3", False) # -> something wrong
if checkOption('calcWaterBalance'):
self.model.waterbalance_module.waterBalanceCheck(
[self.var.Precipitation], # In
[self.var.sum_runoff,self.var.sum_gwRecharge,self.var.totalET ], # out
[self.var.pretotalSto], # prev storage
[self.var.totalSto],
"Soil_sum4", False)
#[self.var.waterWithdrawal], # In
#[self.var.sumirrConsumption, self.var.returnFlow, self.var.addtoevapotrans, nonIrruse], # Out
#a = decompress(self.var.sumsum_Precipitation)
#b = cellvalue(a,81,379)
#print self.var.sum_directRunoff
#report(decompress(self.var.sumsum_Precipitation), "c:\work\output\Prsum.map")
#report(decompress(self.var.sumsum_gwRecharge), "c:\work\output\gwrsum.map")
| CWatM/CWatM | cwatm/hydrological_modules/landcoverType.py | Python | gpl-3.0 | 46,001 | [
"NetCDF"
] | 77e656c06a4d4aaa0fcad9b9c01e1d17dab561307e75b80476d29bb2b7fa79f8 |
from django.conf.urls import patterns, include, url
from django.views.generic import DetailView, ListView
from tastypie.serializers import Serializer
from tastypie.api import Api
import neuroelectro.api
import inspect
from django.contrib import admin
# Uncomment the next two lines to enable the admin:
#from django.contrib import admin
#admin.autodiscover()
urlpatterns = patterns('neuroelectro.views',
url(r'^accounts/login/$', 'login'),
url(r'^accounts/logout/$', 'logout'),
url(r'^admin_list_email/$', 'admin_list_email'),
url(r'^$', 'splash_page'),
url(r'^neuron/index/$', 'neuron_index'),
url(r'^neuron/(?P<neuron_id>\d+)/$', 'neuron_detail'),
url(r'^neuron/(?P<neuron_id>\d+)/data/$', 'neuron_data_detail'),
url(r'^ephys_prop/(?P<ephys_prop_id>\d+)/data/$', 'ephys_data_detail'),
url(r'^ephys_prop/index/$', 'ephys_prop_index'),
url(r'^ephys_prop/(?P<ephys_prop_id>\d+)/$', 'ephys_prop_detail'),
url(r'^data_table/(?P<data_table_id>\d+)/$', 'data_table_detail'),
url(r'^data_table/(?P<data_table_id>\d+)/no_annotation/$', 'data_table_detail_no_annotation'),
url(r'^article/(?P<article_id>\d+)/$', 'article_detail'),
url(r'^article_full_text/(?P<article_id>\d+)/$', 'article_full_text_detail'),
url(r'^article/(?P<article_id>\d+)/metadata/$', 'article_metadata'),
url(r'^ephys_concept_map/(?P<ephys_concept_map_id>\d+)/$', 'ephys_concept_map_detail'),
url(r'^concept_map_detail/(?P<data_table_id>\d+)/(?P<data_table_cell_id>\w+\-\d+)/$', 'concept_map_detail'),
url(r'^neuron/add/$', 'neuron_add'),
# url(r'^data_table/(?P<data_table_id>\d+)/remove/$', 'data_table_validate_all'),
url(r'^article/index/$', 'article_list'),
url(r'^article/metadata_index/$', 'article_metadata_list'),
url(r'^display_meta/$', 'display_meta'),
url(r'^neuron_search_form/$', 'neuron_search_form'),
url(r'^neuron_search_form/neuron_search/$', 'neuron_search'),
url(r'^neuron/clustering/$', 'neuron_clustering'),
url(r'^faqs/$', 'faqs'),
url(r'^contact_info/$', 'contact_info'),
#@url(r'^api/$', 'api'),
url(r'^api/docs/$', 'api_docs'),
url(r'^contribute/$', 'contribute'),
url(r'^unsubscribe/$', 'unsubscribe'),
url(r'^publications/$', 'publications'),
#url(r'^api/neuron_list/$', 'nlex_neuron_id_list'),
url(r'^ephys_prop/ontology/$', 'ephys_prop_ontology'),
#url(r'^data_table/(?P<data_table_id>\d+)/validate/view/$', 'data_table_detail_validate'),
url(r'^neuron_data_add/$', 'neuron_data_add'),
url(r'^mailing_list_form/$', 'mailing_list_form'),
url(r'^mailing_list_form_post/$', 'mailing_list_form_post'),
url(r'^nedm_comment_box/$', 'nedm_comment_box'),
#url(r'^weblog/', include('zinnia.urls')),
#url(r'^comments/', include('django.contrib.comments.urls')),
url(r'^ckeditor/', include('ckeditor.urls')),
# for curation interface
url(r'^curator_view/$', 'curator_view'),
url(r'^data_table/validate_list/$', 'data_table_to_validate_list'),
url(r'^concept_map/validate_list/$', 'concept_map_to_validate_list'),
url(r'^data_table/expert_list/$', 'data_table_expert_list'),
url(r'^data_table/no_neuron_list/$', 'data_table_no_neuron_list'),
# for asking someone to become a curator
url(r'^neuron/(?P<neuron_id>\d+)/curate_list/$', 'neuron_article_curate_list'),
url(r'^neuron/(?P<neuron_id>\d+)/curator_ask/$', 'neuron_curator_ask'),
url(r'^neuron/(?P<neuron_id>\d+)/become_curator/$', 'neuron_become_curator'),
# suggesting articles for curation
#TODO: why are these broken?
#url(r'^neuron/(?P<neuron_id>\d+)/article_suggest/$', 'neuron_article_suggest'),
#url(r'^neuron/(?P<neuron_id>\d+)/article_suggest_post/$', 'neuron_article_suggest_post'),
#url(r'^article_suggest/$', 'article_suggest'),
#url(r'^article_suggest_post/$', 'article_suggest_post'),
)
# Override tastypie.serializers.Serializer.to_html so that 'format=json' is not needed.
# json will be the new default, and a request for html will be passed to the json serializer.
# Remove if/when tastypie implements the to_html serializer.
def to_html(self, data, options=None):
return Serializer.to_json(self, data, options=options) # RICK EDIT
Serializer.to_html = to_html
# Add every resource class (except the base resource class) in neuroelectro.api to the API.
v1_api = Api(api_name='1')
for (class_name,class_object) in inspect.getmembers(neuroelectro.api):
if 'Resource' in class_name and class_name != 'ModelResource':
v1_api.register(class_object())
urlpatterns += patterns("",
#url('^admin/', include(admin.site.urls)),
url(r'^api/', include(v1_api.urls)),
url('', include('social.apps.django_app.urls', namespace='social'))
)
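# With the registration loop above, every resource is mounted under /api/1/...
# (the Api object was created with api_name='1').  For example, a resource whose
# Meta resource_name is 'neuron' would be served at /api/1/neuron/ and
# /api/1/neuron/schema/ -- illustrative names, not a claim about neuroelectro.api.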
| lessc0de/neuroelectro_org | neuroelectro/urls.py | Python | gpl-2.0 | 4,885 | [
"NEURON"
] | d63ca2740787891108e5ab01111980c3a0d5c88c70b8848356023b2ed007e26f |
import pysam
from svviz.tabix import ensureIndexed
class AnnotationSet(object):
def __init__(self, tabixPath, preset="bed"):
self.preset = preset
self.tabixPath = ensureIndexed(tabixPath, self.preset)
self._tabix = None
self.usingChromFormat = False
self._checkChromFormat()
def __getstate__(self):
""" allows pickling of DataHub()s """
state = self.__dict__.copy()
state["_tabix"] = None
return state
@property
def tabix(self):
if self._tabix is None:
self._tabix = pysam.Tabixfile(self.tabixPath)
return self._tabix
def _checkChromFormat(self):
usingChromFormat = 0
count = 0
for anno in self.tabix.fetch():
if anno.startswith("#"):
continue
if anno.startswith("chr"):
                usingChromFormat += 1
            if count > 10:
                break
            count += 1
        if count and usingChromFormat / float(count) > 0.8:
self.usingChromFormat = True
def fixChromFormat(self, chrom):
if not chrom.startswith("chr") and self.usingChromFormat:
chrom = "chr" + str(chrom)
if chrom.startswith("chr") and not self.usingChromFormat:
chrom = chrom.replace("chr", "")
return chrom
def getAnnotations(self, chrom, start, end, clip=False):
""" Returns annotations, in genome order, from the requested genomic region """
annotations = []
chrom = self.fixChromFormat(chrom)
if chrom not in self.tabix.contigs:
return []
for row in self.tabix.fetch(chrom, start, end):
values = row.split("\t")
anno = Annotation(values[0], values[1], values[2], values[5], values[3])
if clip:
anno.start = max(anno.start, start)
anno.end = min(anno.end, end)
annotations.append(anno)
return annotations
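# A minimal usage sketch (file name and coordinates are made up): build an
# AnnotationSet from a sorted, bgzip-compressed, tabix-indexed BED file and fetch
# the features overlapping a region, clipped to the query window.
#     genes = AnnotationSet("genes.sorted.bed.gz")
#     for anno in genes.getAnnotations("chr1", 10000, 20000, clip=True):
#         print(anno.chrom, anno.start, anno.end, anno.strand, anno.name)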
class Annotation(object):
def __init__(self, chrom, start, end, strand, name, info=None):
self.chrom = chrom
self.start = int(start)
self.end = int(end)
self.strand = strand
self.name = name
self.info = info if info is not None else {}
@property
def label(self):
return self.name | svviz/svviz | src/svviz/annotations.py | Python | mit | 2,368 | [
"pysam"
] | ec8ebfcf64a9764d77697f8841c153f0f49e1c92b827280e23430df877ae3054 |
""" ProxyProvider implementation for the proxy generation using local a PUSP
proxy server
"""
import urllib
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Security.X509Chain import X509Chain # pylint: disable=import-error
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.Resources.ProxyProvider.ProxyProvider import ProxyProvider
__RCSID__ = "$Id$"
class PUSPProxyProvider(ProxyProvider):
def __init__(self, parameters=None):
super(PUSPProxyProvider, self).__init__(parameters)
def getProxy(self, userDict):
""" Generate user proxy
:param dict userDict: user description dictionary with possible fields:
FullName, UserName, DN, EMail, DiracGroup
:return: S_OK(basestring)/S_ERROR() -- basestring is a proxy string
"""
userDN = userDict.get('DN')
if not userDN:
return S_ERROR('Incomplete user information')
diracGroup = userDict.get('DiracGroup')
if not diracGroup:
return S_ERROR('Incomplete user information')
result = Registry.getGroupsForDN(userDN)
if not result['OK']:
return result
validGroups = result['Value']
if diracGroup not in validGroups:
return S_ERROR('Invalid group %s for user' % diracGroup)
voName = Registry.getVOForGroup(diracGroup)
if not voName:
return S_ERROR('Can not determine VO for group %s' % diracGroup)
csVOMSMapping = Registry.getVOMSAttributeForGroup(diracGroup)
if not csVOMSMapping:
return S_ERROR("No VOMS mapping defined for group %s in the CS" % diracGroup)
vomsAttribute = csVOMSMapping
vomsVO = Registry.getVOMSVOForGroup(diracGroup)
puspServiceURL = self.parameters.get('ServiceURL')
if not puspServiceURL:
return S_ERROR('Can not determine PUSP service URL for VO %s' % voName)
user = userDN.split(":")[-1]
puspURL = "%s?voms=%s:%s&proxy-renewal=false&disable-voms-proxy=false" \
"&rfc-proxy=true&cn-label=user:%s" % (puspServiceURL, vomsVO, vomsAttribute, user)
try:
proxy = urllib.urlopen(puspURL).read()
except Exception as e:
      return S_ERROR('Failed to get proxy from the PUSP server: %s' % e)
chain = X509Chain()
chain.loadChainFromString(proxy)
chain.loadKeyFromString(proxy)
result = chain.getCredentials()
if not result['OK']:
return S_ERROR('Failed to get a valid PUSP proxy')
credDict = result['Value']
if credDict['identity'] != userDN:
return S_ERROR('Requested DN does not match the obtained one in the PUSP proxy')
timeLeft = credDict['secondsLeft']
result = chain.generateProxyToString(lifeTime=timeLeft,
diracGroup=diracGroup)
if not result['OK']:
return result
proxyString = result['Value']
return S_OK((proxyString, timeLeft))
def getUserDN(self, userDict):
""" Get DN of the user certificate that will be created
:param dict userDict:
:return: S_OK/S_ERROR, Value is the DN string
"""
userDN = userDict.get('DN')
if not userDN:
return S_ERROR('Incomplete user information')
return S_OK(userDN)
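# Rough usage sketch (values are illustrative, not taken from a real configuration):
# the provider is normally instantiated by the DIRAC ProxyProvider factory, but a
# direct call would look roughly like this.
#     pp = PUSPProxyProvider(parameters={'ServiceURL': 'https://pusp.example.org/gridsite-delegation'})
#     result = pp.getProxy({'DN': '/DC=org/DC=example/CN=pusp-service:someuser',
#                           'DiracGroup': 'biomed_user'})
#     if result['OK']:
#         proxyString, secondsLeft = result['Value']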
| chaen/DIRAC | Resources/ProxyProvider/PUSPProxyProvider.py | Python | gpl-3.0 | 3,140 | [
"DIRAC"
] | 82cfd4370fdfe752b7bbe455f910c297cd7349e3e66dff7207a10ae79d547c0e |
#!/usr/bin/env python
"""
lttree_postprocess.py
This is a stand-alone python script which checks the files created by
lttree.py to insure that the standard instance-variables ($variables)
have all been defined. This script performs a task which is very similar
to the task performed by lttree_check.py. This script attempts to detect
mistakes in the names of $atom, $bond, $angle, $dihedral, $improper, & $mol
variables.
"""
import sys
try:
from .lttree_styles import *
from .ttree_lex import ExtractCatName
except (ImportError, SystemError, ValueError):
# not installed as a package
from lttree_styles import *
from ttree_lex import ExtractCatName
g_program_name = __file__.split('/')[-1] # = 'lttree_postprocess.py'
g_version_str = '0.5.1'
g_date_str = '2017-8-23'
def main():
atom_style = 'full'
ttree_assignments_fname = 'ttree_assignments.txt'
defined_mols = set([])
defined_atoms = set([])
defined_bonds = set([])
defined_angles = set([])
defined_dihedrals = set([])
defined_impropers = set([])
g_no_check_msg = \
'(To override this error, run moltemplate using the \"-nocheck\" argument.)\n'
if len(sys.argv) > 1:
for i in range(0, len(sys.argv)):
if ((sys.argv[i].lower() == '-atomstyle') or
(sys.argv[i].lower() == '-atom-style') or
(sys.argv[i].lower() == '-atom_style')):
if i + 1 >= len(sys.argv):
raise InputError('Error(' + g_program_name + '): The ' + sys.argv[i] + ' flag should be followed by a LAMMPS\n'
' atom_style name (or single quoted string containing a space-separated\n'
' list of column names such as: atom-ID atom-type q x y z molecule-ID.)\n')
atom_style = sys.argv[i + 1]
elif ((sys.argv[i].lower() == '-ttreeassignments') or
(sys.argv[i].lower() == '-ttree-assignments') or
(sys.argv[i].lower() == '-ttree_assignments')):
if i + 1 >= len(sys.argv):
raise InputError('Error(' + g_program_name + '): The ' + sys.argv[i] + ' flag should be followed by \n'
' a file containing the variable bindings created by ttree/moltemplate.\n')
ttree_assignments_fname = sys.argv[i + 1]
else:
pass # ignore other arguments (they are intended for lttree.py)
atom_column_names = AtomStyle2ColNames(atom_style)
i_atomid = 0
i_molid = -1
for i in range(0, len(atom_column_names)):
if atom_column_names[i].lower() == 'atom-id':
i_atomid = i
elif atom_column_names[i].lower() == 'molecule-id':
i_molid = i
i_max_column = max(i_atomid, i_molid)
# The following variables are defined in "lttree_styles.py"
#data_atoms="Data Atoms"
#data_masses="Data Masses"
#data_velocities="Data Velocities"
#data_bonds="Data Bonds"
#data_angles="Data Angles"
#data_dihedrals="Data Dihedrals"
#data_impropers="Data Impropers"
sys.stderr.write(g_program_name + ' v' +
g_version_str + ' ' + g_date_str + '\n')
try:
# ------------ defined_atoms ------------
try:
f = open(data_atoms + '.template', 'r')
except:
raise InputError('Error(' + g_program_name + '): Unable to open file\n' +
'\"' + data_atoms + '.template\"\n'
' for reading. (Do your files lack a \"' +
data_atoms + '\" section?)\n'
+ g_no_check_msg + '\n')
for line_orig in f:
ic = line_orig.find('#')
if ic != -1:
line = line_orig[:ic]
else:
line = line_orig.rstrip('\n')
tokens = line.strip().split()
if len(tokens) == 0:
pass
elif len(tokens) <= i_max_column:
raise InputError('Error(' + g_program_name + '): The following line from\n'
' "\"' + data_atoms + '.template\" has bad format:\n\n'
+ line_orig + '\n'
                                 ' This is probably an internal error. (Feel free to contact the developer.)\n'
+ g_no_check_msg + '\n')
else:
defined_atoms.add(tokens[i_atomid])
if i_molid != -1:
defined_mols.add(tokens[i_molid])
f.close()
# ------------ defined_bonds ------------
try:
f = open(data_bonds + '.template', 'r')
for line_orig in f:
ic = line_orig.find('#')
if ic != -1:
line = line_orig[:ic]
else:
line = line_orig.rstrip('\n')
tokens = line.strip().split()
if len(tokens) == 0:
pass
elif len(tokens) < 4:
raise InputError('Error(' + g_program_name + '): The following line from\n'
' "\"' + data_bonds + '.template\" has bad format:\n\n'
+ line_orig + '\n'
                                     ' This is probably an internal error. (Feel free to contact the developer.)\n'
+ g_no_check_msg + '\n')
else:
defined_bonds.add(tokens[0])
f.close()
except:
pass # Defining bonds (stored in the data_bonds file) is optional
# ------------ defined_angles ------------
try:
f = open(data_angles + '.template', 'r')
for line_orig in f:
ic = line_orig.find('#')
if ic != -1:
line = line_orig[:ic]
else:
line = line_orig.rstrip('\n')
tokens = line.strip().split()
if len(tokens) == 0:
pass
elif len(tokens) < 5:
raise InputError('Error(' + g_program_name + '): The following line from\n'
' "\"' + data_angles + '.template\" has bad format:\n\n'
+ line_orig + '\n'
                                     ' This is probably an internal error. (Feel free to contact the developer.)\n'
+ g_no_check_msg + '\n')
else:
defined_angles.add(tokens[0])
f.close()
except:
pass # Defining angles (stored in the data_angles file) is optional
# ------------ defined_dihedrals ------------
try:
f = open(data_dihedrals + '.template', 'r')
for line_orig in f:
ic = line_orig.find('#')
if ic != -1:
line = line_orig[:ic]
else:
line = line_orig.rstrip('\n')
tokens = line.strip().split()
if len(tokens) == 0:
pass
elif len(tokens) < 6:
raise InputError('Error(' + g_program_name + '): The following line from\n'
' "\"' + data_dihedrals + '.template\" has bad format:\n\n'
+ line_orig + '\n'
                                     ' This is probably an internal error. (Feel free to contact the developer.)\n'
+ g_no_check_msg + '\n')
else:
defined_dihedrals.add(tokens[0])
f.close()
except:
# Defining dihedrals (stored in the data_dihedrals file) is optional
pass
# ------------ defined_impropers ------------
try:
f = open(data_impropers + '.template', 'r')
for line_orig in f:
ic = line_orig.find('#')
if ic != -1:
line = line_orig[:ic]
else:
line = line_orig.rstrip('\n')
tokens = line.strip().split()
if len(tokens) == 0:
pass
elif len(tokens) < 6:
raise InputError('Error(' + g_program_name + '): The following line from\n'
' "\"' + data_impropers + '.template\" has bad format:\n\n'
+ line_orig + '\n'
                                     ' This is probably an internal error. (Feel free to contact the developer.)\n'
+ g_no_check_msg + '\n')
else:
defined_impropers.add(tokens[0])
f.close()
except:
# Defining impropers (stored in the data_impropers file) is optional
pass
# ---- Check ttree_assignments to make sure variables are defined ----
try:
f = open(ttree_assignments_fname, 'r')
except:
raise InputError('Error(' + g_program_name + '): Unable to open file\n' +
'\"' + ttree_assignments_fname + '\"\n'
' for reading. (Do your files lack a \"' +
data_atoms + '\" section?)\n'
+ g_no_check_msg + '\n')
for line_orig in f:
ic = line_orig.find('#')
if ic != -1:
line = line_orig[:ic]
usage_location_str = 'near ' + line_orig[ic + 1:]
else:
line = line_orig.rstrip('\n')
usage_location_str = ''
tokens = line.strip().split()
if len(tokens) == 0:
pass
if len(tokens) > 0:
# This file contains a list of variables of the form:
#
# @/atom:MoleculeType1:C 1
# @/atom:MoleculeType1:H 2
# @/atom:MoleculeType2:N 3
# $/atom:molecule1:N1 1
# $/atom:molecule1:C1 2
# :
# $/atom:molecule1141:CH 13578
# $/atom:molecule1142:N3 13579
# :
# We only care about instance variables (which use the '$' prefix)
# Lines corresponding to static variables (which use the '@' prefix)
# are ignored during this pass.
i_prefix = tokens[0].find('$')
if i_prefix != -1:
descr_str = tokens[0][i_prefix + 1:]
cat_name = ExtractCatName(descr_str)
if ((cat_name == 'atom') and
(tokens[0] not in defined_atoms)):
raise InputError('Error(' + g_program_name + '): ' + usage_location_str + '\n' +
' Reference to undefined $atom:\n\n'
' ' + tokens[0] + ' (<--full name)\n\n' +
' (If that atom belongs to a molecule (or other subunit), make sure that\n' +
' you specified the correct path which leads to it (using / and ..))\n\n' +
g_no_check_msg)
elif ((cat_name == 'bond') and
(tokens[0] not in defined_bonds)):
raise InputError('Error(' + g_program_name + '): ' + usage_location_str + '\n' +
' Reference to undefined $bond:\n\n'
' ' + tokens[0] + ' (<--full name)\n\n' +
' (If that bond belongs to a molecule (or other subunit), make sure that\n' +
' you specified the correct path which leads to it (using / and ..))\n\n' +
g_no_check_msg)
elif ((cat_name == 'angle') and
(tokens[0] not in defined_angles)):
raise InputError('Error(' + g_program_name + '): ' + usage_location_str + '\n' +
' Reference to undefined $angle:\n\n' +
' ' + tokens[0] + ' (<--full name)\n\n' +
' (If that angle belongs to a molecule (or other subunit), make sure that\n' +
' you specified the correct path which leads to it (using / and ..))\n\n' +
g_no_check_msg)
elif ((cat_name == 'dihedral') and
(tokens[0] not in defined_dihedrals)):
raise InputError('Error(' + g_program_name + '): ' + usage_location_str + '\n\n' +
' Reference to undefined $dihedral:\n\n'
' ' + tokens[0] + ' (<--full name)\n\n' +
' (If that dihedral belongs to a molecule (or other subunit), make sure that\n' +
' you specified the correct path which leads to it (using / and ..))\n\n' +
g_no_check_msg)
elif ((cat_name == 'improper') and
(tokens[0] not in defined_impropers)):
raise InputError('Error(' + g_program_name + '): ' + usage_location_str + '\n' +
' Reference to undefined $improper:\n\n'
' ' + tokens[0] + ' (<--full name)\n\n' +
' (If that improper belongs to a molecule (or other subunit), make sure that\n' +
' you specified the correct path which leads to it (using / and ..))\n\n' +
g_no_check_msg)
# I used to generate an error when a users defines a $mol
# variable but does not associate any atoms with it (or if the
# user systematically deletes all the atoms in that molecule),
# but I stopped this practice.
# I don't think there is any real need to complain if some
# molecule id numbers are undefined. LAMMPS does not care.
#
# elif ((cat_name == 'mol') and
# (tokens[0] not in defined_mols)):
# raise InputError('Error('+g_program_name+'): '+usage_location_str+'\n'+
# ' Reference to undefined $mol (molecule-ID) variable:\n\n'
# ' '+tokens[0]+' (<--full name)\n\n'+
# ' (If that molecule is part of a larger molecule, then make sure that\n'+
# ' you specified the correct path which leads to it (using / and ..))\n\n'+
# g_no_check_msg)
f.close()
sys.stderr.write(g_program_name + ': -- No errors detected. --\n')
exit(0)
except (ValueError, InputError) as err:
sys.stderr.write('\n' + str(err) + '\n')
sys.exit(1)
return
if __name__ == '__main__':
main()
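# Example invocation (assumes it is run in the directory where moltemplate wrote
# "Data Atoms.template", the optional "Data Bonds.template" etc., and
# "ttree_assignments.txt", and that the atom style matches the one used for the build):
#     python lttree_postprocess.py -atomstyle "full" -ttree_assignments ttree_assignments.txt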
| ibethune/lammps | tools/moltemplate/moltemplate/lttree_postprocess.py | Python | gpl-2.0 | 15,992 | [
"LAMMPS"
] | 70e41b1123b3351505ee5bb41bdd1132ea778d1e5c5642b8038462ad786d2718 |
from __future__ import print_function
import numpy as np
from chemlab.core import System, Atom, Molecule
from chemlab.io import datafile, add_default_handler
from chemlab.io.handlers import GromacsIO
from chemlab.io.handlers import EdrIO
from nose.tools import assert_raises, eq_
from nose.plugins.skip import SkipTest
from .testtools import assert_npequal, assert_eqbonds, assert_allclose
def test_datafile():
df = datafile("tests/data/cry.gro") # It guesses
sys = df.read("system")
eq_(sys.n_atoms, 1728)
assert_npequal(sys.type_array[:3], ['Li', 'Li', 'Li'])
assert_npequal(sys.type_array[-3:], ['Cl', 'Cl', 'Cl'])
def test_read_pdb():
df = datafile('tests/data/3ZJE.pdb')
s = df.read('system')
assert_npequal(s.type_array[:3], ['N', 'C', 'C'])
def test_read_pdb_secondary():
system = datafile('tests/data/pdb1g8p.ent', format='pdb').read('system')
# assert np.all(system.secondary_structure[106:109] == 'S')
eq_(system.sub(secondary_id=1).residue_name[0], 'VAL')
assert_npequal(system.sub(secondary_id=1).residue_name, ['VAL', 'VAL', 'ASP', 'LEU'])
helices = system.sub(secondary_structure='H')
helix = helices.sub(secondary_id=helices.secondary_id[0])
assert_npequal(helix.residue_name, ['PRO', 'PHE', 'SER', 'ALA', "ILE"])
def test_write_pdb():
water = Molecule([Atom('O', [0.0, 0.0, 0.0], export={'pdb.type': 'O'}),
Atom('H', [0.1, 0.0, 0.0], export={'pdb.type': 'H'}),
Atom('H', [-0.03333, 0.09428, 0.0], export={'pdb.type': 'H'})],
export={'groname': 'SOL'})
sys = System.empty()
with sys.batch() as b:
for i in range(200):
water.r_array += 0.1
b.append(water.copy())
df = datafile('/tmp/dummy.pdb', mode="w")
df.write("system", sys)
def test_read_gromacs():
'''Test reading a gromacs file'''
df = datafile('tests/data/cry.gro')
s = df.read('system')
def test_write_gromacs():
water = Molecule([Atom('O', [0.0, 0.0, 0.0], name="OW"),
Atom('H', [0.1, 0.0, 0.0], name='HW1'),
Atom('H', [-0.03333, 0.09428, 0.0], name='HW2')],
name='SOL')
sys = System.empty()
with sys.batch() as b:
for i in range(200):
b.append(water.copy())
sys.box_vectors = [[2, 0, 0], [0, 2, 0], [0, 0, 2]]
df = datafile('/tmp/dummy.gro', mode="w")
df.write('system', sys)
with assert_raises(Exception):
df = datafile('/tmp/dummy.gro')
df.write('system', sys)
df = datafile('/tmp/dummy.gro')
sread = df.read('system')
assert all(sread.type_array == sys.type_array)
def test_read_edr():
df = datafile('tests/data/ener.edr')
#df.read('frames')
dt, temp = df.read('quantity', 'Temperature')
unit = df.read('units', 'Temperature')
try:
df.read('quantity', 'NonExistent')
except:
pass
def test_read_xyz():
df = datafile('tests/data/sulphoxide.xyz')
mol1 = df.read('molecule')
df = datafile('/tmp/t.xyz', mode="w")
df.write('molecule', mol1)
df = datafile('/tmp/t.xyz', mode="rb")
mol2 = df.read('molecule')
assert np.allclose(mol1.r_array, mol2.r_array)
assert all(mol1.type_array == mol2.type_array)
def test_read_mol():
df = datafile('tests/data/benzene.mol')
mol1 = df.read('molecule')
def test_read_xtc():
df = datafile('tests/data/trajout.xtc')
traj = df.read('trajectory')
def test_read_cml():
df = datafile('tests/data/mol.cml')
mol = df.read("molecule")
def test_write_cml():
df = datafile('tests/data/mol.cml')
mol = df.read("molecule")
df = datafile('/tmp/sadf.cml', 'w')
df.write('molecule', mol)
def test_read_cclib():
try:
import cclib
except:
raise SkipTest
df = datafile('tests/data/cclib/water_mp2.out', format='gamess')
# Reading a properties that does exist
result1 = df.read('gbasis')
result2 = [[('S',
[(130.7093214, 0.154328967295),
(23.8088661, 0.535328142282),
(6.4436083, 0.444634542185)]),
('S',
[(5.0331513, -0.099967229187),
(1.1695961, 0.399512826089),
(0.380389, 0.70011546888)]),
('P',
[(5.0331513, 0.155916274999),
(1.1695961, 0.607683718598),
(0.380389, 0.391957393099)])],
[('S',
[(3.4252509, 0.154328967295),
(0.6239137, 0.535328142282),
(0.1688554, 0.444634542185)])],
[('S',
[(3.4252509, 0.154328967295),
(0.6239137, 0.535328142282),
(0.1688554, 0.444634542185)])]]
assert result1 == result2
| chemlab/chemlab | tests/test_io.py | Python | gpl-3.0 | 4,636 | [
"GAMESS",
"Gromacs",
"cclib"
] | 1e54c92dc37140cc8099a0c8ac08678611dec4701e5db5f5e80ae03c198786cc |
# -*- coding: utf-8 -*-
# benchmark.py ---
#
# Filename: benchmark.py
# Description: Script for performance benchmarking
# Author:
# Maintainer:
# Created: Thu Jan 23 16:06:25 2014 (+0530)
# Version:
# Last-Updated:
# By:
# Update #: 0
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
"""This script tries to run the test case multiple times with
different numbers of processes and dumps the timings. Rather than sweeping
the process counts in order, it shuffles the runs to avoid systematic error
from the process sequence.
"""
from __future__ import print_function
import getopt
import subprocess
import sys
import os
import socket
import multiprocessing
from datetime import datetime, timedelta
from collections import defaultdict
import random
def create_hostfile(slotcount=None, filename='hostfile'):
"""Create a file with name `filename` and write host info for openmpi
in this."""
if slotcount is None:
slotcount = multiprocessing.cpu_count()
with open(filename, 'w') as hostfile:
hostfile.write('%s slots=%d\n' % (socket.gethostname(), slotcount))
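# For reference, the hostfile written above contains a single line of the form
# (hostname and slot count depend on the machine running the benchmark):
#     somehost slots=8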
def run(script, scriptargs=[], hostfile='hostfile', np=2, ni=1, oversubscribe=False):
"""Run `script` with arguments in `scriptargs` list. Use `hostfile`
for host and slot info, use `np` moose processes in addition to
Python, do `ni` iterations to get average performance."""
if not oversubscribe:
np = multiprocessing.cpu_count()
s0 = datetime.now()
avg = defaultdict(timedelta)
    procs = list(range(np))
for jj in range(ni):
random.shuffle(procs)
for ii in procs:
print('Running with', ii, 'processes')
if ii > 0:
args = ['mpirun', '--hostfile', hostfile, '-np', '1', 'python', script] + scriptargs + [':', '-np', str(ii), '../../../moose']
else:
args = ['mpirun', '--hostfile', hostfile, '-np', '1', 'python', script] + scriptargs
print('Running:', args)
s1 = datetime.now()
ret = subprocess.check_output(args)
print('====================== OUTPUT START ======================')
print(ret)
print('====================== OUTPUT END ======================')
e1 = datetime.now()
d1 = e1 - s1
avg[ii] += d1
print('Time to run ', args)
print(' =', d1.days * 86400 + d1.seconds + 1e-6 * d1.microseconds)
e0 = datetime.now()
d0 = e0 - s0
avg['all'] += d0
for ii in sorted(procs):
print('Time to run using', ii, 'additional moose processes: ')
print(' =', (avg[ii].days * 86400 + avg[ii].seconds + 1e-6 * avg[ii].microseconds) / ni)
print('Total time for all different process counts averaged over all', ni, 'iterations:', (avg['all'].days * 86400 + avg['all'].seconds + 1e-6 * avg['all'].microseconds) / ni)
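# Note: the manual "days * 86400 + seconds + 1e-6 * microseconds" conversions above
# are equivalent to timedelta.total_seconds(), available since Python 2.7.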
def print_usage(argv0):
print('''Usage: %s [-s slotcount] [-f hostfile] [-n maxprocess] [-i iterations] {script} [script arguments]
Run {script} using up to {slotcount} slots and display
execution time. If specified, the host information will be
written in `hostfile`. Default is "hostfile".''' % (argv0))
sys.exit(1)
if __name__ == '__main__':
if len(sys.argv) < 2:
print_usage(sys.argv[0])
hostfile = 'hostfile'
scriptargs = []
slots = None
np = 1
ni = 1
    options, rest = getopt.getopt(sys.argv[1:], 'hf:s:n:i:', ['hostfile=', 'slots=', 'help', 'numproc=', 'iterations='])
for opt, arg in options:
if opt in ('-h', '--help'):
print_usage(sys.argv[0])
elif opt in ('-f', '--hostfile'):
hostfile = arg
elif opt in ('-s', '--slots'):
slots = int(arg)
elif opt in ('-n', '--numproc'):
np = int(arg)
elif opt in ('-i', '--iterations'):
ni = int(arg)
if len(rest) < 1:
print_usage(sys.argv[0])
else:
script = rest[0]
if len(rest) > 1:
scriptargs = rest[1:]
create_hostfile(slotcount=slots, filename=hostfile)
run(script, scriptargs, hostfile=hostfile, np=np, ni=ni)
#
# benchmark.py ends here
| dharmasam9/moose-core | tests/python/mpi/benchmark.py | Python | gpl-3.0 | 4,984 | [
"MOOSE"
] | 71bf6439fe1f4e6c43a3ab40a42a322ac074d7aec185063148613b8499dd36a0 |
import numpy as np
import pandas as pd
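# NOTE: `prices` below is assumed to be a pandas DataFrame of asset prices defined in
# an earlier (not shown) step; this file reads like notebook cells pasted together.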
returns = prices.pct_change()
returns.dropna()
returns.std()
deviations = returns - returns.mean()
squared_deviations = deviations ** 2
variance = squared_deviations.mean()
volatility = np.sqrt(variance)
me_m = pd.read_csv('./Data/Portfolios_Formed_on_ME_monthly_EW.csv',
header=0, index_col=0, parse_dates=True, na_values=-99.99)
rets = me_m[['Lo 10', 'Hi 10']]
rets.columns = ['SmallCap', 'LargeCap']
rets = rets / 100
rets.plot.line()
rets.head()
rets.index = pd.to_datetime(rets.index, format='%Y%m')
rets.head()
rets.index = rets.index.to_period('M')
rets['1975']
wealth_index = 1000 * (1+rets['LargeCap']).cumprod()
wealth_index.plot.line()
previous_peaks = wealth_index.cummax()
previous_peaks.plot.line()
drawdown = (wealth_index - previous_peaks) / previous_peaks
drawdown.plot()
drawdown.min()
drawdown['1975':].min()
drawdown['1975':].idxmin()
def drawdown(return_series: pd.Series):
"""
Takes a time series of asset returns
Computes and returns a DataFrame that contains:
the wealth index
the previous peaks
percent drawdowns
:param return_series:
:return:
"""
wealth_index = 1000 * (1+return_series).cumprod()
previous_peaks = wealth_index.cummax()
drawdowns = (wealth_index - previous_peaks) / previous_peaks
return pd.DataFrame(
{
"Wealth": wealth_index,
"Peaks": previous_peaks,
"Drawdown": drawdowns
}
)
drawdown(rets['LargeCap']).head()
drawdown(rets['LargeCap'])[['Wealth', 'Peaks']].plot()
import pandas as pd
import EDHEC.edhec_risk_kit as erk
hfi = erk.get_hfi_returns()
hfi.head()
pd.concat([hfi.mean(), hfi.median(), hfi.mean()>hfi.median()], axis='columns')
erk.skewness(hfi).sort_values()
import scipy.stats
scipy.stats.skew(hfi)
import numpy as np
normal_rets = np.random.normal(0, .15, size=(263, 1))
erk.skewness(normal_rets)
erk.kurtosis(normal_rets)
erk.kurtosis(hfi)
scipy.stats.kurtosis(normal_rets)
scipy.stats.jarque_bera(normal_rets)
scipy.stats.jarque_bera(hfi)
erk.is_normal(normal_rets)
hfi.aggregate(erk.is_normal)
ffme = erk.get_ffme_returns()
erk.skewness(ffme)
erk.kurtosis(ffme)
hfi.std(ddof=0)
hfi[hfi<0].std(ddof=0)
erk.semideviation(hfi)
# Historical VaR
# Parametric VaR - Gaussian
# Modified Cornish-Fisher VaR
np.percentile(hfi, q=5, axis=0)
hfi.apply(lambda x: np.percentile(x, q=5, axis=0))
erk.var_historic(hfi)
from scipy.stats import norm
z = norm.ppf(.05)
hfi.mean() + z*hfi.std(ddof=0)
erk.var_gaussian(hfi)
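# A minimal sketch of the Cornish-Fisher adjustment that var_gaussian(..., modified=True)
# presumably applies: the Gaussian quantile z is corrected with the sample skewness s and
# excess kurtosis k before computing -(mean + z_cf * std). The function below is only
# illustrative; its name and conventions are assumptions, not taken from edhec_risk_kit.
def var_cornish_fisher_sketch(r, level=0.05):
    z = norm.ppf(level)                      # Gaussian quantile at the chosen level
    s = scipy.stats.skew(r)                  # sample skewness
    k = scipy.stats.kurtosis(r)              # excess kurtosis (0 for a normal)
    z_cf = (z
            + (z**2 - 1) * s / 6
            + (z**3 - 3 * z) * k / 24
            - (2 * z**3 - 5 * z) * (s**2) / 36)
    return -(r.mean() + z_cf * r.std(ddof=0))
var_cornish_fisher_sketch(hfi)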
var_list = [erk.var_gaussian(hfi), erk.var_gaussian(hfi, modified=True), erk.var_historic(hfi)]
comparison = pd.concat(var_list, axis=1)
comparison.columns = ['Gaussian', 'Cornish-Fisher', 'Historic']
comparison.plot.bar(title='EDHEC Hedge Fund Indices: VaR Comparison')
erk.cvar_historic(hfi)
| nealchenzhang/Py4Invst | EDHEC/CourseOne.py | Python | mit | 2,869 | [
"Gaussian"
] | 1411790a3a4194d463abc2ea6258e806096a31fe37097123e61dba30e647ee16 |
"""
Ax_Metrics - EROut plugin 'geckoboard_bullet'
Writes Geckoboard JSON output for various charts for use with
http://www.geckoboard.com.
Contents:
- EROut_geckoboard_bullet - bullet chart
See:
- https://developer.geckoboard.com/#bullet-graph
------------------------------------------------------------------------------
Author: Dan Kamins <dos at axonchisel dot net>
Copyright (c) 2014 Dan Kamins, AxonChisel.net
"""
# ----------------------------------------------------------------------------
import time
from axonchisel.metrics.foundation.ax.dictutil import OrderedDict
from axonchisel.metrics.foundation.chrono.stepper import Stepper
from .base import EROut_geckoboard
import logging
log = logging.getLogger(__name__)
# ----------------------------------------------------------------------------
class EROut_geckoboard_bullet(EROut_geckoboard):
"""
EROut (Extensible Report Outputter) Plugin for Geckoboard bullet charts.
Adds JSON-serializable output to extinfo['jout'] dict.
Supports multiple invocations to include multiple bullets in one output,
    for use with a 2x2 Geckoboard tile that can handle up to 4 bullets.
Typical usage is with collapsed query, default 'LAST' reduce function,
    and ghosts disabled. This prevents needless queries from running.
Non-collapsed queries with other reduce functions may be used too.
Compatibility note:
Targets Geckoboard bullet chart v1 ~2014-11 including workarounds for
some of their bugs. GB intends to release a new bullet chart version soon
for which a new EROut should be written.
QFormat support (under 'geckoboard_bullet' or '_default'):
reduce : Function from metricdef.FUNCS to reduce series with
title : (optional) Title for bullet
subtitle : (optional) Subtitle for bullet
orientation : (Optional) 'horizontal' or 'vertical'
More info:
- https://developer.geckoboard.com/#bullet-graph
- http://www.perceptualedge.com/articles/misc/Bullet_Graph_Design_Spec.pdf
- http://en.wikipedia.org/wiki/Bullet_graph
Example JSON:
{
"orientation": "horizontal",
"item": {
"label": "Revenue 2014 YTD",
"sublabel": "(U.S. $ in thousands)",
"axis": {
"point": ["0", "200", "400", "600", "800", "1000"]
},
"range": {
"red": {
"start": 0,
"end": 400
},
"amber": {
"start": 401,
"end": 700
},
"green": {
"start": 701,
"end": 1000
}
},
"measure": {
"current": {
"start": "0",
"end": "500"
},
"projected": {
"start": "100",
"end": "900"
}
},
"comparative": {
"point": "600"
}
}
}
"""
#
# Abstract Method Implementations
#
# abstract
def plugin_output(self, mdseries, query=None):
"""
EROut plugins must implement this abstract method.
Invoked to output MultiDataSeries as specified.
Returns nothing. Output target should be configured separately.
"""
log.debug("Outputting %s for query %s", mdseries, query)
self._qfdomain = 'geckoboard_bullet'
# Set orientation (may override previous query's):
if self.query:
try:
orientation = self.query.qformat.get(
'geckoboard_bullet', 'orientation')
self.jout['orientation'] = orientation
except KeyError:
pass
# Iterate MDS, writing each series:
for dseries in mdseries.iter_series():
self._write_series(dseries)
#
# Internal Methods
#
def _write_series(self, dseries):
"""
Write the current DataSeries to output as a bullet chart.
(Geckoboard supports up to 4 bullet charts in the JSON,
so up to 4 DataSeries can be used)
"""
# Prep:
self._dseries = dseries
self._write_series_prep()
# Calculate details:
self._write_series_query_adjust()
self._write_series_calc_axis()
self._write_series_calc_rag()
# Add overall item to jout:
self.jout['item'].append(self._jitem)
def _write_series_prep(self):
"""Prepare internal data for new DataSeries."""
# Reduce series to single value by reduce func.
# Usually func 'LAST' with collapsed series (Servant option),
# but other operations can be useful too, e.g. AVG, etc.
reduce_func = self._qformat_get('reduce', 'LAST')
self._value = self._dseries.reduce(reduce_func)
# Prep JSON-serializable template to fill in:
self._jitem = {
"label": "",
"sublabel": "",
"axis": {
"point": []
},
            # Note: Due to a design flaw in the Geckoboard bullet specification,
# we must use OrderedDict to ensure the ranges are in order.
"range": OrderedDict([
("red", { "start": 0, "end": 0 }),
("amber", { "start": 0, "end": 0 }),
("green", { "start": 0, "end": 0 })
]),
"measure": {
"current": { "start": 0, "end": 0 },
# "projected": { "start": 0, "end": 0 }
},
# "comparative": { "point": 0 }
}
# Prep initial data:
self._minval = 0
self._maxval = self._value
self._is_int = self._is_round(self._maxval)
self._qmetric = None
# Set known jitem data:
self._jitem['measure']['current']['begin'] = self._minval
self._jitem['measure']['current']['end'] = self._value
def _write_series_query_adjust(self):
"""Adjust settings based on Query (if provided)."""
if not self.query:
return
# Check format labels:
qformat = self.query.qformat
domain = 'geckoboard_bullet'
self._jitem['label'] = qformat.get(domain, 'title', "")
self._jitem['sublabel'] = qformat.get(domain, 'subtitle', "")
# Check qmetric:
self._qmetric = self.query.qdata.get_qmetric(0)
# If goal specified, handle it:
if self._qmetric.goal is not None:
self._jitem['comparative'] = {'point': self._qmetric.goal}
self._maxval = max(self._maxval, self._qmetric.goal)
if self._is_int:
self._is_int = self._is_round(self._maxval)
# Projected value based on current relative to query time frame:
if self.query.qtimeframe.tmfrspec.mode == 'CURRENT':
dseries = self._dseries
stepper = Stepper(dseries.tmfrspec, ghost=dseries.ghost)
tmrange = stepper.analyze()['tmrange']
# Calc tfrac as how far into time period we are, [0..1]
t0 = time.mktime(tmrange.inc_begin.timetuple())
t1 = time.mktime(tmrange.exc_end.timetuple())
tnow = time.time()
tfrac = 1.0 * (tnow - t0) / (t1 - t0)
if 0.0 <= tfrac <= 1.0:
# Calculate projected value based on current value and tfrac:
proj = self._minval + ((self._value - self._minval) / tfrac)
self._maxval = max(self._maxval, proj)
self._jitem['measure']['projected'] = {}
self._jitem['measure']['projected']['start'] = self._minval
self._jitem['measure']['projected']['end'] = proj
else:
# May have been reframed, etc., so just skip projection
pass
def _write_series_calc_axis(self):
"""Calculate user-friendly axis values based on data."""
AXIS_MAX_MULTIPLIER = 1.25 # some breathing room above max value
AXIS_SIG_DIGS = 3 # how many significant digits to round to
AXIS_POINTS = 5 # how many axis points to label
# Calc initial axis min/max values and step size:
aminval = self._minval
amaxval = self._round_sigdigs((self._maxval * AXIS_MAX_MULTIPLIER),
AXIS_SIG_DIGS)
axis_step = (amaxval - aminval) / (AXIS_POINTS - 1)
# Some rounding if we're operating on ints:
if self._is_int:
if not (self._is_round(axis_step) and self._is_round(amaxval)):
axis_step = round(axis_step)
amaxval = aminval + (axis_step * (AXIS_POINTS - 1))
# Calc axis points:
for n in range(AXIS_POINTS):
aval = aminval + (n * axis_step)
aval = self._round_sigdigs(aval, AXIS_SIG_DIGS)
if self._is_int:
aval = "%d" % (aval)
else:
aval = "%.*f" % (AXIS_SIG_DIGS-1, aval)
self._jitem['axis']['point'].append(aval)
def _write_series_calc_rag(self):
"""Calculate red/amber/green cutoffs."""
# Prep with default cutoff points based on data range:
od_range = self._jitem['range']
rag_c1 = self._minval + ((self._maxval - self._minval) / 3.0 * 1)
rag_c2 = self._minval + ((self._maxval - self._minval) / 3.0 * 2)
# If negative impact (e.g. expenses, bugs, ...), invert RAG:
if self.query and (self._qmetric.impact == 'NEGATIVE'):
# Reverse RAG order in dict (GB bug workaround):
od_range = OrderedDict(reversed(list(od_range.iteritems())))
self._jitem['range'] = od_range
# Apply cutoff points:
if self._qmetric.rag:
(rag_c2, rag_c1) = self._qmetric.rag
od_range['green'] = { "start": self._minval, "end": rag_c1 }
od_range['amber'] = { "start": rag_c1, "end": rag_c2 }
od_range['red'] = { "start": rag_c2, "end": self._maxval }
# Else normal positive impact (e.g. revenue, sales, ...):
else:
assert self._qmetric.impact == 'POSITIVE'
# Apply cutoff points:
if self.query and (self._qmetric.rag):
(rag_c1, rag_c2) = self._qmetric.rag
od_range['red'] = { "start": self._minval, "end": rag_c1 }
od_range['amber'] = { "start": rag_c1, "end": rag_c2 }
od_range['green'] = { "start": rag_c2, "end": self._maxval }
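# Illustrative qformat content for this outputter (the nested-dict shape is an
# assumption based on the qformat.get(domain, key, default) calls above, not on
# any particular Query file format):
#     {
#         "geckoboard_bullet": {
#             "title":       "Revenue 2014 YTD",
#             "subtitle":    "(U.S. $ in thousands)",
#             "reduce":      "LAST",
#             "orientation": "horizontal",
#         }
#     }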
| axonchisel/ax_metrics | py/axonchisel/metrics/io/erout/plugins/ero_geckoboard/bullet.py | Python | mit | 10,675 | [
"Amber"
] | 3613b136dafbef377dc5823ff358a9a5b5ee9ba37f84029b6c8ef657f9f3aea1 |
#
# Author: Henrique Pereira Coutada Miranda
# Run a BSE calculation using yambo
#
from __future__ import print_function
import sys
from yambopy import *
from qepy import *
from schedulerpy import *
import argparse
import shutil
yambo = "yambo"
p2y = "p2y"
prefix = 'bn'
scheduler = Scheduler.factory
def create_save():
#check if the nscf cycle is present
if os.path.isdir('nscf/%s.save'%prefix):
print('nscf calculation found!')
else:
print('nscf calculation not found!')
exit()
#check if the SAVE folder is present
if not os.path.isdir('database'):
print('preparing yambo database')
shell = scheduler()
shell.add_command('pushd nscf/%s.save; %s; %s'%(prefix,p2y,yambo))
shell.add_command('popd')
shell.add_command('mkdir -p database')
shell.add_command('mv nscf/%s.save/SAVE database'%prefix)
shell.run()
#create the folder to run the calculation
if not os.path.isdir(folder):
shell = scheduler()
shell.add_command('mkdir -p %s'%folder)
shell.add_command('cp -r database/SAVE %s/'%folder)
shell.run()
def bse_convergence(what='dielectric',threads=1,nohup=False):
if nohup: nohup = 'nohup'
else: nohup = ''
#create the yambo input file
y = YamboIn('%s -b -o b -k sex -y d -V all'%yambo,folder=folder)
#default variables
y['BSEBands'] = [4,5]
y['BEnSteps'] = 500
y['BSEEhEny'] = [[1.0,10.0],'eV']
y['BEnRange'] = [[2.0,12.0],'eV']
y['BSENGexx'] = [10,'Ry']
y['KfnQP_E'] = [2.91355133,1.0,1.0] #some scissor shift
    y['DBsIOoff'] = 'BS' #turn off writing the BSE hamiltonian DB (better performance)
print(what)
if what == 'dielectric':
#list of variables to optimize the dielectric screening
conv = { 'FFTGvecs': [[10,15,20,30],'Ry'],
'NGsBlkXs': [[1,2,3,5,6], 'Ry'],
'BndsRnXs': [[1,10],[1,20],[1,30],[1,40]] }
else:
# converged parameters for epsilon
y['FFTGvecs'] = [30,'Ry']
y['NGsBlkXs'] = [2,'Ry']
y['BndsRnXs'] = [[1,40],'Ry']
# choose a large number of Bands in BSE
# BSEEhEny will choose which transitions to include
y['BSEBands'] = [1,10]
#list of variables to optimize the BSE
conv = { 'BSEEhEny': [[[1,10],[1,12],[1,14]],'eV'],
'BSENGBlk': [[0,1,2], 'Ry'],
'BSENGexx': [[10,15,20],'Ry']}
def run(filename):
"""
Function to be called by the optimize function
"""
path = filename.split('.')[0]
print(filename, path)
shell = scheduler()
shell.add_command('cd %s'%folder)
shell.add_command('%s mpirun -np %d %s -F %s -J %s -C %s 2> %s.log'%(nohup,threads,yambo,filename,path,path,path))
shell.add_command('touch %s/done'%path)
if not os.path.isfile("%s/%s/done"%(folder,path)):
shell.run()
y.optimize(conv,run=run)
def analyse():
#pack the files in .json files
pack_files_in_folder(folder)
paths = []
#get folder names
for dirpath,dirnames,filenames in os.walk(folder):
#ignore the root folder
if dirpath == folder:
continue
#check if there are some output files in the folder
if ([ f for f in filenames if 'o-' in f ]):
paths.append( dirpath.split('/')[-1] )
for path in paths:
print( path )
#get the absorption spectra
a = YamboBSEAbsorptionSpectra(path,path=folder)
excitons = a.get_excitons(min_intensity=0.0005,max_energy=7,Degen_Step=0.01)
print( "nexcitons: %d"%len(excitons) )
print( "excitons:" )
print( excitons )
a.get_wavefunctions(Degen_Step=0.01,repx=range(-1,2),repy=range(-1,2),repz=range(1))
a.write_json(path)
print( "To plot the data run:" )
print( "python bse_conv_bn.py -p -e" )
print( "python bse_conv_bn.py -p -b" )
def plot(what):
#plot the results using yambo analyser
y = YamboAnalyser(folder)
print(y)
fig = plt.figure(figsize=(10,8))
if what == "dielectric":
ax = plt.subplot(3,1,1)
y.plot_bse(['eps','FFTGvecs'],ax=ax)
ax = plt.subplot(3,1,2)
y.plot_bse(['eps','NGsBlkXs'],ax=ax)
ax = plt.subplot(3,1,3)
y.plot_bse(['eps','BndsRnXs'],ax=ax)
plt.tight_layout()
plt.show()
else:
ax = plt.subplot(3,1,1)
y.plot_bse(['eps','BSEEhEny'],ax=ax)
ax = plt.subplot(3,1,2)
y.plot_bse(['eps','BSENGBlk'],ax=ax)
ax = plt.subplot(3,1,3)
y.plot_bse(['eps','BSENGexx'],ax=ax)
plt.tight_layout()
plt.show()
print('done!')
if __name__ == "__main__":
#parse options
parser = argparse.ArgumentParser(description='Test the yambopy script.')
parser.add_argument('-r', '--run', action="store_true", help='run BSE convergence calculation')
parser.add_argument('-a', '--analyse', action="store_true", help='plot the results')
parser.add_argument('-p', '--plot', action="store_true", help='plot the results')
parser.add_argument('-e', '--epsilon', action="store_true", help='converge epsilon parameters')
parser.add_argument('-b', '--bse', action="store_true", help='converge bse parameters')
parser.add_argument('-u', '--nohup', action="store_true", help='run the commands with nohup')
parser.add_argument('-f', '--folder', default="bse_conv", help='choose folder to put the results')
parser.add_argument('-t', '--threads', default=1, type=int, help='number of threads to use')
parser.add_argument('--p2y', default="store_true", help='p2y executable')
parser.add_argument('--yambo', default="store_true", help='yambo executable')
args = parser.parse_args()
folder = args.folder
threads = args.threads
nohup = args.nohup
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
if args.bse:
what = 'bse'
else:
what = 'dielectric'
create_save()
if args.run: bse_convergence(what=what,threads=threads,nohup=nohup)
if args.analyse: analyse()
if args.plot: plot(what)
| henriquemiranda/yambopy | tutorial/bn/bse_conv_bn.py | Python | bsd-3-clause | 6,243 | [
"Yambo"
] | 49fc980315fb1f756498d13fd4f88238050f163662e095321dbc01c75d39e1c9 |
'''Copyright(C): Leaf Johnson 2011
This file is part of makeclub.
makeclub is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
makeclub is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with makeclub. If not, see <http://www.gnu.org/licenses/>.
'''
from google.appengine.api import users
from google.appengine.ext import webapp
from models import Club, Membership
from url import urldict
from template import render
from access import hasClubPrivilige
from helper import lastWordOfUrl
from errors import errorPage
from infopage import infoPage
'''
Every response method calls visit() first, which loads the user (operator),
targetUser (target) and club (target club) from the request path.
If no targetUser is specified, the current user is assumed to be the target user,
and a membership between the club and the targetUser is then created.
'''
urlconf = urldict['Member']
cvurlconf = urldict['ClubView']
class Member(webapp.RequestHandler):
def __init__(self,
template='member.html', *args, **kw ):
webapp.RequestHandler.__init__(self, *args, **kw)
self.template = template
self.club = None
self.user = None
self.member = None
self.targetUser = None
self.postStatus = ''
def post(self, *args):
if (self.visit()):
            #If we find 'delete' in the request data, we'll delete the member specified by the path
if (self.judgeDelete()):
self.doDelete()
infoPage(self.response, "Delete Succeed", "Deleted", urldict['ClubView'].path(self.club.slug))
return True
#Esle we'll construct membership object via postdata
member = self.getPostData()
if (member.put()):
self.postStatus = "Succeed"
self.member = member
self.get(*args)
#Delete method not supported yet
def delete(self, *args):
if (self.visit()):
if (self.member):
member = self.member
member.delete()
self.member = None
self.response.write("Succeed!")
#Get method will display the edit form
def get(self, *args):
if (self.visit()):
club = self.club
tempvars = dict (user = self.user,
action = urlconf.path(self.club.slug, self.user.email()),
member = self.getMember(),
club = self.club,
cluburl= cvurlconf.path(club.slug),
postStatus = self.postStatus,
enableFinace = hasClubPrivilige(users.get_current_user(), club, "finance" )
)
self.response.out.write (render(self.template, tempvars, self.request.url))
def judgeDelete(self):
return (self.request.get('delete', '') == "True")
def doDelete(self):
if (hasClubPrivilige(self.user, self.club, 'deleteMember', self.targetUser.email() )):
member = self.member
if (member):
return member.delete()
return False
def dbg(self, *args):
self.response.out.write(' '.join([str(arg) for arg in args]))
    #Create user, club, and membership information according to the request path
def visit(self):
if (self.club and self.user):
return True
#Analyze req path first
slug, pathuser = urlconf.analyze(self.request.path)
#Get club
club = Club.getClubBySlug(slug)
if (not club):
return errorPage ( self.response, "No such club " + slug, '/clubs', 404)
#Check user status
user = users.get_current_user()
if (not user):
return errorPage ( self.response, "User not login", users.create_login_url(self.request.uri), self.response, 403)
        #The one we modify is the path user; if omitted, use the current user as the target
if (pathuser):
pathuser = users.User(pathuser)
else:
pathuser = user
        #@warning: I don't know if it is correct to add access control code here
if (not hasClubPrivilige(user, club, 'membership', pathuser.email())):
return errorPage ( self.response, "Can not access", '/', 403)
self.user = user
self.club = club
self.member = Membership.between(pathuser, club)
self.targetUser = pathuser
return True
    #Must be called after self.visit
def getPostData(self):
member = self.getMember()
getval = self.request.get
name = getval('name', '')
email = getval('email', '')
balance = getval('balance', '')
member.user = self.targetUser
if (name):
member.name = getval('name', '')
elif (not member.name):
member.name = self.user.nickname()
if (email):
member.email = getval('email', '')
elif (not member.email):
member.email = self.user.email()
if (balance and hasClubPrivilige(users.get_current_user(), member.club, "finance" )):
member.balance = balance
return member
    #Will not work if user is None
def getMember(self, user=''):
if (not user):
user = self.targetUser
if (self.member):
member = self.member
else:
member = Membership.between (user, self.club)
if (not member):
member = Membership (name = user.nickname(), email = user.email(), club=self.club, user = user)
return member
| cardmaster/makeclub | controlers/member.py | Python | agpl-3.0 | 5,244 | [
"VisIt"
] | f8ef2d049d48e9f2caec79a64f357cb82ca9fce37cd5c8620b2c670c616774bc |
from matplotlib.axes import Axes
from matplotlib import colors as mpl_colors
from matplotlib import path
from scipy import optimize
import numpy as np
import sys
from matplotlib import __version__
from . import colors
from . import _tools
mpl_2 = __version__[0] == "2"
class Axes_bpl(Axes):
name="bpl"
def make_ax_dark(self, minor_ticks=False):
"""Turns an axis into one with a dark background with white gridlines.
This will turn an axis into one with a slightly light gray background,
and with solid white gridlines. All the axes spines are removed (so
there isn't any outline), and the ticks are removed too.
:param minor_ticks: Whether or not to add minor ticks. They will be
drawn as dotted lines, rather than solid lines in
the axes space.
:type minor_ticks: bool
:return: None
Example:
.. plot::
:include-source:
import betterplotlib as bpl
bpl.default_style()
fig, (ax0, ax1) = bpl.subplots(figsize=[12, 5], ncols=2)
ax1.make_ax_dark()
ax0.set_title("Regular")
ax1.set_title("Dark")
"""
if mpl_2:
self.set_facecolor(colors.light_gray)
else:
self.set_axis_bgcolor(colors.light_gray)
self.grid(which="major", color="w", linestyle="-", linewidth=0.5)
if minor_ticks:
self.minorticks_on()
self.grid(which="minor", color="w", linestyle=":", linewidth=0.5)
self.set_axisbelow(True) # moves gridlines below the points
# remove all outer splines
self.remove_spines(["all"])
def remove_ticks(self, ticks_to_remove):
"""Removes ticks from the given locations.
In some situations, ticks aren't needed or wanted. Note that this
doesn't remove the spine itself, or the labels on that axis.
Note that this can break when used with the various `remove_*()`
functions. Order matters with these calls, presumably due to something
with the way matplotlib works under the hood. Mess around with it if
you're having trouble.
:param ticks_to_remove: locations where ticks need to be removed from.
Pass in a list, and choose from: "all, "top",
"bottom", "left", or "right".
:type ticks_to_remove: list
:return: None
.. plot::
:include-source:
import betterplotlib as bpl
bpl.default_style()
fig, (ax0, ax1) = bpl.subplots(ncols=2, figsize=[10, 5])
ax0.plot([0, 1, 2], [0, 1, 2])
ax1.plot([0, 1, 2], [0, 1, 2])
ax0.remove_ticks(["top", "right"])
ax1.remove_ticks(["all"])
ax0.set_title("removed top/right ticks")
ax1.set_title("removed all ticks")
"""
# TODO: doesn't work if they pass in a string, rather than a list
        # If they want to remove all ticks, turn that into workable information
ticks_to_remove = set(ticks_to_remove) # to remove duplicates
if "all" in ticks_to_remove:
# have to do weirdness since its a set
ticks_to_remove.remove("all")
for tick in ["left", "right", "top", "bottom"]:
ticks_to_remove.add(tick)
# matplotlib only allows setting which axes the ticks are on, so figure
# that out and set the ticks to only be on the desired axes.
if "left" in ticks_to_remove and "right" in ticks_to_remove:
self.yaxis.set_ticks_position("none")
elif "left" in ticks_to_remove:
self.yaxis.set_ticks_position("right")
elif "right" in ticks_to_remove:
self.yaxis.set_ticks_position("left")
if "top" in ticks_to_remove and "bottom" in ticks_to_remove:
self.xaxis.set_ticks_position("none")
elif "top" in ticks_to_remove:
self.xaxis.set_ticks_position("bottom")
elif "bottom" in ticks_to_remove:
self.xaxis.set_ticks_position("top")
def remove_spines(self, spines_to_remove):
"""Remove spines from the axis.
Spines are the lines on the side of the axes. In many situations, these
are not needed, and are just junk. Calling this function will remove
the specified spines from an axes object. Note that it does not remove
the tick labels if they are visible for that axis.
Note that this function can mess up if you call this function multiple
times with the same axes object, due to the way matplotlib works under
the hood. I haven't really tested it extensively (since I have never
wanted to call it more than once), but I think the last function call
is the one that counts. Calling this multiple times on the same axes
would be pointless, though, since you can specify multiple axes in one
call. If you really need to call it multiple times and it is breaking,
let me know and I can try to fix it. This also can break when used with
the various `remove_*()` functions. Order matters with these calls,
for some reason.
:param spines_to_remove: List of the desired spines to remove. Can
choose from "all", "top", "bottom", "left",
or "right".
:type spines_to_remove: list
:return: None
.. plot::
:include-source:
import betterplotlib as bpl
bpl.default_style()
fig, (ax0, ax1) = bpl.subplots(ncols=2, figsize=[10, 5])
ax0.plot([0, 1, 2], [0, 1, 2])
ax1.plot([0, 1, 2], [0, 1, 2])
ax0.remove_spines(["top", "right"])
ax1.remove_spines(["all"])
ax0.set_title("removed top/right spines")
ax1.set_title("removed all spines")
"""
# If they want to remove all spines, turn that into workable information
spines_to_remove = set(spines_to_remove) # to remove duplicates
if "all" in spines_to_remove:
spines_to_remove.remove("all")
for spine in ["left", "right", "top", "bottom"]:
spines_to_remove.add(spine)
# remove the spines
for spine in spines_to_remove:
self.spines[spine].set_visible(False)
# remove the ticks that correspond to the spines removed
self.remove_ticks(list(spines_to_remove))
def scatter(self, *args, **kwargs):
"""
Makes a scatter plot that looks nicer than the matplotlib default.
The call works just like a call to plt.scatter. It will set a few
default parameters, but anything you pass in will override the default
parameters. This function also uses the color cycle, unlike the default
scatter.
It also automatically determines a guess at the proper alpha
(transparency) of the points in the plot.
NOTE: the `c` parameter sets just the facecolor of the points, while
`color` specifies the whole color of the point, including the edge line
color. This follows the default matplotlib scatter implementation.
:param args: non-keyword arguments that will be passed on to the
plt.scatter function. These will typically be the x and y
lists.
:param kwargs: keyword arguments that will be passed on to plt.scatter.
:return: the output of the plt.scatter call is returned directly.
.. plot::
:include-source:
import betterplotlib as bpl
import matplotlib.pyplot as plt
import numpy as np
bpl.default_style()
x = np.random.normal(0, scale=0.5, size=500)
y = np.random.normal(0, scale=0.5, size=500)
fig = plt.figure(figsize=[15, 7])
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122, projection="bpl")
for ax in [ax1, ax2]:
ax.scatter(x, y)
ax.scatter(x+0.5, y+0.5)
ax.scatter(x+1, y+1)
ax1.set_title("matplotlib")
ax2.add_labels(title="betterplotlib")
"""
# get the color, if it hasn't already been set. I don't need to do this
# in mpl 2.0 technically, but I do it anyway so I can use this color
# for the invisible label below.
if 'color' not in kwargs and 'c' not in kwargs:
# get the default color cycle, and get the next color.
if sys.version_info.major == 2:
kwargs['c'] = self._get_lines.prop_cycler.next()['color']
elif sys.version_info.major == 3:
kwargs['c'] = next(self._get_lines.prop_cycler)['color']
# set other parameters, if they haven't been set already
# I use setdefault to do that, which puts the values in if they don't
# already exist, but won't overwrite anything.
# first set the edge color for the points
# only do this for large datasets in mpl 1.x
if not mpl_2 and len(args[0]) > 30:
kwargs.setdefault('linewidth', 0.25)
# we also need to set the edge color of the markers
# edgecolor is a weird case, since it shouldn't be set if the user
# specifies 'color', since that refers to the whole point, not just
# the color of the point. It includes the edge color.
if 'color' not in kwargs:
kwargs.setdefault('edgecolor', kwargs["c"])
# use the function we defined to get the proper alpha value.
try:
kwargs.setdefault('alpha', _tools._alpha(len(args[0])))
except TypeError:
kwargs.setdefault("alpha", 1.0)
# we want to make the points in the legend opaque always. To do this
# we plot nans with all the same parameters, but with alpha of one.
if "label" in kwargs:
# we don't want to plot any data here, so exclude the positional data
# args (slicing past the end just gives an empty tuple). We'll exclude
# the "x" and "y" kwargs below, too
label_args = args[2:]
# we need to process the kwargs a little before plotting the fake
# data, so make a copy of them
label_kwargs = kwargs.copy()
# exclude any plotted data, if it is in a kwarg
label_kwargs.pop("x", None)
label_kwargs.pop("y", None)
# set the alpha to one, which is the whole point
label_kwargs["alpha"] = 1.0
# we can then plot the fake data. Due to weirdness in matplotlib, we
# have to plot a two element NaN list.
super(Axes_bpl, self).scatter([np.nan, np.nan], [np.nan, np.nan],
*label_args, **label_kwargs)
# in the main plotting we don't want to have a label, so we pop it.
kwargs.pop("label")
# we then plot the main data
return super(Axes_bpl, self).scatter(*args, **kwargs)
def hist(self, *args, **kwargs):
"""
A better histogram function. Also supports relative frequency plots, bin
size, and hatching better than the default matplotlib implementation.
Everything is the same as the default matplotlib implementation, with
the exception of a few keyword parameters. `rel_freq` makes the histogram a
relative frequency plot and `bin_size` controls the width of each bin.
:param args: non-keyword arguments that will be passed on to the
plt.hist() function. These will typically be the list of
values.
:keyword rel_freq: Whether or not to plot the histogram as a relative
frequency histogram. Note that this plots the
relative frequency of each bin compared to the whole
sample. Even if your range excludes some of the data,
it will still be included in the relative frequency
calculation.
:type rel_freq: bool
:keyword bin_size: The width of the bins in the histogram. The bin
boundaries will start at zero, and will be integer
multiples of bin_size from there. Specify either
this, or bins, but not both.
:type bin_size: float
:keyword kwargs: additional controls that will be passed on through to
the plt.hist() function.
:return: same output as plt.hist()
Examples:
The basic histogram should look nicer than the default histogram.
.. plot::
:include-source:
import betterplotlib as bpl
import matplotlib.pyplot as plt
import numpy as np
bpl.default_style()
data = np.random.normal(0, 2, 10000)
fig = plt.figure(figsize=[15, 7])
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122, projection="bpl")
ax1.hist(data)
ax2.hist(data)
ax1.set_title("matplotlib")
ax2.add_labels(title="betterplotlib")
There are also plenty of options that make other histograms look nice
too.
.. plot::
:include-source:
import betterplotlib as bpl
import numpy as np
bpl.default_style()
data1 = np.random.normal(-6, 1, size=10000)
data2 = np.random.normal(-2, 1, size=10000)
data3 = np.random.normal(2, 1, size=10000)
data4 = np.random.normal(6, 1, size=10000)
bin_size = 0.5
bpl.hist(data1, rel_freq=True, bin_size=bin_size)
bpl.hist(data2, rel_freq=True, bin_size=bin_size, histtype="step",
linewidth=5)
bpl.hist(data3, rel_freq=True, bin_size=bin_size,
histtype="stepfilled", hatch="o", alpha=0.8)
bpl.hist(data4, rel_freq=True, bin_size=bin_size, histtype="step",
hatch="x", linewidth=4)
bpl.add_labels(y_label="Relative Frequency")
"""
# TODO: Add documentation for examples of bin_size
# I like white as an edgecolor if we use bars.
if "histtype" not in kwargs or kwargs["histtype"] != "step":
kwargs.setdefault('edgecolor', 'white')
# do the relative frequency business if we need to
if kwargs.pop("rel_freq", False):
# check that they didn't set weights, since that's what I'll change
if "weights" in kwargs:
raise ValueError("The `weights` keyword can't be used with "
"`rel_freq`, since `rel_freq` works by "
"modifying the weights.")
# normed doesn't work either.
if "normed" in kwargs and kwargs["normed"] is True:
raise ValueError("Normed does not work properly with rel_freq.")
# the data will be the first arg.
data = args[0]
# we weight each item by 1/total items.
kwargs["weights"] = [1.0 / len(data)] * len(data)
# if they didn't specify the binning, use our binning
if "bin_size" in kwargs and "bins" in kwargs:
raise ValueError("The `bins` and `bin_size` keywords cannot be "
"used together. Use `bins` if you want to "
"pass your own bins, or use `bin_size` to "
"have the code determine its own bins. ")
kwargs.setdefault("bin_size", _tools._rounded_bin_width(args[0]))
kwargs.setdefault("bins", _tools._binning(args[0],
kwargs.pop("bin_size")))
# plot the histogram, and keep the results
return super(Axes_bpl, self).hist(*args, **kwargs)
def add_labels(self, x_label=None, y_label=None, title=None,
*args, **kwargs):
"""
Adds labels to the x and y axis, plus a title.
Additional properties will be passed to every label created here,
so any property will be applied to all of them. If you want the title to be
different, for example, don't include it here.
:param x_label: label for the x axis
:type x_label: str
:param y_label: label for the y axis
:type y_label: str
:param title: title for the given axis
:type title: str
:param args: additional properties that will be passed on to all the
labels you asked for.
:param kwargs: additional keyword arguments that will be passed on to
all the labels you make.
:return: None
Example:
.. plot::
:include-source:
import betterplotlib as bpl
import numpy as np
bpl.default_style()
xs = np.arange(0, 10, 0.1)
ys = xs**2
bpl.plot(xs, ys)
bpl.add_labels("X value", "Y value", "Title")
"""
if x_label is not None:
self.set_xlabel(x_label, *args, **kwargs)
if y_label is not None:
self.set_ylabel(y_label, *args, **kwargs)
if title is not None:
self.set_title(title, *args, **kwargs)
def set_limits(self, x_min=None, x_max=None, y_min=None, y_max=None,
**kwargs):
"""
Set axes limits for both x and y axis at once.
Any additional kwargs will be passed on to the matplotlib functions
that set the limits, so refer to that documentation to find the
allowed parameters.
:param x_min: minimum x value to be plotted
:type x_min: int, float
:param x_max: maximum x value to be plotted
:type x_max: int, float
:param y_min: minimum y value to be plotted
:type y_min: int, float
:param y_max: maximum y value to be plotted
:type y_max: int, float
:param kwargs: Kwargs for the set_limits() functions.
:return: none.
Example:
.. plot::
:include-source:
import betterplotlib as bpl
import numpy as np
bpl.default_style()
xs = np.arange(0, 10, 0.01)
ys = np.cos(xs)
fig, [ax1, ax2] = bpl.subplots(ncols=2)
ax1.plot(xs, ys)
ax2.plot(xs, ys)
ax2.set_limits(0, 2*np.pi, -1.1, 1.1)
"""
# Any None values won't change the plot any.
self.set_xlim([x_min, x_max], **kwargs)
self.set_ylim([y_min, y_max], **kwargs)
def add_text(self, x, y, text, coords="data", **kwargs):
"""
Adds text to the specified location. Allows for easy specification of
the type of coordinates you are specifying.
Matplotlib allows the text to be in data or axes coordinates, but it's
hard to remember the transform needed for that. The `coords` parameter
here takes care of it.
The x and y locations can be specified in either data or axes coords.
If data coords are used, the text is placed at that data point. If axes
coords are used, the text is placed relative to the axes. (0,0) is the
bottom left, (1,1) is the top right. Remember to use the
horizontalalignment and verticalalignment parameters if it isn't quite
in the spot you expect.
Also consider using easy_add_text, which gives 9 possible locations to
add text with minimal consternation.
:param x: x location of the text to be added.
:type x: int, float
:param y: y location of the text to be added.
:type y: int, float
:param text: text to be added
:type text: str
:param coords: type of coordinates. This parameter can be either 'data'
or 'axes'. 'data' puts the text at that data point. 'axes' puts the
text in that location relative the axes. See above.
:type coords: str
:param kwargs: any additional keyword arguments to pass on the text
function. Pass things you would pass to plt.text()
:return: Same as output of plt.text().
Example:
.. plot::
:include-source:
import betterplotlib as bpl
import numpy as np
bpl.default_style()
xs = np.arange(0, 7, 0.1)
ys = xs**2
fig, ax = bpl.subplots()
ax.plot(xs, ys)
ax.add_text(2, 30, "(2, 30) data", ha="center", va="center")
ax.add_text(0.6, 0.2, "60% across, 20% up", "axes")
"""
# this function takes care of the transform keyword already, so don't
# allow the user to specify it.
if "transform" in kwargs:
raise ValueError("add_text takes care of the transform for you when"
" you specify coords. \n"
"Don't specify transform in this function.")
# set the proper coordinate transformation
if coords == "data":
transform = self.transData
elif coords == "axes":
transform = self.transAxes
else:
raise ValueError("`coords` must be either 'data' or 'axes'")
# putting it in kwargs makes it easier to pass on.
kwargs["transform"] = transform
# add the text
return self.text(x, y, text, **kwargs)
def remove_labels(self, labels_to_remove):
"""
Removes the labels and tick marks from an axis border.
This is useful for making conceptual plots where the numbers on the axis
don't matter. Axes labels still work, also.
Note that this can break when used with the various `remove_*()`
functions. Order matters with these calls, presumably due to something
with the way matplotlib works under the hood. Mess around with it if
you're having trouble.
:param labels_to_remove: location of labels to remove. Choose from:
"both", "x", or "y".
:type labels_to_remove: str
:return: None
Example:
.. plot::
:include-source:
import betterplotlib as bpl
import numpy as np
bpl.default_style()
xs = np.arange(0, 5, 0.1)
ys = xs**2
fig, ax = bpl.subplots()
ax.plot(xs, ys)
ax.remove_labels("y")
ax.remove_ticks(["top"])
ax.add_labels("Conceptual plot", "Axes labels still work")
"""
# validate their input
if labels_to_remove not in ["both", "x", "y"]:
raise ValueError('Please pass in either "x", "y", or "both".')
# then set the tick parameters.
self.tick_params(axis=labels_to_remove, bottom=False, top=False,
left=False, right=False, labelbottom=False,
labeltop=False, labelleft=False, labelright=False)
def legend(self, linewidth=0, *args, **kwargs):
"""Create a nice looking legend.
Works by calling the ax.legend() function with the given args and
kwargs. If some are not specified, they will be filled with values that
make the legend look nice.
:param linewidth: linewidth of the border of the legend. Defaults to
zero.
:param args: non-keyword arguments passed on to the ax.legend() function.
:param kwargs: keyword arguments that will be passed on to the
ax.legend() function. This will be things like loc,
and title, etc.
:return: legend object returned by the ax.legend() function.
The default legend is a transparent background with no border, like so.
.. plot::
:include-source:
import numpy as np
import betterplotlib as bpl
import matplotlib.pyplot as plt
bpl.default_style()
x = np.arange(0, 5, 0.1)
fig = plt.figure(figsize=[15, 7])
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122, projection="bpl") # bpl subplot.
for ax in [ax1, ax2]:
ax.plot(x, x, label="x")
ax.plot(x, 2*x, label="2x")
ax.plot(x, 3*x, label="3x")
ax.legend(loc=2)
ax1.set_title("matplotlib")
ax2.set_title("betterplotlib")
You can still pass in any kwargs to the legend function you want.
.. plot::
:include-source:
import betterplotlib as bpl
import numpy as np
bpl.default_style()
x = np.arange(0, 5, 0.1)
fig, ax = bpl.subplots()
ax.plot(x, x, label="x")
ax.plot(x, 2*x, label="2x")
ax.plot(x, 3*x, label="3x")
ax.legend(fontsize=20, loc=6, title="Title")
"""
# push the legend a little farther away from the edge.
kwargs.setdefault('borderaxespad', 0.75)
leg = super(Axes_bpl, self).legend(*args, **kwargs)
# TODO: set the fontsize of the title properly. The best way to do it is
# probably to get the font from one of the other text objects, then
# increment that slightly, then set the title's fontsize to be that.
# the fontsize param doesn't change the title, so do that manually
# title = legend.get_title()
# title.set_fontsize(kwargs['fontsize'])
if leg is not None:
# turn the background into whatever color it needs to be
frame = leg.get_frame()
frame.set_linewidth(linewidth)
if not mpl_2:
frame.set_alpha(0.6)
return leg
def equal_scale(self):
""" Makes the x and y axes have the same scale.
Useful for plotting things like ra and dec, something with the same
quantity on both axes, or anytime the x and y axis have the same scale.
It's really only one command, but it's one I have a hard time
remembering.
Note that this keeps the range the same from the plot as before, so you
may want to adjust the limits to make the plot look better. It will
keep the axes adjusted the same, though, no matter how you change the
limits afterward.
:return: None
Examples:
.. plot::
:include-source:
import betterplotlib as bpl
import numpy as np
bpl.default_style()
# make a Gaussian with more spread in y direction
xs = np.random.normal(0, 1, 10000)
ys = np.random.normal(0, 2, 10000)
fig, [ax1, ax2] = bpl.subplots(figsize=[12, 5], ncols=2)
ax1.scatter(xs, ys)
ax2.scatter(xs, ys)
ax2.equal_scale()
ax1.add_labels(title="Looks symmetric")
ax2.add_labels(title="Shows true shape")
Here is proof that changing the limits doesn't change the scaling between
the axes.
.. plot::
:include-source:
import betterplotlib as bpl
import numpy as np
bpl.default_style()
# make a Gaussian with more spread in y direction
xs = np.random.normal(0, 1, 10000)
ys = np.random.normal(0, 2, 10000)
fig, [ax1, ax2] = bpl.subplots(figsize=[12, 5], ncols=2)
ax1.scatter(xs, ys)
ax2.scatter(xs, ys)
ax1.equal_scale()
ax2.equal_scale()
ax1.set_limits(-10, 10, -4, 4)
ax2.set_limits(-5, 5, -10, 10)
"""
self.set_aspect("equal", adjustable="box")
def easy_add_text(self, text, location, **kwargs):
"""
Adds text in common spots easily.
This was inspired by the plt.legend() function and its loc parameter,
which allows for easy placement of legends. This does a similar thing,
but just for text.
VERY IMPORTANT NOTE: Although this works similar to plt.legend()'s loc
parameter, the numbering is NOT the same. My numbering is based on the
keypad. 1 is in the bottom left, 5 in the center, and 9 in the top
right. You can also specify words that tell the location.
:param text: Text to add to the axes.
:type text: str
:param location: Location to add the text. This can be specified in
two possible ways. You can pass an integer, which
puts the text at the location corresponding to that
number's location on a standard keyboard numpad.
You can also pass a string that describes the location.
'upper', 'center', and 'lower' describe the vertical
location, and 'left', 'center', and 'right' describe
the horizontal location. You need to specify vertical,
then horizontal, like 'upper right'. Note that
'center' is the code for the center, not
'center center'.
:type location: str, int
:param kwargs: additional text parameters that will be passed on to the
plt.text() function. Note that this function controls the
x and y location, as well as the horizontal and vertical
alignment, so do not pass those parameters.
:return: Same as output of plt.text()
Example:
There are two ways to specify the location, and we will demo both.
.. plot::
:include-source:
import betterplotlib as bpl
bpl.default_style()
bpl.easy_add_text("1", 1)
bpl.easy_add_text("2", 2)
bpl.easy_add_text("3", 3)
bpl.easy_add_text("4", 4)
bpl.easy_add_text("5", 5)
bpl.easy_add_text("6", 6)
bpl.easy_add_text("7", 7)
bpl.easy_add_text("8", 8)
bpl.easy_add_text("9", 9)
.. plot::
:include-source:
import betterplotlib as bpl
bpl.default_style()
bpl.easy_add_text("upper left", "upper left")
bpl.easy_add_text("upper center", "upper center")
bpl.easy_add_text("upper right", "upper right")
bpl.easy_add_text("center left", "center left")
bpl.easy_add_text("center", "center")
bpl.easy_add_text("center right", "center right")
bpl.easy_add_text("lower left", "lower left")
bpl.easy_add_text("lower center", "lower center")
bpl.easy_add_text("lower right", "lower right")
"""
# check that the user didn't specify parameters I want to control.
if 'ha' in kwargs or 'va' in kwargs or 'horizontalalignment' in kwargs \
or 'verticalalignment' in kwargs:
raise ValueError("This function controls the alignment. Do not"
"pass it in.")
# then check each different case, and set the parameters we want to use.
if location == 1 or location == "lower left":
x_value = 0.04
y_value = 0.04
kwargs['horizontalalignment'] = "left"
kwargs['verticalalignment'] = "bottom"
elif location == 2 or location == "lower center":
x_value = 0.5
y_value = 0.04
kwargs['horizontalalignment'] = "center"
kwargs['verticalalignment'] = "bottom"
elif location == 3 or location == "lower right":
x_value = 0.96
y_value = 0.04
kwargs['horizontalalignment'] = "right"
kwargs['verticalalignment'] = "bottom"
elif location == 4 or location == "center left":
x_value = 0.04
y_value = 0.5
kwargs['horizontalalignment'] = "left"
kwargs['verticalalignment'] = "center"
elif location == 5 or location == "center":
x_value = 0.5
y_value = 0.5
kwargs['horizontalalignment'] = "center"
kwargs['verticalalignment'] = "center"
elif location == 6 or location == "center right":
x_value = 0.96
y_value = 0.5
kwargs['horizontalalignment'] = "right"
kwargs['verticalalignment'] = "center"
elif location == 7 or location == "upper left":
x_value = 0.04
y_value = 0.96
kwargs['horizontalalignment'] = "left"
kwargs['verticalalignment'] = "top"
elif location == 8 or location == "upper center":
x_value = 0.5
y_value = 0.96
kwargs['horizontalalignment'] = "center"
kwargs['verticalalignment'] = "top"
elif location == 9 or location == "upper right":
x_value = 0.96
y_value = 0.96
kwargs['horizontalalignment'] = "right"
kwargs['verticalalignment'] = "top"
else:
raise ValueError("loc was not specified properly.")
# then add the text.
return self.add_text(x_value, y_value, text, coords="axes", **kwargs)
def contour_scatter(self, xs, ys, fill_cmap="white", bin_size=None,
min_level=5, num_contours=7, scatter_kwargs=dict(),
contour_kwargs=dict()):
"""
Create a contour plot with scatter points in the sparse regions.
When a dataset is large, plotting a scatterplot is often really hard to
understand, due to many points overlapping and the high density of
points overall. A contour or hexbin plot solves many of these problems,
but these still have the disadvantage of making outliers less obvious.
A simple solution is to plot contours in the dense regions, while
plotting individual points where the density is low. That is what
this function does.
Here's how this works under the hood. Skip this paragraph if you don't
care; it won't affect how you use this. This function uses the numpy
2D histogram function to create an array representing the density in
each region. If no binning info is specified by the user, the
Freedman-Diaconis algorithm is used in both dimensions to find the
ideal bin size for the data. First, an opaque filled contour is
plotted, then the contour lines are put on top. Then the outermost
contour is made into a matplotlib path object, which lets us
check which of the points are outside of this contour. Only the points
that are outside are plotted.
The parameters of this function are more complicated than others in
betterplotlib and are somewhat arbitrary, so please read the info below
carefully. The examples should make things more clear also.
:param xs: list of x values of your data
:type xs: list
:param ys: list of y values of your data
:type ys: list
:param fill_cmap: Colormap that will fill the opaque contours. Defaults
to "white", which is just a solid white fill. You can
pass any name of a matplotlib colormap, as well as
some options I have created. "background_grey" gives
a solid fill that is the same color as the
make_ax_dark() background. "modified_greys" is a
colormap that starts at the "background_grey" color,
then transitions to black.
:type fill_cmap: str
:param bin_size: Size of the bins used in the 2D histogram. This is kind
of an arbitrary parameter. The code will guess a
value for this if none is passed in, but this value
isn't always good. A smaller value gives noisier
contours. A value that is too large will lead to
"chunky" contours. Adjust this until your contours
look good to your eye. That's the best
way to pick a value for this parameter. It can be
either a scalar, in which case that will be used for
both x and y. It could also be a two element list, in
which case it will be the bin size for x and y.
:type bin_size: float or list
:param min_level: This is another arbitrary parameter that determines
how high the density of points needs to be before
the outer contour is drawn. The higher the value,
the more points will be outside the last contour.
Again, adjust this until it looks good to your eye.
The default parameter choice will generally be okay,
though. Also note that if you want to specify the
levels yourself, use the `levels` keyword.
:type min_level: int
:param num_contours: Number of contour lines to be drawn between the
lowest and highest density regions. Adjust this
until the plot looks good to your eye. Also note
that if you want to specify the levels yourself,
use the `levels` keyword.
:type num_contours: int
:param scatter_kwargs: This is a dictionary of keywords that will be
passed on to the `bpl.scatter()` function. Note
that this doesn't work like normal kwargs. You
need to pass in a dictionary. This is because we
have to separate the kwargs that go to the
scatter function from the ones that go to the
contour function.
:type scatter_kwargs: dict
:param contour_kwargs: This is a dictionary of keywords that will be
passed on to the `plt.contour()` function. Note
that this doesn't work like normal kwargs. You
need to pass in a dictionary. This is because we
have to separate the kwargs that go to the
scatter function from the ones that go to the
contour function.
:type contour_kwargs: dict
:return: The output of the `contour` call will be returned. This doesn't
need to be saved, you can use it if you want.
Examples
First, we'll show why this plot is useful. This won't use any of the
fancy settings, other than `bin_size`, which is used to make the
contours look nicer.
.. plot::
:include-source:
import numpy as np
import betterplotlib as bpl
bpl.default_style()
xs = np.concatenate([np.random.normal(0, 1, 100000),
np.random.normal(3, 1, 100000),
np.random.normal(0, 1, 100000)])
ys = np.concatenate([np.random.normal(0, 1, 100000),
np.random.normal(3, 1, 100000),
np.random.normal(3, 1, 100000)])
fig, (ax1, ax2) = bpl.subplots(ncols=2, figsize=[10, 5])
ax1.scatter(xs, ys)
ax2.contour_scatter(xs, ys, bin_size=0.3)
The scatter plot is okay, but the contour makes things easier to see.
We'll now mess with some of the other parameters. This plot shows how
the `bin_size` parameter changes things.
.. plot::
:include-source:
import numpy as np
import betterplotlib as bpl
bpl.default_style()
xs = np.concatenate([np.random.normal(0, 1, 100000),
np.random.normal(3, 1, 100000),
np.random.normal(0, 1, 100000)])
ys = np.concatenate([np.random.normal(0, 1, 100000),
np.random.normal(3, 1, 100000),
np.random.normal(3, 1, 100000)])
fig, (ax1, ax2, ax3) = bpl.subplots(ncols=3, figsize=[15, 5])
ax1.contour_scatter(xs, ys, bin_size=0.1)
ax2.contour_scatter(xs, ys, bin_size=0.2)
ax3.contour_scatter(xs, ys, bin_size=0.5)
You can see how small values of `bin_size` lead to more noisy contours.
The code will attempt to choose its own value of `bin_size` if nothing
is specified, but it's normally not a very good choice.
For a given value of `bin_size`, changing `min_level` adjusts the height
at which the first contours are drawn.
.. plot::
:include-source:
import numpy as np
import betterplotlib as bpl
bpl.default_style()
xs = np.concatenate([np.random.normal(0, 1, 100000),
np.random.normal(3, 1, 100000),
np.random.normal(0, 1, 100000)])
ys = np.concatenate([np.random.normal(0, 1, 100000),
np.random.normal(3, 1, 100000),
np.random.normal(3, 1, 100000)])
fig, (ax1, ax2, ax3) = bpl.subplots(ncols=3, figsize=[15, 5])
ax1.contour_scatter(xs, ys, bin_size=0.3, min_level=2)
ax2.contour_scatter(xs, ys, bin_size=0.3, min_level=15)
ax3.contour_scatter(xs, ys, bin_size=0.3, min_level=50)
The code sets `min_level = 5` if you don't set it.
As expected, `num_contours` adjusts the number of contours drawn.
.. plot::
:include-source:
import numpy as np
import betterplotlib as bpl
bpl.default_style()
xs = np.concatenate([np.random.normal(0, 1, 100000),
np.random.normal(3, 1, 100000),
np.random.normal(0, 1, 100000)])
ys = np.concatenate([np.random.normal(0, 1, 100000),
np.random.normal(3, 1, 100000),
np.random.normal(3, 1, 100000)])
fig, (ax1, ax2, ax3) = bpl.subplots(ncols=3, figsize=[15, 5])
ax1.contour_scatter(xs, ys, bin_size=0.3, num_contours=2)
ax2.contour_scatter(xs, ys, bin_size=0.3, num_contours=5)
ax3.contour_scatter(xs, ys, bin_size=0.3, num_contours=10)
Now we can mess with the fun stuff, which is the `fill_cmap` param and
the kwargs that get passed to the `scatter` and `contour` function
calls. There is a lot of stuff going on here, just for demonstration
purposes. Note that the code has some default parameters that it will
choose if you don't specify anything.
.. plot::
:include-source:
import numpy as np
import betterplotlib as bpl
bpl.default_style()
xs = np.concatenate([np.random.normal(0, 1, 100000),
np.random.normal(3, 1, 100000),
np.random.normal(0, 1, 100000)])
ys = np.concatenate([np.random.normal(0, 1, 100000),
np.random.normal(3, 1, 100000),
np.random.normal(3, 1, 100000)])
fig, axs = bpl.subplots(nrows=2, ncols=2)
[ax1, ax2], [ax3, ax4] = axs
ax1.contour_scatter(xs, ys, bin_size=0.3,
fill_cmap="background_grey",
contour_kwargs={"cmap":"magma"},
scatter_kwargs={"s":10, "c":bpl.almost_black})
ax1.make_ax_dark()
# or we can choose our own `fill_cmap`
ax2.contour_scatter(xs, ys, bin_size=0.3, fill_cmap="viridis",
contour_kwargs={"linewidths":1,
"colors":"white"},
scatter_kwargs={"s":50, "c":bpl.color_cycle[3],
"alpha":0.3})
# There are also my colormaps that work with the dark axes
ax3.contour_scatter(xs, ys, bin_size=0.3, fill_cmap="modified_greys",
num_contours=7,
scatter_kwargs={"c": bpl.color_cycle[0]},
contour_kwargs={"linewidths":[2,0,0,0,0,0,0],
"colors":bpl.almost_black})
ax3.make_ax_dark()
# the default `fill_cmap` is white.
ax4.contour_scatter(xs, ys, bin_size=0.3, num_contours=3,
scatter_kwargs={"marker":"^", "linewidth":0.2,
"c":bpl.color_cycle[1], "s":20},
contour_kwargs={"linestyles":["solid", "dashed",
"dashed", "dashed"],
"colors":bpl.almost_black})
Note that the contours will work appropriately for datasets with
"holes", as demonstrated here.
.. plot::
:include-source:
import numpy as np
import betterplotlib as bpl
bpl.default_style()
rad1 = np.random.normal(10, 0.75, 100000)
theta1 = np.random.uniform(0, 2 * np.pi, 100000)
x1 = [r * np.cos(t) for r, t in zip(rad1, theta1)]
y1 = [r * np.sin(t) for r, t in zip(rad1, theta1)]
rad2 = np.random.normal(20, 0.75, 200000)
theta2 = np.random.uniform(0, 2 * np.pi, 200000)
x2 = [r * np.cos(t) for r, t in zip(rad2, theta2)]
y2 = [r * np.sin(t) for r, t in zip(rad2, theta2)]
rad3 = np.random.normal(12, 0.75, 120000)
theta3 = np.random.uniform(0, 2 * np.pi, 120000)
x3 = [r * np.cos(t) + 10 for r, t in zip(rad3, theta3)]
y3 = [r * np.sin(t) + 10 for r, t in zip(rad3, theta3)]
x4 = np.random.uniform(-20, 20, 35000)
y4 = x4 + np.random.normal(0, 0.5, 35000)
y5 = y4 * (-1)
xs = np.concatenate([x1, x2, x3, x4, x4])
ys = np.concatenate([y1, y2, y3, y4, y5])
fig, ax = bpl.subplots()
ax.contour_scatter(xs, ys)
ax.equal_scale()
"""
# copy the kwarg dicts so we never mutate the shared defaults or the caller's dicts
scatter_kwargs = dict(scatter_kwargs)
contour_kwargs = dict(contour_kwargs)
# first get the density info we need to make contours
x_centers, y_centers, hist = _tools._make_density_contours(xs, ys,
bin_size)
# then determine what our colormap for the fill will be
if fill_cmap == "white":
# colormap with one color: white
fill_cmap = mpl_colors.ListedColormap(colors="white", N=1)
elif fill_cmap == "background_grey":
# colormap with one color: the light grey used in backgrounds
fill_cmap = mpl_colors.ListedColormap(colors=colors.light_gray, N=1)
elif fill_cmap == "modified_greys":
# make one that transitions from light grey to black
these_colors = [colors.light_gray, "black"]
fill_cmap = mpl_colors.LinearSegmentedColormap.from_list("mod_gray",
these_colors)
# then we can set a bunch of default parameters for the contours
contour_kwargs.setdefault("linewidths", 2)
contour_kwargs["zorder"] = 3
if "colors" not in contour_kwargs:
contour_kwargs.setdefault("cmap", "viridis")
# We then want to find the correct heights for the levels of the contours
max_hist = int(np.ceil(max(hist.flatten())))
if max_hist < min_level:
raise ValueError("Min_level needs to be lower. This will be fixed.")
#TODO: actually fix this!
levels = np.linspace(min_level, max_hist, num_contours + 1)
# we add one to the number of contours because we have the highest one at
# the highest point, so it won't be shown.
contour_kwargs["levels"] = levels
# we can then go ahead and plot the filled contours, then the contour lines
super(Axes_bpl, self).contourf(x_centers, y_centers, hist, levels=levels,
cmap=fill_cmap, zorder=2)
contours = super(Axes_bpl, self).contour(x_centers, y_centers, hist,
**contour_kwargs)
# we saved the output from the contour, since it has information about the
# shape of the contours we can use to figure out which points are outside
# and therefore need to be plotted. There may be multiple outside contours,
# especially if the shape is complicated, so we test to see how many
# each point is inside
shapes_in = np.zeros(len(xs))
for line in contours.collections[0].get_segments():
# make a closed shape with the line
polygon = path.Path(line, closed=True)
# then figure out which points are inside it
shapes_in += polygon.contains_points(list(zip(xs, ys)))
# the ones that need to be hidden are inside an odd number of shapes. This
# sounds weird, but actually works. If we have a ring of points, the
# outliers in the middle will be inside the outermost and innermost
# contours, so they are inside two shapes. We want to plot these. So we
# plot the ones that are divisible by two.
plot_idx = np.where(shapes_in % 2 == 0)
# We then get these elements. The multiple indexing is only supported for
# numpy arrays, not Python lists, so convert our values to that first.
outside_xs = np.array(xs)[plot_idx]
outside_ys = np.array(ys)[plot_idx]
# now we can do our scatterplot.
scatter_kwargs.setdefault("alpha", 1.0)
scatter_kwargs.setdefault("s", 10)
if "c" not in scatter_kwargs:
scatter_kwargs.setdefault("color", colors.almost_black)
scatter_kwargs["zorder"] = 1
self.scatter(outside_xs, outside_ys, **scatter_kwargs)
return contours
def data_ticks(self, x_data, y_data, extent=0.015, *args, **kwargs):
"""
Puts tiny ticks on the axis borders marking the location of each point.
:param x_data: list of values to mark on the x-axis.
:type x_data: list
:param y_data: list of values to mark on the y-axis. This doesn't have
to be the same length as `x_data`, necessarily.
:type y_data: list
:param extent: How far the ticks go up from the x-axis. The default is
0.015, meaning the ticks go 1.5% of the way to the top of
the plot. Note that the ticks created by this function
will have the same physical size on both axes. Since in
general the x and y axes aren't the same physical size,
the ticks on the y-axis will be scaled to match the
physical size of the x ticks. This means that in the
default case, the y ticks won't cover exactly 1.5% of the axis, but
again will be the same physical size as the x ticks.
:type extent: float
:param args: Additional arguments to pass to the `axvline` and `axhline`
functions, which is what is used to make each tick.
:param kwargs: Additional keyword arguments to pass to the `axvline` and
`axhline` functions. `color` is an important one here,
and it defaults to `almost_black` here.
Example
.. plot::
:include-source:
import numpy as np
import betterplotlib as bpl
bpl.default_style()
xs = np.random.normal(0, 1, 100)
ys = np.random.normal(0, 1, 100)
fig, ax = bpl.subplots()
ax.scatter(xs, ys)
ax.data_ticks(xs, ys)
"""
kwargs.setdefault("color", colors.almost_black)
kwargs.setdefault("linewidth", 0.5)
for x in x_data:
self.axvline(x, ymin=0, ymax=extent, *args, **kwargs)
# Since the matplotlib command to ax(h/v)line uses an extent based on
# percentage of the way to the end, to get the same physical size for
# both axes, we have to scale based on the size of the axes
h_extent = (self.bbox.height / self.bbox.width) * extent
for y in y_data:
self.axhline(y, xmin=0, xmax=h_extent, *args, **kwargs)
def plot(self, *args, **kwargs):
"""A slightly improved plot function.
This is best used for plotting lines, while the `scatter()` function
is best used for plotting points.
Currently all this does is make the lines thicker, which looks better.
There isn't any added functionality.
The parameters here are the exact same as they are for the regular
`plt.plot()` or `ax.plot()` functions, so I don't think any
documentation would be helpful.
.. plot::
:include-source:
import numpy as np
import betterplotlib as bpl
import matplotlib.pyplot as plt
bpl.default_style()
xs = np.arange(0, 1, 0.01)
ys_1 = xs
ys_2 = xs**2
fig = plt.figure(figsize=[15, 7])
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122, projection="bpl") # bpl subplot.
ax1.plot(xs, ys_1)
ax1.plot(xs, ys_2)
ax2.plot(xs, ys_1)
ax2.plot(xs, ys_2)
ax1.set_title("matplotlib")
ax2.set_title("betterplotlib")
"""
# set the linewidth to a thicker value. There are two keys here, though,
# so we have to be careful.
if not ("lw" in kwargs or "linewidth" in kwargs):
kwargs.setdefault("lw", 3)
return super(Axes_bpl, self).plot(*args, **kwargs)
def axvline(self, x=0, *args, **kwargs):
""" Place a vertical line at some point on the axes.
:param x: Data value on the x-axis to place the line.
:type x: float
:param args: Additional parameters that will be passed on to the
regular `plt.axvline` function. See its documentation
for details.
:param kwargs: Similarly, additional keyword arguments that will be
passed on to the regular `plt.axvline` function.
.. plot::
:include-source:
import numpy as np
import betterplotlib as bpl
bpl.default_style()
left_xs = np.arange(-20, 1, 0.01)
right_xs = np.arange(1.001, 20, 0.01)
left_ys = left_xs / (left_xs - 1)
right_ys = right_xs / (right_xs - 1)
fig, ax = bpl.subplots()
ax.make_ax_dark()
ax.plot(left_xs, left_ys, c=bpl.color_cycle[2])
ax.plot(right_xs, right_ys, c=bpl.color_cycle[2])
ax.axvline(1.0, linestyle="--")
ax.axhline(1.0, linestyle="--")
ax.set_limits(-10, 10, -10, 10)
"""
# set the color to be almost black. Matplotlib has two keywords for
# color, so we need to check both here.
if not ("c" in kwargs or "color" in kwargs):
kwargs.setdefault("c", colors.almost_black)
return super(Axes_bpl, self).axvline(x, *args, **kwargs)
def axhline(self, y=0, *args, **kwargs):
""" Place a horizontal line at some point on the axes.
:param y: Data value on the y-axis to place the line.
:type y: float
:param args: Additional parameters that will be passed on to the
regular `plt.axhline` function. See its documentation
for details.
:param kwargs: Similarly, additional keyword arguments that will be
passed on to the regular `plt.axhline` function.
.. plot::
:include-source:
import numpy as np
import betterplotlib as bpl
bpl.default_style()
left_xs = np.arange(-20, 1, 0.01)
right_xs = np.arange(1.001, 20, 0.01)
left_ys = left_xs / (left_xs - 1)
right_ys = right_xs / (right_xs - 1)
fig, ax = bpl.subplots()
ax.make_ax_dark()
ax.plot(left_xs, left_ys, c=bpl.color_cycle[2])
ax.plot(right_xs, right_ys, c=bpl.color_cycle[2])
ax.axvline(1.0, linestyle="--")
ax.axhline(1.0, linestyle="--")
ax.set_limits(-10, 10, -10, 10)
"""
# set the color to be almost black. Matplotlib has two keywords for
# color, so we need to check both here.
if not ("c" in kwargs or "color" in kwargs):
kwargs.setdefault("c", colors.almost_black)
return super(Axes_bpl, self).axhline(y, *args, **kwargs)
def errorbar(self, *args, **kwargs):
"""Wrapper for the plt.errorbar() function.
Style changes: capsize is automatically zero, and the format is
automatically a scatter plot, rather than the connected lines that
are used by default otherwise. It also adds a black marker edge to
distinguish the markers when there are lots of data points. Otherwise
everything blends together.
.. plot::
:include-source:
import numpy as np
import betterplotlib as bpl
import matplotlib.pyplot as plt
bpl.default_style()
xs = np.random.normal(0, 1, 100)
ys = np.random.normal(0, 1, 100)
yerr = np.random.uniform(0.3, 0.8, 100)
xerr = np.random.uniform(0.3, 0.8, 100)
fig = plt.figure(figsize=[15, 7])
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122, projection="bpl") # bpl subplot.
for ax in [ax1, ax2]:
ax.errorbar(xs, ys, xerr=xerr, yerr=yerr, label="set 1")
ax.errorbar(xs+1, ys+1, xerr=xerr, yerr=yerr, label="set 2")
ax.legend()
ax1.set_title("matplotlib")
ax2.set_title("betterplotlib")
"""
kwargs.setdefault("capsize", 0)
kwargs.setdefault("fmt", "o")
kwargs.setdefault('markeredgewidth', 0.25)
kwargs.setdefault('markeredgecolor', colors.almost_black)
return super(Axes_bpl, self).errorbar(*args, **kwargs)
def twin_axis_simple(self, axis, lower_lim, upper_lim, label="", log=False):
"""Creates a differently scaled axis on either the top or the left.
This can be used to put multiple scales on one plot for easier
comparison. Some examples might be distance/time, redshift/age, or
any two related quantities.
Note that this only does simple scalings of the new axes, which will
still only be linear or log scaled axes. If you want a function that
smartly places labels based on a function that takes one set of axes
values to another (in a potentially nonlinear way), use the
`twin_axis` function instead.
:param axis: Where the new scaled axis will be placed. Must
either be "x" or "y".
:param lower_lim: Value to be put on the left/bottom of the newly
created axis.
:param upper_lim: Value to be put on the right/top of the newly
created axis.
:param label: The label to put on this new axis.
:param log: Whether or not to log scale this axis.
:returns: the new axes
.. plot::
:include-source:
import betterplotlib as bpl
bpl.presentation_style()
fig, ax = bpl.subplots(tight_layout=True)
ax.set_limits(0, 10, 0, 5)
ax.add_labels("x", "y")
ax.twin_axis_simple("x", 0, 100, r"$10 x$")
ax.twin_axis_simple("y", 1, 10**5, r"$10^y$", log=True)
Note that for a slightly more complicated version of this plot, say if
we wanted the top x axis to be x^2 rather than 10x, the limits would
still be the same, but since the new axis will always be a linear or log
scale the new axis won't represent the true relationship between the
variables on the twin axes. See the `twin_axis` function for that.
"""
if axis == "x":
new_ax = super(Axes_bpl, self).twiny()
new_ax.set_xlim(lower_lim, upper_lim)
if log:
new_ax.set_xscale("log")
new_ax.set_xlabel(label)
elif axis == "y":
new_ax = super(Axes_bpl, self).twinx()
new_ax.set_ylim(lower_lim, upper_lim)
if log:
new_ax.set_yscale("log")
new_ax.set_ylabel(label)
else:
raise ValueError("Axis must be either 'x' or 'y'. ")
return new_ax
def twin_axis(ax, func, axis, new_ticks, label=None):
"""
Create a twin axis, where the new axis values are an arbitrary function
of the old values.
This is used when you want to put two related quantities on the axis,
for example distance/redshift in astronomy, where one isn't a simple
scaling of the other. If you want a simple linear or log scale, use the
`twin_axis_simple` function. This one will create a new axis that is
an arbitrary scale.
Here is a way this can be used.
.. plot::
:include-source:
import betterplotlib as bpl
bpl.presentation_style()
def square(x):
return x**2
def cubed(x):
return x**3
fig, ax = bpl.subplots(figsize=[5, 5], tight_layout=True)
ax.set_limits(0, 10, 0, 10.0001) # to avoid floating point errors
ax.add_labels("x", "y")
ax.twin_axis(square, "y", [0, 10, 30, 60, 100], r"$y^2$")
ax.twin_axis(cubed,"x", [0, 10, 100, 400, 1000], r"$x^3$")
Note that we had to be careful with floating point errors when one of
the markers we want is exactly on the edge. Make the borders slightly
larger to ensure that all labels fit on the plot.
This function will ignore values for the ticks that are outside the
limits of the plot. The following plot isn't the most useful, since
it could be done with the `twin_axis_simple` function, but it gets the idea
across.
.. plot::
:include-source:
import betterplotlib as bpl
import numpy as np
bpl.presentation_style()
xs = np.logspace(0, 3, 100)
fig, ax = bpl.subplots(figsize=[5, 5], tight_layout=True)
ax.plot(xs, xs)
ax.set_xscale("log")
ax.set_yscale("log")
ax.add_labels("x", "y")
# extraneous values are ignored.
ax.twin_axis(np.log10, "x", [-1, 0, 1, 2, 3, 4, 5], "log(x)")
ax.twin_axis(np.log10, "y", [-1, 0, 1, 2, 3, 4, 5], "log(y)")
:param func: Function that transforms values from the original axis
into values that will be marked on the axis that will
be created here.
:param axis: Whether the new axis labels will be on the "x" or "y" axis.
If "x" is chosen this will place the markers on the top
botder of the plot, while "y" will place the values on the
left border of the plot. "x" and "y" are the only
allowed values.
:param new_ticks: List of locations (in the new data values) to place
ticks. Any values outside the range of the plot
will be ignored.
:param label: The label to put on the newly created axis.
:return: New axis object that was created, containing the newly
created labels.
"""
# automatically choosing the new tick locations is not yet supported. You
# have to pass your own in.
# if new_ticks is None:
# if axis == "x":
# new_ticks = create_new_bins(func, ax.get_xticks())
# elif axis == "y":
# new_ticks = create_new_bins(func, ax.get_yticks())
# implementation details: The data values for the old axes will be used
# as the data values for the new scaled axis. This ensures that they
# will line up with each other. However, we will set the label text
# to be the values of func(x). The user will pass in the values of
# func(x), so we have to invert this to find the values of x where we
# will set the labels. This can be done with scipy.
# depending on which axis the user wants to use, we have to get
# different things.
if axis == "y":
new_ax = ax.twinx() # shares y axis
old_min, old_max = ax.get_ylim()
lim_func = new_ax.set_ylim # function to set limits
new_axis = new_ax.yaxis
new_ax.set_ylabel(label)
# the new axis needs to share the same scaling as the old
if ax.get_yscale() == "log":
new_ax.set_yscale("log")
# if we have log in old, we don't want minor ticks on the new
new_axis.set_tick_params(which="minor", length=0)
new_ax.set_ylabel(label)
elif axis == "x":
new_ax = ax.twiny() # shares x axis
old_min, old_max = ax.get_xlim()
lim_func = new_ax.set_xlim # function to set limits
new_axis = new_ax.xaxis
new_ax.set_xlabel(label)
# the new axis needs to share the same scaling as the old
if ax.get_xscale() == "log":
new_ax.set_xscale("log")
# if we have log in old, we don't want minor ticks on the new
new_axis.set_tick_params(which="minor", length=0)
else:
raise ValueError("`axis` must either be 'x' or 'y'. ")
# set the limits using the function we got earlier. We use the values
# of the old axes for the underlying data
lim_func(old_min, old_max)
# then determine the locations to put the new ticks, in terms of the
# old values
tick_locs_in_old = []
new_ticks_good = []
for new_value in new_ticks:
# define a function to minimize so scipy can work.
def minimize(x):
return abs(func(x) - new_value)
# ignore numpy warnings here, everything is fine.
with np.errstate(all='ignore'):
old_data_loc = optimize.minimize_scalar(minimize).x
# then check if it's within the correct values
if old_min <= old_data_loc <= old_max:
tick_locs_in_old.append(old_data_loc)
new_ticks_good.append(new_value)
# then put the ticks at the locations of the old data, but label them
# with the value of the transformed data.
new_axis.set_ticks(tick_locs_in_old)
new_axis.set_ticklabels(new_ticks_good)
return new_ax
| gillenbrown/betterplotlib | betterplotlib/axes_bpl.py | Python | mit | 69,193 | [ "Gaussian" ] | 004eb6d6f83f6e1273393333f6b202eebab6d34cf9e2d6e1e458abcd95fd4a37 |
#coding:utf8
'''
Created on 2013-8-2
@author: lan (www.9miao.com)
'''
from firefly.utils.singleton import Singleton
class GlobalObject:
__metaclass__ = Singleton
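# Process-wide singleton holding references to the running services: the
# client-facing net factory, the distributed root node, remote nodes, the
# database handle and the web root resource.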
def __init__(self):
self.netfactory = None  # net front end (client-facing network service)
self.root = None  # distributed root node
self.remote = {}  # remote nodes
self.db = None
self.stophandler = None
self.webroot = None
self.masterremote = None
self.reloadmodule = None
self.remote_connect = None
self.json_config = {}
self.remote_map = {}
def config(self,netfactory=None,root = None,remote=None,db=None):
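# Convenience setter used at startup to wire in the running service objects.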
self.netfactory = netfactory
self.root = root
self.remote = remote
self.db = db
def masterserviceHandle(target):
"""
"""
GlobalObject().masterremote._reference._service.mapTarget(target)
def netserviceHandle(target):
"""
"""
GlobalObject().netfactory.service.mapTarget(target)
def rootserviceHandle(target):
"""
"""
GlobalObject().root.service.mapTarget(target)
class webserviceHandle:
"""这是一个修饰符对象
"""
def __init__(self,url=None):
"""
@param url: str  the URL path under which the resource will be served
"""
self._url = url
def __call__(self,cls):
"""
"""
from twisted.web.resource import Resource
if self._url:
child_name = self._url
else:
child_name = cls.__name__
path_list = child_name.split('/')
temp_res = None
path_list = [path for path in path_list if path]
path_len = len(path_list)
for index,path in enumerate(path_list):
if index==0:
temp_res = GlobalObject().webroot
if index == path_len - 1:
res = cls()
temp_res.putChild(path, res)
return
else:
res = temp_res.children.get(path)
if not res:
res = Resource()
temp_res.putChild(path, res)
temp_res=res
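# Rough usage sketch (hypothetical handler; assumes a twisted.web Resource
# subclass defined by the application):
#
#     @webserviceHandle('shop/buy')
#     class BuyPage(Resource):
#         def render_GET(self, request):
#             return 'ok'
#
# This mounts BuyPage at /shop/buy under GlobalObject().webroot, creating the
# intermediate "shop" Resource node if it does not already exist.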
class remoteserviceHandle:
"""
"""
def __init__(self,remotename):
"""
"""
self.remotename = remotename
def __call__(self,target):
"""
"""
GlobalObject().remote[self.remotename]._reference._service.mapTarget(target)
| yangdw/PyRepo | src/annotation/Firefly/firefly/server/globalobject.py | Python | mit | 2,453 | [ "Firefly" ] | befa344d8948ef1e2ea24fce1c413a1620aef2aac2330ccc85c951479ddc731e |
import os
import time
import math
import director.vtkAll as vtk
import PythonQt
from PythonQt import QtCore
from PythonQt import QtGui
from director import getDRCBaseDir as getDRCBase
from director import botspy
from director import openscope
import functools
_mainWindow = None
_defaultRenderView = None
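# getMainWindow() lazily locates the application's top-level QMainWindow and
# caches it in _mainWindow; when _defaultRenderView is set, it overrides the
# view returned by getDRCView()/getCurrentView().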
def getMainWindow():
global _mainWindow
if _mainWindow is None:
for widget in QtGui.QApplication.topLevelWidgets():
if isinstance(widget, PythonQt.QtGui.QMainWindow):
_mainWindow = widget
break
return _mainWindow
def quit():
QtGui.QApplication.instance().quit()
def getViewManager():
return getMainWindow().viewManager()
def getDRCView():
return _defaultRenderView or getMainWindow().viewManager().findView('DRC View')
def getSpreadsheetView():
return getMainWindow().viewManager().findView('Spreadsheet View')
def getCurrentView():
return _defaultRenderView or getMainWindow().viewManager().currentView()
def getCurrentRenderView():
view = getCurrentView()
if hasattr(view, 'camera'):
return view
def getOutputConsole():
return getMainWindow().outputConsole()
def getPythonConsole():
return PythonQt.dd._pythonManager.consoleWidget()
def showPythonConsole():
getPythonConsole().show()
_exclusiveDockWidgets = {}
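# Maps a toggle action to its (dock, widget) pair; when one of these actions
# is triggered, hideDockWidgets() hides the docks owned by every other action
# so only one right-hand dock panel is visible at a time.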
def hideDockWidgets(action):
for a, w in _exclusiveDockWidgets.iteritems():
if a is not action:
dock, widget = w
if not dock.isFloating():
dock.hide()
def addWidgetToDock(widget, dockArea=QtCore.Qt.RightDockWidgetArea, action=None):
dock = QtGui.QDockWidget()
dock.setWidget(widget)
dock.setWindowTitle(widget.windowTitle)
getMainWindow().addDockWidget(dockArea, dock)
if dockArea == QtCore.Qt.RightDockWidgetArea and action:
_exclusiveDockWidgets[action] = (dock, widget)
action.connect('triggered()', functools.partial(hideDockWidgets, action))
if action is None:
getMainWindow().addWidgetToViewMenu(dock)
else:
getMainWindow().addWidgetToViewMenu(dock, action)
return dock
def resetCamera(viewDirection=None, view=None):
view = view or getCurrentRenderView()
assert(view)
if viewDirection is not None:
camera = view.camera()
camera.SetPosition([0, 0, 0])
camera.SetFocalPoint(viewDirection)
camera.SetViewUp([0,0,1])
view.resetCamera()
view.render()
def setBackgroundColor(color, color2=None, view=None):
view = view or getCurrentRenderView()
assert(view)
if color2 is None:
color2 = color
ren = view.backgroundRenderer()
ren.SetBackground(color)
ren.SetBackground2(color2)
def displaySnoptInfo(info):
if getMainWindow() is not None:
getMainWindow().statusBar().showMessage('Info: %d' % info)
def toggleStereoRender():
view = getCurrentRenderView()
assert(view)
renderWindow = view.renderWindow()
renderWindow.SetStereoRender(not renderWindow.GetStereoRender())
view.render()
def getCameraTerrainModeEnabled(view):
return isinstance(view.renderWindow().GetInteractor().GetInteractorStyle(), vtk.vtkInteractorStyleTerrain2)
def setCameraTerrainModeEnabled(view, enabled):
if getCameraTerrainModeEnabled(view) == enabled:
return
if enabled:
view.renderWindow().GetInteractor().SetInteractorStyle(vtk.vtkInteractorStyleTerrain2())
view.camera().SetViewUp(0,0,1)
else:
view.renderWindow().GetInteractor().SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())
view.render()
def toggleCameraTerrainMode(view = None):
view = view or getCurrentRenderView()
assert view
setCameraTerrainModeEnabled(view, not getCameraTerrainModeEnabled(view))
updateToggleTerrainAction(view)
def findMenu(menuTitle, mainWindow=None):
mainWindow = mainWindow or getMainWindow()
menus = mainWindow.findChildren('QMenu')
for menu in menus:
title = str(menu.title)
if title.startswith('&'):
title = title[1:]
if title == menuTitle:
return menu
def findToolBar(title, mainWindow=None):
mainWindow = mainWindow or getMainWindow()
bars = mainWindow.findChildren('QToolBar')
for bar in bars:
if title == str(bar.windowTitle):
return bar
def addMenuAction(menuTitle, actionName):
menu = findMenu(menuTitle)
assert menu
return menu.addAction(actionName)
def getToolBarActions():
return getActionsDict(getMainWindow().toolBarActions())
def getToolsMenuActions():
return getActionsDict(getMainWindow().toolsMenu().actions())
def getActionsDict(actions):
actionsDict = {}
for action in actions:
if action.name:
actionsDict[action.name] = action
return actionsDict
def updateToggleTerrainAction(view):
if not getMainWindow():
return
isTerrainMode = False
if hasattr(view, 'renderWindow'):
isTerrainMode = isinstance(view.renderWindow().GetInteractor().GetInteractorStyle(), vtk.vtkInteractorStyleTerrain2)
getToolBarActions()['ActionToggleCameraTerrainMode'].checked = isTerrainMode
class ActionToggleHelper(object):
'''
This class manages a checkable action and forwards checked events
to user selected callbacks.
'''
def __init__(self, action, getEnabledFunc, setEnabledFunc):
self.getEnabled = getEnabledFunc
self.setEnabled = setEnabledFunc
self.action = action
self.action.setCheckable(True)
self.action.checked = getEnabledFunc()
self.action.connect('triggered()', self.onActionChanged)
def updateAction(self):
self.action.checked = self.getEnabled()
def onActionChanged(self):
if self.action.checked:
self.setEnabled(True)
else:
self.setEnabled(False)
self.updateAction()
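# Hypothetical usage sketch: keep a checkable menu action in sync with a pair
# of getter/setter callbacks (the `state` dict and action name are illustrative only):
#
#     state = {'enabled': False}
#     action = addMenuAction('Tools', 'Enable Feature')
#     helper = ActionToggleHelper(action,
#                                 getEnabledFunc=lambda: state['enabled'],
#                                 setEnabledFunc=lambda on: state.update(enabled=on))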
class MenuActionToggleHelper(ActionToggleHelper):
def __init__(self, menuName, actionName, getEnabledFunc, setEnabledFunc):
action = addMenuAction(menuName, actionName)
ActionToggleHelper.__init__(self, action, getEnabledFunc, setEnabledFunc)
def onCurrentViewChanged(previousView, currentView):
updateToggleTerrainAction(currentView)
def addToolbarMacro(name, func):
toolbar = getMainWindow().macrosToolBar()
action = toolbar.addAction(name)
action.connect('triggered()', func)
def removeToolbarMacro(name):
action = getToolBarActions()[name]
if action:
getMainWindow().panelToolBar().removeAction(action)
def addShortcut(widget, keySequence, func):
shortcut = QtGui.QShortcut(QtGui.QKeySequence(keySequence), widget)
shortcut.connect('activated()', func)
shortcut.connect('activatedAmbiguously()', func)
return shortcut
def setupActions():
botSpyAction = getToolsMenuActions()['ActionBotSpy']
botSpyAction.connect(botSpyAction, 'triggered()', botspy.startBotSpy)
scopeAction = getToolsMenuActions()['ActionSignalScope']
scopeAction.connect(scopeAction, 'triggered()', openscope.startSignalScope)
def showErrorMessage(message, title='Error'):
QtGui.QMessageBox.warning(getMainWindow(), title, message)
def showInfoMessage(message, title='Info'):
QtGui.QMessageBox.information(getMainWindow(), title, message)
def showViewTabContextMenu(view, tabBar, menuPosition):
def onPopOut():
getViewManager().popOut(view)
menu = QtGui.QMenu(tabBar)
menu.addAction('Pop out').connect('triggered()', onPopOut)
menu.popup(menuPosition)
def onTabWidgetContextMenu(mouseClick):
tabBar = getViewManager().findChildren('QTabBar')[0]
tabIndex = tabBar.tabAt(mouseClick)
viewName = tabBar.tabText(tabIndex)
view = getViewManager().findView(viewName)
if view:
showViewTabContextMenu(view, tabBar, tabBar.mapToGlobal(mouseClick))
def setupViewManager():
vm = getViewManager()
vm.connect('currentViewChanged(ddViewBase*, ddViewBase*)', onCurrentViewChanged)
tabBar = vm.findChildren('QTabBar')[0]
tabBar.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
tabBar.connect('customContextMenuRequested(const QPoint &)', onTabWidgetContextMenu)
def startup(globals):
global _mainWindow
_mainWindow = globals['_mainWindow']
if 'DRC_BASE' not in os.environ:
showErrorMessage('DRC_BASE environment variable is not set')
return
if not os.path.isdir(getDRCBase()):
showErrorMessage('DRC_BASE directory does not exist: ' + getDRCBase())
return
_mainWindow.connect('resetCamera()', resetCamera)
_mainWindow.connect('toggleStereoRender()', toggleStereoRender)
_mainWindow.connect('toggleCameraTerrainMode()', toggleCameraTerrainMode)
setupActions()
setupViewManager()
| mitdrc/director | src/python/director/applogic.py | Python | bsd-3-clause | 8,862 | [
"VTK"
] | a950b98da52b725c746957192c40724d4563c740be70b49ac0610b3423ac274b |
from umibukela.importer.mavc import cycle1_2
"""
_________ _____ _________ _________ _____
/ _____/ / _ \ / _____// _____/ / _ \
\_____ \ / /_\ \ \_____ \ \_____ \ / /_\ \
/ \/ | \/ \/ \/ | \
/_______ /\____|__ /_______ /_______ /\____|__ /
\/ \/ \/ \/ \/
- no optional questions
- one "select all that apply" - visit_reason
- map valid answers to new columns val True/False
- remaining columns
- map known responses to cycle2 values
- map known responses to 'other' if in cycle2, otherwise n/a
"""
# In alphabetical order of the original column names according to pandas
#
# 'waiting_group/medicine_time',
columns = ['yes_no_group/alt_id', 'yes_no_group/bribe', 'performance_group/information', 'yes_no_group/nametag', 'performance_group/queues', 'performance_group/respect', 'yes_no_group/documents', 'tracking_no', 'transport_amount', 'waiting_time', 'visit_frequency', 'frequency_reason', 'personal_comment', 'clinic_feedback', 'improvements_comment', 'demographics_group/gender', 'facility', 'demographics_group/age', 'picture_permission', 'town_village', 'district', 'province', 'surveyor', 'today', 'visit_reason', 'service', 'performance_group/clean', 'performance_group/service_satisfaction', 'submitted_date', 'device_id', '_uuid']
# change values
# method
# for val in pd.unique(df.where(df['device_id']=='MAVCEC1')['facility'].ravel()):
# print val
#
# deviceid doesn't seem to be fixed to a site
#
# df.where(df['town_village']=='Folweni').replace(inplace=True, to_replace={'facility':{'Clinic':'notclinic'}})
# doesn't seem to work
# for c in df.columns:
# if c.startswith('waiting_group/'):
# print("### %s ###" % c)
# for val in pd.unique(df[c].ravel()):
# print("'%s': ''," % val)
# 'visit_reason': {
# '3 days for infant': 'accompanying',
replacements_all = {
'visit_reason': {
'State Old Age Grant': 'old_age',
'Disability Grant': 'disability',
'Child-Support Grant': 'child_support',
'Social relief of distress': 'social_relief',
'Foster Child Grant': 'foster_child',
'Care Dependency Grant': 'care_dependency',
'War-Veteran\'s Grant': 'war_veterans',
},
'clinic_feedback': {
'Yes': 'yes',
'No': 'no',
'Maybe': 'maybe',
},
'demographics_group/age': {
'26 - 40 years old': '26_40',
'41 - 60 years old': '40_60',
'Older than 60 years': 'older_60',
'Under 25 years old': 'under_25',
},
'demographics_group/gender': {
'Male': 'male',
'Female': 'female',
},
'performance_group/clean': {
'Very poor': '1',
'Poor': '2',
'OK': '3',
'Good': '4',
'Excellent': '5',
'Not applicable': 'n/a',
'': 'n/a',
},
'performance_group/information': {
'Very poor': '1',
'Poor': '2',
'OK': '3',
'Good': '4',
'Excellent': '5',
'Not applicable': 'n/a',
'': 'n/a',
},
'performance_group/queues': {
'Very poor': '1',
'Poor': '2',
'OK': '3',
'Good': '4',
'Excellent': '5',
'Not applicable': 'n/a',
'': 'n/a',
},
'performance_group/respect': {
'Very poor': '1',
'Poor': '2',
'OK': '3',
'Good': '4',
'Excellent': '5',
'Not applicable': 'n/a',
'': 'n/a',
},
'performance_group/service_satisfaction': {
'Very poor': '1',
'Poor': '2',
'OK': '3',
'Good': '4',
'Excellent': '5',
'Not applicable': 'n/a',
},
'service': {
'': 'other',
'Appeal against a previous decision not to pay you a grant': 'appeal',
'Apply for a new grant': 'apply',
'Changing of payment': 'other',
'Come for a new card': 'other',
'Come to fetch my Sassa grant card.': 'other',
'Complain about bad service that you received': 'service_complaint',
'Enquire about deductions from your grant or short payments': 'deductions_enquiry',
'Fetch Sassa child\'s card for grant': 'other',
'General inquiry or get information about grants': 'general_enquiry',
'Get a "life certificate"': 'live_certificate',
'Grant in Aid': 'other',
'Hand in grant forms.': 'other',
'Lost Sassa grant card.': 'other',
'Lost card now came to make a new one.': 'other',
'Lost the child\'s card now coming to make a new one': 'other',
'Make a new Sassa grant card': 'other',
'Make a new card': 'other',
'Make a new card.': 'other',
'Report lost card': 'other',
'Sort out a problem with an existing grant': 'existing_grant_issue',
'Transfer money to grandmother.': 'other',
'Transfer of child\'s grant': 'other',
'Transfer of grant to me': 'other',
'Transfer': 'other',
'Waiting for a Sassa pay card.': 'other',
'adding child': 'other',
'adding': 'other',
'aplication for cancelling child support grant': 'other',
'application for lost card': 'other',
'application of cancelling for child support grant': 'other',
'card block': 'other',
'card lost': 'other',
'change grant from mother to grandma': 'other',
'come to change the money to be transferred to me': 'other',
'for fingerprints': 'other',
'get the last grant': 'other',
'i lost my sassa card so im ere for the new one': 'other',
'im her to renew my grant': 'other',
'im here for doctors appointment': 'other',
'im here for the result of my application': 'other',
'im here to add another child': 'other',
'im here to renew my grant': 'other',
'lost card': 'other',
'make a new card': 'other',
'no answer': 'n/a',
'pick many': 'n/a',
'proof of pension': 'other',
'renew my grant': 'other',
'renw my grant': 'other',
't0 add another child': 'other',
'to add another child': 'other',
'to book doctor appointment': 'other',
'to bring doctors letter': 'other',
'to cancell child support grant': 'other',
'to change the grant': 'other',
'to make a sassa card': 'other',
'to make sassa card': 'other',
},
'transport_amount': {
'R11 - R25': 'eleven_twentyfive',
'Nothing': 'nothing',
'R26 - R50': 'twentysix_fifty',
'Less than R10': 'under_ten',
'R51 - R75': 'fiftyone_seventyfive',
'More than R75': 'more_seventyfive',
},
'visit_frequency': {
'This is my first visit for this issue': 'first',
'This is my second visit for this same issue': 'second',
'This is my 3rd or more visit for the same issue': 'third_more',
},
'waiting_time': {
'30 minutes - 1 hour': 'thirty_one',
'1 - 2 hours': 'one_two',
'2 - 4 hours': 'two_four',
'4 - 6 hours': 'four_six',
'Less than 30 minutes': 'under_thirty',
'More than 6 hours': 'more_four',
},
'yes_no_group/alt_id': {
'Yes': 'yes',
'No': 'no',
'': 'n/a',
},
'yes_no_group/bribe': {
'Yes': 'yes',
'No': 'no',
'': 'n/a',
},
'yes_no_group/documents': {
'Yes': 'yes',
'No': 'no',
'': 'n/a',
},
'yes_no_group/nametag': {
'Yes': 'yes',
'No': 'no',
'': 'n/a',
},
}
# 'MAVCEC1': {
# 'facility': {
# 'Thabong Clinic': 'thabong',
device_replacements = {
'MAVCCT4': { # gugulethu
'facility': 'gugulethu',
},
'MAVCKZN3': { # utrecht
'facility': 'utrecht',
},
'MAVCKZN4': { # umzinto
'facility': 'umzinto',
},
'MAVCT3': { # paarl
'facility': 'paarl',
},
'MAVEC3': { # mthatha
'facility': 'mthatha',
},
'MAVEC5': { # uitenhage
'facility': 'uitenhage',
},
'MAVJHB1': { # jouberton
'facility': 'jouberton',
},
'MAVJHB2': { # mahube
'facility': 'mahube',
},
'MAVJHB4': { # lephepane
'facility': 'lephepane',
},
'MAVKZN1': { # kwa-mhlanga
'facility': 'kwa-mhlanga',
},
}
# 'MAVCEC1': 'Health Citizen Survey MAVCEC1 - Data.csv',
device_files = {
'MAVCCT4': 'SASSA Citizen Survey Service Office MAVCCT4.csv',
'MAVCKZN3': 'SASSA Citizen Survey Service Office MAVCKZN3.csv',
'MAVCKZN4': 'SASSA Citizen Survey Service Office MAVCKZN4.csv',
'MAVCT3': 'SASSA Citizen Survey Service Office MAVCT3.csv',
'MAVEC3': 'SASSA Citizen Survey Service Office MAVEC3.csv',
'MAVEC5': 'SASSA Citizen Survey Service Office MAVEC5.csv',
'MAVJHB1': 'SASSA Citizen Survey Service Office MAVJHB1.csv',
'MAVJHB2': 'SASSA Citizen Survey Service Office MAVJHB2.csv',
'MAVJHB4': 'SASSA Citizen Survey Service Office MAVJHB4.csv',
'MAVKZN1': 'SASSA Citizen Survey Service Office MAVKZN1.csv',
}
# [c for c in df2.columns if c.startswith("visit_reason")]
#
# 'visit_reason': ['accompanying',
select_all_that_applies_columns = {
'visit_reason': [
'old_age',
'disability',
'child_support',
'social_relief',
'foster_child',
'care_dependency',
'war_veterans',
],
}
def run():
return cycle1_2.run(columns, replacements_all, device_files, device_replacements, select_all_that_applies_columns)
| Code4SA/umibukela | umibukela/importer/mavc/cycle1_2_sassa.py | Python | mit | 9,658 | [
"VisIt"
] | 21854bd5336c4fb8cbd03add09cb7158cb54bb3a5c2813a919ea4fa6a3735716 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constellation plotting tools."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from sklearn.manifold import TSNE
import sonnet as snt
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
_COLORS = """
#a6cee3
#1f78b4
#b2df8a
#33a02c
#fb9a99
#e31a1c
#fdbf6f
#ff7f00
#cab2d6
#6a3d9a
#ffff99
#b15928""".split()
def hex_to_rgb(value):
value = value.lstrip('#')
lv = len(value)
return tuple(int(value[i:i + lv // 3], 16) / 255.
for i in range(0, lv, lv // 3))
_COLORS = [c.strip() for c in _COLORS]
_COLORS = _COLORS[1::2] + _COLORS[::2]
_COLORS = np.asarray([hex_to_rgb(c) for c in _COLORS], dtype=np.float32)
def gaussian_blobs(params, height, width, norm='sum'):
"""Creates gaussian blobs on a canvas.
Args:
params: [B, 4] tensor, where entries represent (y, x, y_scale, x_scale).
height: int, height of the output.
width: int, width of the output.
norm: type of normalization to use; must be a postfix of some
tf.reduce_.* method.
Returns:
Tensor of shape [B, height, width].
"""
params = tf.expand_dims(params, -1)
uy, ux, sy, sx = tf.split(params, 4, -2)
rows = tf.range(tf.to_int32(height))
rows = tf.to_float(rows)[tf.newaxis, :, tf.newaxis]
cols = tf.range(tf.to_int32(width))
cols = tf.to_float(cols)[tf.newaxis, tf.newaxis, :]
dy = (rows - uy) / sy
dx = (cols - ux) / sx
z = tf.square(dy) + tf.square(dx)
mask = tf.exp(-.5 * z)
# normalize so that the contribution of each blob sums to one
# change this to `tf.reduce_max` if you want max value to be one
norm_func = getattr(tf, 'reduce_{}'.format(norm))
mask /= norm_func(mask, (1, 2), keep_dims=True) + 1e-8 # pylint:disable=not-callable
return mask
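# Hedged usage sketch (not part of the original module): one blob centred at (y=16, x=16)
# with scale 3 on a 32x32 canvas. With norm='sum' each [height, width] slice sums to
# (roughly) one, per the docstring above; the numbers are arbitrary illustrative choices.
def _example_gaussian_blobs():
  params = tf.constant([[16., 16., 3., 3.]])  # [B=1, 4] as (y, x, y_scale, x_scale)
  return gaussian_blobs(params, height=32, width=32, norm='sum')  # -> [1, 32, 32]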
def gaussian_blobs_const_scale(params, scale, height, width, norm='sum'):
scale = tf.zeros_like(params[Ellipsis, :2]) + scale
params = tf.concat([params[Ellipsis, :2], scale], -1)
return gaussian_blobs(params, height, width, norm)
def denormalize_coords(coords, canvas_size, rounded=False):
coords = (coords + 1.) / 2. * np.asarray(canvas_size)[np.newaxis]
if rounded:
coords = tf.round(coords)
return coords
def render_by_scatter(size, points, colors=None, gt_presence=None):
"""Renders point by using tf.scatter_nd."""
if colors is None:
colors = tf.ones(points.shape[:-1].as_list() + [3], dtype=tf.float32)
if gt_presence is not None:
colors *= tf.cast(tf.expand_dims(gt_presence, -1), colors.dtype)
batch_size, n_points = points.shape[:-1].as_list()
shape = [batch_size] + list(size) + [3]
batch_idx = tf.reshape(tf.range(batch_size), [batch_size, 1, 1])
batch_idx = snt.TileByDim([1], [n_points])(batch_idx)
idx = tf.concat([batch_idx, tf.cast(points, tf.int32)], -1)
return tf.scatter_nd(idx, colors, shape)
def render_constellations(pred_points,
capsule_num,
canvas_size,
gt_points=None,
n_caps=2,
gt_presence=None,
pred_presence=None,
caps_presence_prob=None):
"""Renderes predicted and ground-truth points as gaussian blobs.
Args:
pred_points: [B, m, 2].
capsule_num: [B, m] tensor indicating which capsule the corresponding point
comes from. Points from different capsules are plotted with different
colors. Currently supported values: {0, 1, ..., 11}.
canvas_size: tuple of ints
gt_points: [B, k, 2]; plots ground-truth points if present.
n_caps: integer, number of capsules.
gt_presence: [B, k] binary tensor.
pred_presence: [B, m] binary tensor.
caps_presence_prob: [B, m], a tensor of presence probabilities for caps.
Returns:
[B, *canvas_size] tensor with plotted points
"""
# convert coords to be in [0, side_length]
pred_points = denormalize_coords(pred_points, canvas_size, rounded=True)
# render predicted points
batch_size, n_points = pred_points.shape[:2].as_list()
capsule_num = tf.to_float(tf.one_hot(capsule_num, depth=n_caps))
capsule_num = tf.reshape(capsule_num, [batch_size, n_points, 1, 1, n_caps, 1])
color = tf.convert_to_tensor(_COLORS[:n_caps])
color = tf.reshape(color, [1, 1, 1, 1, n_caps, 3]) * capsule_num
color = tf.reduce_sum(color, -2)
color = tf.squeeze(tf.squeeze(color, 3), 2)
colored = render_by_scatter(canvas_size, pred_points, color, pred_presence)
# Prepare a vertical separator between predicted and gt points.
# Separator is composed of all supported colors and also serves as
# a legend.
# [b, h, w, 3]
n_colors = _COLORS.shape[0]
sep = tf.reshape(tf.convert_to_tensor(_COLORS), [1, 1, n_colors, 3])
n_tiles = int(colored.shape[2]) // n_colors
sep = snt.TileByDim([0, 1, 3], [batch_size, 3, n_tiles])(sep)
sep = tf.reshape(sep, [batch_size, 3, n_tiles * n_colors, 3])
pad = int(colored.shape[2]) - n_colors * n_tiles
pad, r = pad // 2, pad % 2
if caps_presence_prob is not None:
n_caps = int(caps_presence_prob.shape[1])
prob_pads = ([0, 0], [0, n_colors - n_caps])
caps_presence_prob = tf.pad(caps_presence_prob, prob_pads)
zeros = tf.zeros([batch_size, 3, n_colors, n_tiles, 3], dtype=tf.float32)
shape = [batch_size, 1, n_colors, 1, 1]
caps_presence_prob = tf.reshape(caps_presence_prob, shape)
prob_vals = snt.MergeDims(2, 2)(caps_presence_prob + zeros)
sep = tf.concat([sep, tf.ones_like(sep[:, :1]), prob_vals], 1)
sep = tf.pad(sep, [(0, 0), (1, 1), (pad, pad + r), (0, 0)],
constant_values=1.)
# render gt points
if gt_points is not None:
gt_points = denormalize_coords(gt_points, canvas_size, rounded=True)
gt_rendered = render_by_scatter(canvas_size, gt_points, colors=None,
gt_presence=gt_presence)
colored = tf.where(tf.cast(colored, bool), colored, gt_rendered)
colored = tf.concat([gt_rendered, sep, colored], 1)
res = tf.clip_by_value(colored, 0., 1.)
return res
def concat_images(img_list, sep_width, vertical=True):
"""Concatenates image tensors."""
if vertical:
sep = tf.ones_like(img_list[0][:, :sep_width])
else:
sep = tf.ones_like(img_list[0][:, :, :sep_width])
imgs = []
for i in img_list:
imgs.append(i)
imgs.append(sep)
imgs = imgs[:-1]
return tf.concat(imgs, 2 - vertical)
def apply_cmap(brightness, cmap):
indices = tf.cast(brightness * 255.0, tf.int32)
# Make sure the indices are in the right range. Comes in handy for NaN values.
indices = tf.clip_by_value(indices, 0, 255)
cm = matplotlib.cm.get_cmap(cmap)
colors = tf.constant(cm.colors, dtype=tf.float32)
return tf.gather(colors, indices)
def render_activations(activations, height, pixels_per_caps=2, cmap='gray'):
"""Renders capsule activations as a colored grid.
Args:
activations: [B, n_caps] tensor, where every entry is in [0, 1].
height: int, height of the resulting grid.
pixels_per_caps: int, size of a single grid cell.
cmap: string: matplotlib-compatible cmap name.
Returns:
[B, height, width, n_channels] tensor.
"""
# convert activations to colors
if cmap == 'gray':
activations = tf.expand_dims(activations, -1)
else:
activations = apply_cmap(activations, cmap)
batch_size, n_caps, n_channels = activations.shape.as_list()
# pad to fit a grid of prescribed height
n_rows = 1 + (height - pixels_per_caps) // (pixels_per_caps + 1)
n_cols = n_caps // n_rows + ((n_caps % n_rows) > 0)
n_pads = n_rows * n_cols - n_caps
activations = tf.pad(activations, [(0, 0), (0, n_pads), (0, 0)],
constant_values=1.)
# tile to get the appropriate number of pixels to fill a pixels_per_caps^2 square
activations = snt.TileByDim([2], [pixels_per_caps**2])(
tf.expand_dims(activations, 2))
activations = tf.reshape(activations, [batch_size, n_rows, n_cols,
pixels_per_caps, pixels_per_caps,
n_channels])
# pad each cell with one white pixel on the bottom and on the right-hand side
activations = tf.pad(activations, [(0, 0), (0, 0), (0, 0), (0, 1), (0, 1),
(0, 0)], constant_values=1.)
# concat along row and col dimensions
activations = tf.concat(tf.unstack(activations, axis=1), axis=-3)
activations = tf.concat(tf.unstack(activations, axis=1), axis=-2)
# either pad or truncate to get the correct height
if activations.shape[1] < height:
n_pads = height - activations.shape[1]
activations = tf.pad(activations, [(0, 0), (0, n_pads), (0, 0), (0, 0)])
else:
activations = activations[:, :height]
return activations
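# Hedged usage sketch (not part of the original module): a [2, 5] batch of activations
# rendered as an 8-pixel-high grayscale grid. The values satisfy the docstring contract
# (entries in [0, 1]); batch size, capsule count and height are illustrative choices.
def _example_render_activations():
  activations = tf.constant([[0.1, 0.5, 0.9, 0.0, 1.0],
                             [1.0, 0.0, 0.2, 0.7, 0.3]])
  return render_activations(activations, height=8, pixels_per_caps=2, cmap='gray')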
def correlation(x, y):
"""Computes correlation between x and y.
Args:
x: [B, m],
y: [B, n]
Returns:
corr_xy [m, n]
"""
# [B, m+n]
m = int(x.shape[-1])
xy = tf.concat([x, y], -1)
# [m+n, m+n]
corr = tfp.stats.correlation(xy, sample_axis=0)
corr_xy = corr[:m, m:]
return corr_xy
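# Hedged usage sketch (not part of the original module): with x of shape [B, 2] and y of
# shape [B, 3] the result has shape [2, 3], matching the docstring above. The small
# constant batches are purely illustrative.
def _example_correlation():
  x = tf.constant([[0., 1.], [1., 0.], [2., 2.], [3., 1.]])
  y = tf.constant([[1., 0., 2.], [0., 1., 1.], [2., 1., 0.], [1., 2., 3.]])
  return correlation(x, y)  # -> [2, 3]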
def make_tsne_plot(caps_presence, labels, filename=None, save_kwargs=None):
"""Makes a TSNE plot."""
# idx = np.random.choice(res.test.posterior_pres.shape[0], size=int(1e4),
# replace=False)
# points = res.train.posterior_pres[idx]
# labels = res.train.label[idx]
tsne = TSNE(2, perplexity=50)
embedded = tsne.fit_transform(caps_presence)
colors = np.asarray([
166, 206, 227,
31, 120, 180,
178, 223, 138,
51, 160, 44,
251, 154, 153,
227, 26, 28,
253, 191, 111,
255, 127, 0,
202, 178, 214,
106, 61, 154
], dtype=np.float32).reshape(10, 3) / 255.
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
for i in range(10):
idx = (labels == i)
points_for_label = embedded[idx]
ax.scatter(points_for_label[:, 0], points_for_label[:, 1], c=colors[i])
if filename is not None:
if save_kwargs is None:
save_kwargs = dict(bbox_inches='tight', dpi=300)
fig.savefig(filename, **save_kwargs)
plt.close(fig)
| google-research/google-research | stacked_capsule_autoencoders/capsules/plot.py | Python | apache-2.0 | 10,874 | [
"Gaussian"
] | 32bab5ca1afcba0ec0be18fac34249417e69210944a65591ec9d87e2bc8da3fe |
# config.py ---
#
# Filename: config.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Sat Feb 13 16:07:56 2010 (+0530)
# Version:
# Last-Updated: Mon Nov 15 09:53:00 2010 (+0530)
# By: Subhasis Ray
# Update #: 145
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
import os
import sys
import tempfile
import logging
from PyQt4.Qt import Qt
from PyQt4 import QtGui, QtCore
moose_version = '1.4'
settings = None
TEMPDIR = tempfile.gettempdir()
KEY_FIRSTTIME = 'firsttime'
KEY_HOME_DIR = os.path.abspath(__file__).rstrip('config.py')
KEY_MAIN_DIR = os.path.abspath(os.path.join(KEY_HOME_DIR,'..'))
KEY_ICON_DIR = os.path.join(KEY_HOME_DIR,'icons')
user_home = os.path.expanduser('~')
user_moose_dir = os.path.join(user_home, 'moose%s' % (moose_version))
KEY_DEMOS_DIR = os.path.join(user_moose_dir,'DEMOS','pymoose')
KEY_WINDOW_GEOMETRY = os.path.join(KEY_HOME_DIR,'geometry')
KEY_WINDOW_LAYOUT = os.path.join(KEY_HOME_DIR,'layout')
KEY_RUNTIME_AUTOHIDE = os.path.join(KEY_HOME_DIR,'rtautohide')
KEY_GL_COLORMAP = os.path.join(KEY_HOME_DIR,'oglfunc','colors')
KEY_GL_BACKGROUND_COLOR = 'glclient/bgcolor'
QT_VERSION = str(QtCore.QT_VERSION_STR).split('.')
QT_MAJOR_VERSION = int(QT_VERSION[0])
QT_MINOR_VERSION = int(QT_VERSION[1])
MOOSE_DOC_FILE = os.path.abspath(os.path.join(KEY_HOME_DIR,'documentation.pdf'))
MOOSE_REPORT_BUG_URL = 'http://sourceforge.net/tracker/?func=add&group_id=165660&atid=836272'
def get_settings():
'''Initializes the QSettings for the application and returns it.'''
global settings
if not settings:
QtCore.QCoreApplication.setOrganizationName('NCBS')
QtCore.QCoreApplication.setOrganizationDomain('ncbs.res.in')
QtCore.QCoreApplication.setApplicationName('MOOSE')
settings = QtCore.QSettings()
return settings
# LOG_FILENAME = os.path.join(TEMPDIR, 'moose.log')
LOG_LEVEL = logging.ERROR
# logging.basicConfig(filename=LOG_FILENAME, level=LOG_LEVEL, filemode='w', format='%(asctime)s %(levelname)s %(name)s %(filename)s %(funcName)s: %(lineno)d: %(message)s')
logging.basicConfig(stream=sys.stdout, level=LOG_LEVEL, filemode='w', format='%(asctime)s %(levelname)s %(name)s %(filename)s %(funcName)s: %(lineno)d: %(message)s')
LOGGER = logging.getLogger('moose')
BENCHMARK_LOGGER = logging.getLogger('moose.benchmark')
BENCHMARK_LOGGER.setLevel(logging.INFO)
| BhallaLab/moose-thalamocortical | gui/config.py | Python | lgpl-2.1 | 3,143 | [
"MOOSE"
] | 02725f10c66bc1b05c29836b0aade806aa86056dfa4c4471d03202cff6a4e5f9 |
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import numpy as np
from espressomd import System, lb
class SwimmerTest():
system = System(box_l=3 * [6])
system.cell_system.skin = 1
system.time_step = 1e-2
LB_params = {'agrid': 1,
'dens': 1.1,
'visc': 1.2,
'kT': 0,
'tau': system.time_step}
gamma = 0.3
lbf = None
def add_all_types_of_swimmers(
self,
fix=False,
rotation=False,
put_in_corners=True):
"""Places all combinations of pusher/puller and f_swim/v_swim
in a box, either in the corners or around the center
"""
system = self.system
plus_x = np.sqrt([.5, 0, .5, 0])
plus_y = np.sqrt([0, 0, .5, .5])
plus_z = np.sqrt([.5, 0, 0, .5])
minus_y = np.sqrt([.5, .5, 0, 0])
pos0 = [2, 0.01, 3] if put_in_corners else [2, 3, 2.5]
pos1 = [5.99, 2, 3] if put_in_corners else [3.1, 2.1, 2.2]
pos2 = [2, 3, 5.99] if put_in_corners else [2.9, 2.5, 3]
pos3 = [1.5, 5.99, 1] if put_in_corners else [2, 2, 2.5]
system.part.add(pos=pos0, quat=minus_y, fix=3 * [fix],
mass=0.9, rinertia=3 * [7], rotation=3 * [rotation],
swimming={"mode": "pusher", "f_swim": 0.10,
"dipole_length": 0.5})
system.part.add(pos=pos1, quat=plus_x, fix=3 * [fix],
mass=1.9, rinertia=3 * [8], rotation=3 * [rotation],
swimming={"mode": "pusher", "v_swim": 0.02,
"dipole_length": 0.6})
system.part.add(pos=pos2, quat=plus_z, fix=3 * [fix],
mass=2.9, rinertia=3 * [9], rotation=3 * [rotation],
swimming={"mode": "puller", "f_swim": 0.08,
"dipole_length": 0.7})
system.part.add(pos=pos3, quat=plus_y, fix=3 * [fix],
mass=3.9, rinertia=3 * [10], rotation=3 * [rotation],
swimming={"mode": "puller", "v_swim": 0.05,
"dipole_length": 0.8})
def tearDown(self):
self.system.part.clear()
self.system.actors.clear()
self.system.lbboundaries.clear()
self.system.thermostat.turn_off()
self.lbf = None
def test_conflicting_parameters(self):
"""v_swim and f_swim can't be set at the same time
"""
swimmer = self.system.part.add(pos=[3] * 3)
with self.assertRaises(Exception):
swimmer.swimming = {"v_swim": 0.3, "f_swim": 0.6}
def test_momentum_conservation(self):
"""friction as well as 'active' forces apply to particles
and to the fluid, so total momentum is conserved
"""
self.add_all_types_of_swimmers(rotation=False)
self.system.integrator.run(20, reuse_forces=True)
tot_mom = self.system.analysis.linear_momentum(include_particles=True,
include_lbfluid=True)
# compensate half-step offset between force calculation and LB-update
for part in self.system.part:
tot_mom += part.f * self.system.time_step / 2.
np.testing.assert_allclose(tot_mom, 3 * [0.], atol=self.tol)
def test_particle_forces(self):
"""run through all swimmers to check expected forces
"""
self.add_all_types_of_swimmers(rotation=False)
self.system.integrator.run(10)
for swimmer in self.system.part:
f_swim = swimmer.swimming['f_swim']
v_swim = swimmer.swimming['v_swim']
director = swimmer.director
# due to dt/2 time-shift between force calculation and LB-update,
# v_swimmer has to be calculated at the half step
v_swimmer = swimmer.v + \
0.5 * self.system.time_step * swimmer.f / swimmer.mass
# for friction coupling, the old fluid at the new position is used
v_fluid = self.lbf.get_interpolated_velocity(
swimmer.pos + self.system.time_step * v_swimmer)
force = -self.gamma * (v_swimmer - v_fluid) + \
f_swim * director + self.gamma * v_swim * director
self.system.integrator.run(1, reuse_forces=True)
np.testing.assert_allclose(
np.copy(swimmer.f), force, atol=self.tol)
def check_fluid_force(self, swimmer):
pass
# forces on particles are checked
# total force on the fluid matches (momentum conservation)
# TODO: only thing left to check is the location of the fluid force.
@utx.skipIfMissingFeatures(["ENGINE", "ROTATIONAL_INERTIA", "MASS"])
class SwimmerTestCPU(SwimmerTest, ut.TestCase):
def setUp(self):
self.tol = 1e-10
self.lbf = lb.LBFluid(**self.LB_params)
self.system.actors.add(self.lbf)
self.system.thermostat.set_lb(LB_fluid=self.lbf, gamma=self.gamma)
@utx.skipIfMissingGPU()
@utx.skipIfMissingFeatures(["ENGINE", "ROTATIONAL_INERTIA", "MASS"])
class SwimmerTestGPU(SwimmerTest, ut.TestCase):
def setUp(self):
self.tol = 1e-5
self.lbf = lb.LBFluidGPU(**self.LB_params)
self.system.actors.add(self.lbf)
self.system.thermostat.set_lb(LB_fluid=self.lbf, gamma=self.gamma)
if __name__ == "__main__":
ut.main()
| KaiSzuttor/espresso | testsuite/python/engine_lb.py | Python | gpl-3.0 | 6,177 | [
"ESPResSo"
] | a6e3c8fd802f9c721529a01657f228e802bdba3f968c954b7b3b78e003e88b4a |
#!/usr/bin/python
from multiprocessing import Pool
import time
import os
import sys
import argparse
import math
from homolog4 import *
from collections import defaultdict
import itertools
from collections import Counter
from Levenshtein import distance
import cPickle as pickle
# Copyright(C) 2014 David Ream
# Released under GPL version 3 licence. http://www.gnu.org/licenses/lgpl.html
# Do not remove this comment
# This exists to make the main function easier to read. It contains code to run the argument parser, and does nothing else.
def parser_code():
parser = argparse.ArgumentParser(description="This program will calculate the number of events that an organismal pair does not have in common. The return is a pickled dictionary.")
parser.add_argument("-i", "--infile", dest="infile", default='./regulonDB/operon_names_and_genes.txt', metavar="FILE",
help="The parsed gene block file. The default is the parsed regulonDB operon file.")
parser.add_argument("-I", "--infolder", dest="infolder", default='./optimized_operon/', metavar="DIRECTORY",
help="A folder that contains the final operon results. The files will have operons grouped by organism, arranged by start, and have spurious BLAST results removed.")
parser.add_argument("-o", "--outfolder", dest="outfolder", metavar="DIRECTORY", default='./operon_distance_matrices/',
help="Folder where the filtered results will be stored. Default is the folder './operon_distance_matrices/'.")
parser.add_argument("-F", "--operon_filter", dest="operon_filter", default='NONE', metavar="FILE",
help="A file that contains the operons that are under investigation. All others will be omitted from analysis an results.")
# this is where i will/would add the option to filter on organism. to be continued...
parser.add_argument("-n", "--num_proc", dest="num_proc", metavar="INT", default = os.sysconf("SC_NPROCESSORS_CONF"), type=int,
help="Number of processors that you want this script to run on. The default is every CPU that the system has.")
return parser.parse_args()
def check_options(parsed_args):
if os.path.exists(parsed_args.infile):
gene_block_file = parsed_args.infile
elif parsed_args.infile == 'NONE':
gene_block_file = 'NONE'
else:
print "The file %s does not exist." % parsed_args.infile
sys.exit()
if os.path.isdir(parsed_args.infolder):
infolder = parsed_args.infolder
else:
print "The folder %s does not exist." % parsed_args.infolder
sys.exit()
# if the directory that the user specifies does not exist, then the program makes it for them.
if not os.path.isdir(parsed_args.outfolder):
os.makedirs(parsed_args.outfolder)
outfolder = parsed_args.outfolder
if outfolder[-1] != '/':
outfolder = outfolder + '/'
if os.path.exists(parsed_args.operon_filter):
operon_filter_file = parsed_args.operon_filter
elif parsed_args.operon_filter == 'NONE':
operon_filter_file = parsed_args.operon_filter
else:
print "The file %s does not exist." % parsed_args.operon_filter
sys.exit()
# section of code that deals with determining the number of CPU cores that will be used by the program
if parsed_args.num_proc > os.sysconf("SC_NPROCESSORS_CONF"):
num_proc = os.sysconf("SC_NPROCESSORS_CONF")
elif parsed_args.num_proc < 1:
num_proc = 1
else:
num_proc = int(parsed_args.num_proc)
return gene_block_file, infolder, outfolder, operon_filter_file, num_proc
#this function will return all of the files that are in a directory. os.walk is recursive traversal.
def return_recursive_dir_files(root_dir):
result = []
for path, dir_name, flist in os.walk(root_dir):
for f in flist:
fname = os.path.join(path, f)
if os.path.isfile(fname):
result.append(fname)
return result
# This function returns the list of gene block files, filtered by gene block if a filter file is supplied by the user.
def return_file_list(infolder, filter_file):
if filter_file == 'NONE':
return return_recursive_dir_files(infolder)
else:
filter_list = [i.strip() for i in open(filter_file)]
return [i for i in return_recursive_dir_files(infolder) if os.path.basename(i).split('.')[0] in filter_list]
# This function will take the folder of the operon result files, and it will return a dictionary that has as a primary key
# the operon name, the secondary key will be nc. The values of the attributes will then follow.
def parse_operon_result_files(in_folder, dest_folder, operon_filter_file):
file_list = return_file_list(in_folder, operon_filter_file)
print "file_list", file_list
result = {}
#distmat_dict = {}
iddo_result = {}
#gene_dict = make_operon_gene_string_dict(operon_file)
#print len(gene_dict)
print "in_folder", in_folder
#for i in open(distmat_file).readlines():
# nc = i.strip().split('\t')[0]
# val = i.strip().split('\t')[1].strip() # fixes a wonky error in distmat.... ugh
# distmat_dict.update({nc: val})
for fname in file_list:
operon = fname.split('/')[-1].split('.')[0]
#fname = in_folder + f
print "fname", fname, operon
result.update({operon: {}})
iddo_result.update({operon: {}})
file_output = []
summary_info = [i.strip().split('\t')[1:] for i in open(fname).readlines() if i[:2] == '##']
homolog_entries = []
print "summary_info", summary_info
# This section of code has a single purpose: an ad-hoc measure of the goodness of rearrangements and distance
# between grouped genes.
summmary_info = []
tmp_hlog_list_for_grouping = []
ignore_list = ['#', '$', '@', '+']
#for i in [i.strip() for i in open(fname).readlines() if len(i) > 1]: # ugh, corrects for some garbage in the files (occurs once it seems)
for i in [i.strip() for i in open(fname).readlines() if i.split('\t')[0] == '##']:
#for i in [i.strip() for i in open(fname).readlines() if len(i) > 1 and i[0] == '#']:
#if i[:2] == '##':
if len(i) < 2:
print "err", i, fname
if i[0] == '#':
comprehensive_list, group_list = group_homologs(tmp_hlog_list_for_grouping, INTERGENIC_MAX_LENGTH)
for group in group_list:
#print gene_dict[operon]['reference_string']
rearrangements = return_operon_string_distance(gene_dict[operon]['reference_string'], return_group_str(group, operon, gene_dict))
print gene_dict[operon]['reference_string'], return_group_str(group, operon, gene_dict), return_operon_string_distance(gene_dict[operon]['reference_string'], return_group_str(group, operon, gene_dict))
print "vals recorded", gene_dict[operon]['reference_string'], return_group_str(group, operon, gene_dict), return_operon_string_distance(gene_dict[operon]['reference_string'], return_group_str(group, operon, gene_dict)), rearrangements
print "i line", i, len(i.split('\t'))
try:
a,nc,c,d,e,f,g,h,i,j,k = i.split('\t') # only interested in a few of the fields here:
distmat_val = distmat_dict[nc]
print c
common = '_'.join(c.split('_')[:2])
print "common", common
except:
print "Error in line", i, fname
#print len(tmp_hlog_list_for_grouping), tmp_hlog_list_for_grouping, len(group_list)
tmp_hlog_list_for_grouping = []
elif i[0] not in ignore_list:
tmp_hlog_list_for_grouping.append(return_homolog(i))
pass
#print i.split('\t')
ignore_list = ['#', '$', '@', '+']
#for line in [i.strip() for i in open(fname).readlines() if i[0] not in ignore_list ]:
# print line
#print homolog_entries
#print summary_info
print "summary_info2", summary_info
for item in summary_info:
nc = item[0]
common = ' '.join(item[1].split('_')[:2])
if nc in distmat_dict:
distmat_val = distmat_dict[nc]
#print nc, common
line = [nc, common, distmat_val] + [i.strip() for i in item[2:]]
#print line
file_output.append(line)
header = 'NC_Number,Common,Distance,Splitting,Migration,Duplicates,Deletions,Splits,Longest_Group,Total_Fusions,Group_Fusions\n'
handle = open(dest_folder + operon + '.csv', 'w')
handle.write(header)
handle.write('\n'.join([','.join(i) for i in file_output]))
handle.close()
#print result.keys()
'''
#####################################
# No longer used #
#####################################
# This function will return information about the gene blocks from a parse gene block file.
# The default is the parsed operon file form regulonDB, but the user could also provide one.
# The format is operon_name gene1 gene2 gene3 gene4 etc... tab delineated.
def return_gene_block_dictionary(flist, gene_block_file):
# Step 1: Determine information about the gene block(s) that we are investigating
gene_block_length_dict = {}
for line in [i.strip().split('\t') for i in open(gene_block_file).readlines()]:
gene_block = line[0]
gene_list = line[1:]
number_genes = len(gene_list)
gene_block_length_dict.update({gene_block:{'number_genes':number_genes, 'gene_list':gene_list}})
# testing step 1
print sorted(gene_block_length_dict.keys())
return gene_block_length_dict
'''
# This function will return the information about each organism in a given gene block
# The information will be the accession, and a gene count of each of the gene recovered and the number of groups.
# The returned dict will be keyed {gene_block:{accession:[gene1, gene2, gene3, etc...]}. This will be used
# in later steps to generate the gene and split counts used to create the distance matrix in later steps.
def return_gene_block_organism_data(flist):
result = {}
for fname in flist:
# this will store the homologs in a dict {accession:[hlog1, hlog2, etc]} temporarily.
org_dict = {}
gene_block = os.path.basename(fname).split('.')[0]
#print "gene_block", gene_block
for line in [i.strip() for i in open(fname).readlines()]:
# catch some errors if they occur and report a useful message as a result.
try:
hlog = Homolog.from_blast(line)
except:
print "ERROR", line
try:
accession = hlog.accession()
except:
print line
predicted_gene = hlog.blast_annotation()
# store the homolog where it belongs
if accession in org_dict.keys():
org_dict[accession].append(hlog)
else:
org_dict.update({accession:[hlog]})
# Now make sure that everything is put neatly in order by start position.
for accession in sorted(org_dict.keys()):
h_list = org_dict.pop(accession)
h_list.sort(key=lambda x: x.start())
org_dict.update({accession:h_list})
# Store the dict as an entry in the result.
result.update({gene_block:org_dict})
return result
# adding a list of keys that are not the names of genes. These keys exist to keep track of non-gene information about the
# gene blocks that we are investigating.
IGNORE_LIST = ['groups']
# This function strips the necessary information about the homologs that we report per gene block/organism pair
# so that event determination between different organisms can be performed. No events are determined in this stage,
# simply information gathering, for later comparison.
# If we wish to add new events to our method, this is the first place that we will need to add new code.
def make_event_reporting_data_structure(gene_block_dict, max_gap):
result = {}
for gene_block in sorted(gene_block_dict.keys()):
print gene_block
result.update({gene_block:{}})
for organism in gene_block_dict[gene_block].keys():
print organism
# we use this list twice, so storing it
list_homologs = gene_block_dict[gene_block][organism]
# returns a count of each gene that was in the homolog list as a dictionary
org_gene_cnt = dict(Counter([i.blast_annotation() for i in list_homologs]))
# returns a list of gene blocks, the number of which can be used to determine splits later.
# the special name 'groups' is how that can be determined.
gene_block_groupings = homolog_list_grouping_function(list_homologs, max_gap)
groups = len(gene_block_groupings)
# save the number of groups to the result dict for the organism under investigation
org_gene_cnt.update({'groups':groups})
# Store the result
result[gene_block].update({organism:org_gene_cnt})
return result
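# Hedged illustration (not part of the original script) of the structure built above:
# per gene block, each organism maps gene names to copy numbers plus the special
# 'groups' key tracked via IGNORE_LIST. Gene block, accession and gene names are made up.
def example_event_reporting_structure():
    return {'atpIBEFHAGDC': {'NC_000913': {'atpA': 1, 'atpB': 1, 'groups': 1},
                             'NC_002695': {'atpA': 2, 'groups': 2}}}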
# This function will return the number of splits that two organisms do not share.
def return_splits(data_struct, org1, org2):
result = 0
if org1 != org2:
org1_groups = data_struct[org1]['groups']
org2_groups = data_struct[org2]['groups']
result = int(math.fabs(org1_groups - org2_groups))
else:
pass
return result
# this function will return the number of deletions as the number of unique genes they do not share
def return_deletions(data_struct, org1, org2):
if org1 != org2:
# Removing the key 'genes', and any other non gene information in the reporting data_struct using the ignore_list
#gene_list_org1 = data_struct[org1].keys()
gene_list_org1 = [i for i in data_struct[org1].keys() if i not in IGNORE_LIST]
#gene_list_org2 = data_struct[org2].keys()
gene_list_org2 = [i for i in data_struct[org2].keys() if i not in IGNORE_LIST]
#unique_gene_list = [i for i in list(set(gene_list_org1) - set(gene_list_org2)) if i not in IGNORE_LIST]
unique_gene_list = list(set(gene_list_org1 + gene_list_org2))
# Here we determine how many deletions there are in each organism. The total of these deletions is the number
# of deletions between the organisms.
deletions_in_org1 = len(list(set(unique_gene_list)- set(gene_list_org1)))
deletions_in_org2 = len(list(set(unique_gene_list)- set(gene_list_org2)))
return deletions_in_org1 + deletions_in_org2
else:
return 0
# this function will return the number of deletions as the number of duplicated genes
def return_duplications(data_struct, org1, org2):
result = 0
if org1 != org2:
gene_list_org1 = data_struct[org1].keys()
gene_list_org2 = data_struct[org2].keys()
unique_gene_list = [i for i in list(set(gene_list_org1 + gene_list_org2)) if i not in IGNORE_LIST]
for gene in unique_gene_list:
duplications = 0
if gene in data_struct[org1].keys():
gene1_copy_number = data_struct[org1][gene]
else:
gene1_copy_number = 0
if gene in data_struct[org2].keys():
gene2_copy_number = data_struct[org2][gene]
else:
gene2_copy_number = 0
#########################################################################################################################
# This is where we calculate duplications. The issues here are that we first consider a deletion BEFORE a duplication. #
# Therefore if organism 1 lacks a gene, for this explanation call it gene 'A', and organism 2 has two copies of it, we #
# will say that the first copy of gene A in organism 1 is deleted with respect to organism 2. Similarily the second #
# of this gene is a duplication. This will yield the situation where we count one deletion and one duplication for this#
# situation. #
#########################################################################################################################
gene_list = [gene1_copy_number, gene2_copy_number]
# if one organisms is missing the gene entirely
if min(gene_list) == 0:
duplications = sum(gene_list) - 1
# if both organisms have the gene
else:
duplications = int(math.fabs(gene1_copy_number - gene2_copy_number))
result += duplications
else:
# org 1 and org2 are the same, so we do nothing
pass
return result
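# Hedged worked example (not part of the original script) of the convention described in
# the boxed comment above: org_one lacks geneA while org_two carries two copies, so the
# pair is charged one deletion and one duplication. Organism and gene names are made up.
def example_deletion_and_duplication_counts():
    data_struct = {'org_one': {'geneB': 1, 'groups': 1},
                   'org_two': {'geneA': 2, 'geneB': 1, 'groups': 1}}
    deletions = return_deletions(data_struct, 'org_one', 'org_two')        # 1
    duplications = return_duplications(data_struct, 'org_one', 'org_two')  # 1
    return deletions, duplications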
# This function will return an all vs. all pickled dict of the format:
# {gene_block:{NC1:{NC2:{event1:numeric, event2:numeric, etc:numeric}}}}
def return_event_counts_as_dict(event_reporting_data_structure):
result = {}
for gene_block in sorted(event_reporting_data_structure.keys()):
print "gene_block ", gene_block
result.update({gene_block:{}})
org_list = event_reporting_data_structure[gene_block]
# use the magic that is itertools to make an iterable for all combinations of two organisms for comparison
for pair in itertools.combinations_with_replacement(org_list, 2):
#print "Pair", pair
org1, org2 = pair
deletions = return_deletions(event_reporting_data_structure[gene_block], org1, org2)
duplications = return_duplications(event_reporting_data_structure[gene_block], org1, org2)
splits = return_splits(event_reporting_data_structure[gene_block], org1, org2)
print "org1", org1, "org2", org2
#print "Deletions", deletions, "Duplications", duplications, "Splits", splits
# There is a faster implementation that uses a try except... which i have been kind enough to include if speed is you thing.
# on small lists of organisms it really does not matter
'''if org1 in result[gene_block].keys():
result[gene_block][org1].update({org2:{'deletions':deletions, 'duplications':duplications, 'splits':splits}})
else:
result[gene_block].update({org1:{org2:{'deletions':deletions, 'duplications':duplications, 'splits':splits}}})
'''
if org1 in result[gene_block].keys():
result[gene_block][org1].update({org2:{'deletions':deletions, 'duplications':duplications, 'splits':splits}})
else:
result[gene_block].update({org1:{org2:{'deletions':deletions, 'duplications':duplications, 'splits':splits}}})
'''
try:
result[gene_block][org1].update({org2:{'deletions':deletions, 'duplications':duplications, 'splits':splits}})
except:
result[gene_block].update({org1:{org2:{'deletions':deletions, 'duplications':duplications, 'splits':splits}}})
'''
return result
# This function will return the rearrangement distance between two organisms by using the Levenshtein edit distance
# as a rough metric.
def return_rearrangements(sorted_gene_block_results, org1, org2):
if org1 != org2:
pass
else:
return 0
# Take a list of unique genes between two organisms and return a mapping for gene name to a single character.
# This will be used to determine the Levenshtein edit distance.
def make_gene_string_dict(gene_list):
result = {}
for gene, index in zip(gene_list, range(0,len(gene_list))):
print gene
result.update({gene:chr(65+index)})
#print operon, result[operon], len(result)
return result
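# Hedged worked example (not part of the original script): encoding gene order as
# single-character strings so the Levenshtein edit distance can score rearrangements
# and losses. The gene names and orders below are made up.
def example_gene_string_distance():
    char_map = make_gene_string_dict(['atpA', 'atpB', 'atpC'])  # {'atpA': 'A', ...}
    reference = ''.join(char_map[g] for g in ['atpA', 'atpB', 'atpC'])  # 'ABC'
    observed = ''.join(char_map[g] for g in ['atpB', 'atpC'])           # 'BC'
    return distance(reference, observed)  # one missing gene -> edit distance of 1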
# This function will take a list of ordered homologs, and groups them by a max_gap constraint.
# The return is a list of lists. Single genes and gene blocks will both be represented as groups.
def homolog_list_grouping_function(list_homologs, max_gap):
result = []
neighborhood = [list_homologs[0]]
for i in list_homologs[1:]:
#look at current
start = neighborhood[-1].start() #start = list_homologs[i].start()
stop = neighborhood[-1].stop() #stop = list_homologs[i].stop()
# look at next
start_n = i.start() #start_n = list_homologs[i+1].start()
stop_n = i.stop() #stop_n = list_homologs[i+1].stop()
# We have found neighboring genes, as defined by max_gap
if math.fabs(start - stop_n) < max_gap or math.fabs(stop - start_n) < max_gap:
neighborhood.append(i)
# These genes do not neighbor eachother
else:
result.append(neighborhood)
neighborhood = [i]
result.append(neighborhood)
#print list_homologs[0].organism(), "result", result, "neighborhood_found ", neighborhood_found
return result
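# Hedged illustration (not part of the original script): grouping three hits with a
# max_gap of 500. _FakeHit is a minimal stand-in exposing only the start()/stop()
# methods used above; the real input is a list of Homolog objects sorted by start.
class _FakeHit(object):
    def __init__(self, start, stop):
        self._start, self._stop = start, stop
    def start(self):
        return self._start
    def stop(self):
        return self._stop
def example_homolog_grouping():
    hits = [_FakeHit(100, 400), _FakeHit(600, 900), _FakeHit(5000, 5300)]
    # the first two hits lie within 500 bases of each other, the third does not
    return homolog_list_grouping_function(hits, 500)  # -> [[hit0, hit1], [hit2]]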
def main():
start = time.time()
parsed_args = parser_code()
gene_block_file, infolder, outfolder, operon_filter_file, num_proc = check_options(parsed_args)
#print gene_block_file, infolder, outfolder, operon_filter_file, num_proc
# Step 1: Filter the list of gene blocks from the input folder that we are interested in,
# and return information about them for use in later steps.
file_list = return_file_list(infolder, operon_filter_file)
# Step 2: Parse the results using the homolog class, and store result as a dictionary that is keyed on the accession number.
# we use the accession number since it is the unique organism identifier that the rest of the scripts in the project use.
sorted_gene_block_results = return_gene_block_organism_data(file_list)
# Step 3: Calculate the gene counts and number of splits.
# In later versions of the script, it may be necessary to add more events, and this code will have to adapt to that.
# This is the point of insertion for new code to carry the raw information that is used later for event determination.
# TODO: make commandline argument!
max_gap = 500
# return a dict that contains the raw information needed to make a between-organism comparison of the events used in our method.
event_reporting_data_structure = make_event_reporting_data_structure(sorted_gene_block_results, max_gap)
# Step 4: Actually do the comparison and report the result.
# It will be keyed {gene_block:{NC1:{NC2:{event1:numeric, event2:numeric, etc:numeric}}}}
event_count_dict = return_event_counts_as_dict(event_reporting_data_structure)
print "Length", len(event_count_dict.keys()), event_count_dict.keys()
outfile = './event_dict.p'
pickle.dump(event_count_dict, open(outfile, 'w'))
#parallel_list_param = [(i, outfolder, max_gap, e_val) for i in file_list]
#parse_operon_result_files(infolder, outfolder, operon_filter_file)
print time.time() - start
if __name__ == '__main__':
main()
| reamdc1/gene_block_evolution | make_event_distance_matrix.py | Python | gpl-3.0 | 23,885 | [
"BLAST"
] | 3bb98ec4dedbfcfccbaea2fd00989a9b414f117a46cda0c47e9416399b0eab3f |
from __future__ import print_function
# -*- coding: utf-8 -*-
"""This module defines I/O routines with CASTEP files.
The key idea is that all functions accept or return atoms objects.
CASTEP specific parameters will be returned through the <atoms>.calc
attribute.
"""
from numpy import sqrt, radians, sin, cos, matrix, array, cross, float32, dot
import ase
from ase.constraints import FixAtoms, FixCartesian
from ase.parallel import paropen
import os
__all__ = [
'read_castep',
'read_cell',
'read_geom',
'read_param',
'read_seed',
'write_cell',
'write_param',
]
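# Hedged usage sketch (not part of the original module): writing a minimal two-atom Cu
# cell to 'example.cell'. The path and geometry are illustrative; read_cell() can load
# the file back as an Atoms object with a Castep calculator attached, provided the
# CASTEP calculator is set up in the local ASE installation.
def _example_write_cell():
    atoms = ase.Atoms('Cu2', positions=[[0., 0., 0.], [1.8, 1.8, 1.8]],
                      cell=[3.6, 3.6, 3.6], pbc=True)
    return write_cell('example.cell', atoms, positions_frac=True, force_write=True)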
def write_cell(filename, atoms, positions_frac=False, castep_cell=None,
force_write=False):
"""This CASTEP export function write minimal information to
a .cell file. If the atoms object is a trajectory, it will
take the last image.
"""
if atoms is None:
print("Atoms object not initialized")
return False
if isinstance(atoms, list):
if len(atoms) > 1:
atoms = atoms[-1]
if os.path.isfile(filename) and not force_write:
print('ase.io.castep.write_param: Set optional argument')
print('force_write=True to overwrite %s.' % filename)
return False
fd = open(filename, 'w')
fd.write('#######################################################\n')
fd.write('#CASTEP cell file: %s\n' % filename)
fd.write('#Created using the Atomic Simulation Environment (ASE)#\n')
fd.write('#######################################################\n\n')
fd.write('%BLOCK LATTICE_CART\n')
cell = matrix(atoms.get_cell())
for line in atoms.get_cell():
fd.write(' %.10f %.10f %.10f\n' % tuple(line))
fd.write('%ENDBLOCK LATTICE_CART\n\n\n')
if positions_frac:
keyword = 'POSITIONS_FRAC'
positions = array(atoms.get_positions() * cell.I)
else:
keyword = 'POSITIONS_ABS'
positions = atoms.get_positions()
if atoms.get_initial_magnetic_moments().any():
pos_block = [('%s %8.6f %8.6f %8.6f SPIN=%4.2f' %
(x, y[0], y[1], y[2], m)) for (x, y, m)
in zip(atoms.get_chemical_symbols(),
positions,
atoms.get_initial_magnetic_moments())]
else:
pos_block = [('%s %8.6f %8.6f %8.6f' %
(x, y[0], y[1], y[2])) for (x, y)
in zip(atoms.get_chemical_symbols(),
positions)]
fd.write('%%BLOCK %s\n' % keyword)
for line in pos_block:
fd.write(' %s\n' % line)
fd.write('%%ENDBLOCK %s\n\n' % keyword)
# if atoms, has a CASTEP calculator attached, then only
# write constraints if really necessary
if hasattr(atoms, 'calc')\
and hasattr(atoms.calc, 'param')\
and hasattr(atoms.calc.param, 'task'):
task = atoms.calc.param.task
if atoms.calc.param.task.value is None:
suppress_constraints = True
elif task.value.lower() not in [
'geometryoptimization',
'moleculardynamics',
'transitionstatesearch',
'phonon']:
suppress_constraints = True
else:
suppress_constraints = False
else:
suppress_constraints = True
constraints = atoms.constraints
if len(constraints) and not suppress_constraints:
fd.write("%BLOCK IONIC_CONSTRAINTS \n")
count = 0
for constr in constraints:
if not isinstance(constr, FixAtoms)\
and not isinstance(constr, FixCartesian)\
and not suppress_constraints:
print('Warning: you have constraints in your atoms, that are')
print(' not supported by CASTEP')
break
if isinstance(constr, FixAtoms):
# sorry, for this complicated block
# reason is that constraint.index can either
# hold booleans or integers and in both cases
# it is an numpy array, so no simple comparison works
for n, val in enumerate(constr.index):
if val.dtype.name.startswith('bool'):
if not val:
continue
symbol = atoms.get_chemical_symbols()[n]
nis = atoms.calc._get_number_in_species(n)
elif val.dtype.name.startswith('int'):
symbol = atoms.get_chemical_symbols()[val]
nis = atoms.calc._get_number_in_species(val)
else:
raise UserWarning('Unrecognized index in' + \
' constraint %s' % constr)
fd.write("%6d %3s %3d 1 0 0 \n" % (count + 1,
symbol,
nis))
fd.write("%6d %3s %3d 0 1 0 \n" % (count + 2,
symbol,
nis))
fd.write("%6d %3s %3d 0 0 1 \n" % (count + 3,
symbol,
nis))
count += 3
elif isinstance(constr, FixCartesian):
n = constr.a
symbol = atoms.get_chemical_symbols()[n]
nis = atoms.calc._get_number_in_species(n)
fix_cart = - constr.mask + 1
if fix_cart[0]:
count += 1
fd.write("%6d %3s %3d 1 0 0 \n" % (count, symbol, nis))
if fix_cart[1]:
count += 1
fd.write("%6d %3s %3d 0 1 0 \n" % (count, symbol, nis))
if fix_cart[2]:
count += 1
fd.write("%6d %3s %3d 0 0 1 \n" % (count, symbol, nis))
fd.write("%ENDBLOCK IONIC_CONSTRAINTS \n")
if castep_cell is None:
if hasattr(atoms, 'calc') and hasattr(atoms.calc, 'cell'):
castep_cell = atoms.calc.cell
else:
fd.close()
return True
for option in castep_cell._options.values():
if option.value is not None:
if option.type == 'Block':
fd.write('%%BLOCK %s\n' % option.keyword.upper())
fd.write(option.value)
fd.write('\n%%ENDBLOCK %s\n' % option.keyword.upper())
else:
fd.write('%s : %s\n' % (option.keyword.upper(), option.value))
fd.close()
return True
def read_cell(filename, _=None):
"""Read a .cell file and return an atoms object.
Any value found that does not fit the atoms API
will be stored in the atoms.calc attribute.
"""
from ase.calculators.castep import Castep
calc = Castep()
fileobj = open(filename)
lines = fileobj.readlines()
fileobj.close()
def get_tokens(lines, l):
"""Tokenizes one line of a *cell file."""
comment_chars = "#!"
while l < len(lines):
line = lines[l].strip()
if len(line) == 0:
l += 1
continue
elif any([line.startswith(comment_char)
for comment_char in comment_chars]):
l += 1
continue
else:
icomment = len(line)
for c in comment_chars:
if c in line:
icomment = min(icomment, line.index(c))
tokens = line[:icomment].split()
return tokens, l + 1
tokens = ""
print("read_cell: Warning - get_tokens has not found any more tokens")
return tokens, l
lat = []
have_lat = False
pos = []
spec = []
constraints = []
raw_constraints = {}
have_pos = False
pos_frac = False
l = 0
while l < len(lines):
tokens, l = get_tokens(lines, l)
if not tokens:
continue
elif tokens[0].upper() == "%BLOCK":
if tokens[1].upper() == "LATTICE_CART" and not have_lat:
tokens, l = get_tokens(lines, l)
if len(tokens) == 1:
print('read_cell: Warning - ignoring unit specifier in')
print('%BLOCK LATTICE_CART (assuming Angstrom instead)')
tokens, l = get_tokens(lines, l)
for _ in range(3):
lat_vec = [float(a) for a in tokens[0:3]]
lat.append(lat_vec)
tokens, l = get_tokens(lines, l)
if tokens[0].upper() != "%ENDBLOCK":
print('read_cell: Warning - ignoring more than three')
print('lattice vectors in invalid %BLOCK LATTICE_CART')
print('%s ...' % tokens[0].upper())
have_lat = True
elif tokens[1].upper() == "LATTICE_ABC" and not have_lat:
tokens, l = get_tokens(lines, l)
if len(tokens) == 1:
print('read_cell: Warning - ignoring unit specifier in')
print('%BLOCK LATTICE_ABC (assuming Angstrom instead)')
tokens, l = get_tokens(lines, l)
a, b, c = map(float, tokens[0:3])
tokens, l = get_tokens(lines, l)
alpha, beta, gamma = [radians(float(phi)) for phi in tokens[0:3]]
tokens, l = get_tokens(lines, l)
if tokens[0].upper() != "%ENDBLOCK":
print('read_cell: Warning - ignoring additional lines in')
print('invalid %BLOCK LATTICE_ABC')
lat_a = [a, 0, 0]
lat_b = [b * cos(gamma), b * sin(gamma), 0]
lat_c1 = c * cos(beta)
lat_c2 = c * (cos(alpha) - cos(beta) * cos(gamma)) / sin(gamma)
lat_c3 = sqrt(c * c - lat_c1 * lat_c1 - lat_c2 * lat_c2)
lat_c = [lat_c1, lat_c2, lat_c3]
lat = [lat_a, lat_b, lat_c]
have_lat = True
elif tokens[1].upper() == "POSITIONS_ABS" and not have_pos:
tokens, l = get_tokens(lines, l)
if len(tokens) == 1:
print('read_cell: Warning - ignoring unit specifier in')
print('%BLOCK POSITIONS_ABS(assuming Angstrom instead)')
tokens, l = get_tokens(lines, l)
while len(tokens) == 4:
spec.append(tokens[0])
pos.append([float(p) for p in tokens[1:4]])
tokens, l = get_tokens(lines, l)
if tokens[0].upper() != "%ENDBLOCK":
print('read_cell: Warning - ignoring invalid lines in')
print('%%BLOCK POSITIONS_ABS:\n\t %s' % tokens)
have_pos = True
elif tokens[1].upper() == "POSITIONS_FRAC" and not have_pos:
pos_frac = True
tokens, l = get_tokens(lines, l)
while len(tokens) == 4:
spec.append(tokens[0])
pos.append([float(p) for p in tokens[1:4]])
tokens, l = get_tokens(lines, l)
if tokens[0].upper() != "%ENDBLOCK":
                print('read_cell: Warning - ignoring invalid lines in')
print('%%BLOCK POSITIONS_FRAC:\n\t %s' % tokens)
have_pos = True
elif tokens[1].upper() == 'SPECIES_POT':
tokens, l = get_tokens(lines, l)
while tokens and not tokens[0].upper() == '%ENDBLOCK':
if len(tokens) == 2:
calc.cell.species_pot = tuple(tokens)
tokens, l = get_tokens(lines, l)
elif tokens[1].upper() == 'IONIC_CONSTRAINTS':
while True:
if tokens and tokens[0].upper() == '%ENDBLOCK':
break
tokens, l = get_tokens(lines, l)
if not len(tokens) == 6:
continue
_, species, nic, x, y, z = tokens
nic = int(nic)
if (species, nic) not in raw_constraints:
raw_constraints[(species, nic)] = []
raw_constraints[(species, nic)].append(array(
[x, y, z]))
else:
print('Warning: the keyword %s is not' % tokens[1].upper())
print(' interpreted in cell files')
while not tokens[0].upper() == '%ENDBLOCK':
tokens, l = get_tokens(lines, l)
#raise UserWarning
else:
key = tokens[0]
value = ' '.join(tokens[1:])
try:
calc.__setattr__(key, value)
except:
print("Problem setting calc.cell.%s = %s" % (key, value))
raise
if pos_frac:
atoms = ase.Atoms(
calculator=calc,
cell=lat,
pbc=True,
scaled_positions=pos,
symbols=spec,
)
else:
atoms = ase.Atoms(
calculator=calc,
cell=lat,
pbc=True,
positions=pos,
symbols=spec,
)
fixed_atoms = []
for (species, nic), value in raw_constraints.items():
absolute_nr = atoms.calc._get_absolute_number(species, nic)
if len(value) == 3:
fixed_atoms.append(absolute_nr)
elif len(value) == 2:
constraint = ase.constraints.FixedLine(a=absolute_nr,
direction=cross(value[0], value[1]))
constraints.append(constraint)
elif len(value) == 1:
constraint = ase.constraints.FixedPlane(a=absolute_nr,
direction=array(value[0], dtype=float32))
constraints.append(constraint)
else:
            print('Error: Found %s constraint statements attached to atom %s'
                  % (len(value), absolute_nr))
constraints.append(ase.constraints.FixAtoms(fixed_atoms))
atoms.set_constraint(constraints)
# needs to go here again to have the constraints in
# atoms.calc.atoms.constraints as well
atoms.calc.atoms = atoms
atoms.calc.push_oldstate()
return atoms
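# Hedged usage sketch (not part of the original module): as described in the
# read_cell docstring above, unknown keywords end up on atoms.calc and the
# %BLOCK IONIC_CONSTRAINTS entries become ASE constraint objects. The filename
# 'seed.cell' is hypothetical.
def _example_read_cell_usage(path='seed.cell'):
    atoms = read_cell(path)
    print(atoms.constraints)  # FixAtoms / FixedLine / FixedPlane objects
    print(atoms.calc)         # holds keywords that did not fit the Atoms API
    return atoms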
# this actually does not belong here
# think how one could join this with
# the ase.calculators.castep.Castep.read()
# in the future!
def read_castep(filename, _=-1):
"""Reads a .castep file and returns an atoms object.
The calculator information will be stored in the calc attribute.
If more than one SCF step is found, a list of all steps
will be stored in the traj attribute.
Note that the index argument has no effect as of now.
"""
from ase.calculators.singlepoint import SinglePointCalculator
fileobj = open(filename)
lines = fileobj.readlines()
fileobj.close()
traj = []
energy_total = None
energy_0K = None
for i, line in enumerate(lines):
if 'NB est. 0K energy' in line:
energy_0K = float(line.split()[6])
elif 'Final energy, E' in line:
energy_total = float(line.split()[4])
elif 'Unit Cell' in line:
cell = [x.split()[0:3] for x in lines[i + 3:i + 6]]
cell = array([[float(col) for col in row] for row in cell])
elif 'Cell Contents' in line:
geom_starts = i
start_found = False
for j, jline in enumerate(lines[geom_starts:]):
if jline.find('xxxxx') > 0 and start_found:
geom_stop = j + geom_starts
break
if jline.find('xxxx') > 0 and not start_found:
geom_start = j + geom_starts + 4
start_found = True
species = [line.split()[1] for line in lines[geom_start:geom_stop]]
geom = dot(array([[float(col) for col in line.split()[3:6]]
for line in lines[geom_start:geom_stop]]), cell)
elif 'Writing model to' in line:
atoms = ase.Atoms(
cell=cell,
pbc=True,
positions=geom,
symbols=''.join(species),
)
# take 0K energy where available, else total energy
if energy_0K:
energy = energy_0K
else:
energy = energy_total
# generate a minimal single-point calculator
sp_calc = SinglePointCalculator(atoms=atoms,
energy=energy,
forces=None,
magmoms=None,
stress=None,
)
atoms.set_calculator(sp_calc)
traj.append(atoms)
return traj
def read_param(filename, calc=None):
    """Reads a .param file. If a Castep object is passed as the
    second argument, the parameter settings are merged into
    the existing object and returned. Otherwise a new Castep()
    calculator instance gets created and returned.
Parameters:
        filename: the .param file. Only opened for reading
calc: [Optional] calculator object to hang parameters onto
"""
if calc is None:
calc = ase.calculators.castep.Castep(check_castep_version=False)
calc.merge_param(filename)
return calc
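# Hedged usage sketch (not part of the original module): read_param either
# creates a fresh Castep calculator or merges the .param keywords into an
# existing one, as its docstring describes. 'my.param' is a hypothetical
# filename.
def _example_read_param_usage():
    calc = read_param('my.param')             # new calculator instance
    calc = read_param('my.param', calc=calc)  # merge into an existing one
    return calc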
def write_param(filename, param, check_checkfile=False,
force_write=False,
interface_options=None):
"""Writes a CastepParam object to a CASTEP .param file
Parameters:
filename: the location of the file to write to. If it
exists it will be overwritten without warning. If it
doesn't it will be created.
param: a CastepParam instance
check_checkfile : if set to True, write_param will
only write continuation or reuse statement
if a restart file exists in the same directory
"""
if os.path.isfile(filename) and not force_write:
print('ase.io.castep.write_param: Set optional argument')
print('force_write=True to overwrite %s.' % filename)
return False
out = paropen(filename, 'w')
out.write('#######################################################\n')
out.write('#CASTEP param file: %s\n' % filename)
out.write('#Created using the Atomic Simulation Environment (ASE)#\n')
if interface_options is not None:
out.write('# Internal settings of the calculator\n')
out.write('# This can be switched off by settings\n')
out.write('# calc._export_settings = False\n')
out.write('# If stated, this will be automatically processed\n')
out.write('# by ase.io.castep.read_seed()\n')
for option, value in sorted(interface_options.items()):
out.write('# ASE_INTERFACE %s : %s\n' % (option, value))
out.write('#######################################################\n\n')
for keyword, opt in sorted(param._options.items()):
if opt.type == 'Defined':
if opt.value is not None:
                out.write('%s\n' % keyword)
elif opt.value is not None:
if keyword in ['continuation', 'reuse'] and check_checkfile:
if opt.value == 'default':
if not os.path.exists('%s.%s'\
% (os.path.splitext(filename)[0], 'check')):
continue
elif not os.path.exists(opt.value):
continue
out.write('%s : %s\n'
% (keyword, opt.value))
out.close()
def read_geom(filename, _=-1):
"""Reads a .geom file produced by the CASTEP GeometryOptimization task and
returns an atoms object.
The information about total free energy and forces of each atom for every
relaxation step will be stored for further analysis especially in a
single-point calculator.
    Note that everything in the .geom file is in atomic units, which have
    been converted to the commonly used units Angstrom (length) and eV (energy).
Note that the index argument has no effect as of now.
Contribution by Wei-Bing Zhang. Thanks!
"""
from ase.calculators.singlepoint import SinglePointCalculator
fileobj = open(filename)
txt = fileobj.readlines()
fileobj.close()
traj = []
# Source: CODATA2002, used by default
# in CASTEP 5.01
# but check with your version in case of error
# ase.units is based on CODATA1986/
# here we hard-code from http://physics.nist.gov/cuu/Document/all_2002.pdf
Hartree = 27.211384565719481
Bohr = 0.5291772108
print('N.B.: Energy in .geom file is not 0K extrapolated.')
for i, line in enumerate(txt):
if line.find("<-- E") > 0:
start_found = True
energy = float(line.split()[0]) * Hartree
cell = [x.split()[0:3] for x in txt[i + 1:i + 4]]
cell = array([[float(col) * Bohr for col in row] for row in
cell])
if line.find('<-- R') > 0 and start_found:
start_found = False
geom_start = i
for i, line in enumerate(txt[geom_start:]):
if line.find('<-- F') > 0:
geom_stop = i + geom_start
break
species = [line.split()[0] for line in
txt[geom_start:geom_stop]]
geom = array([[float(col) * Bohr for col in
line.split()[2:5]] for line in txt[geom_start:geom_stop]])
forces = array([[float(col) * Hartree / Bohr for col in
line.split()[2:5]] for line in
txt[geom_stop:geom_stop + (geom_stop - geom_start)]])
image = ase.Atoms(species, geom, cell=cell, pbc=True)
image.set_calculator(SinglePointCalculator(energy, forces, None,
None, image))
traj.append(image)
return traj
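# Illustration of the unit handling described in the read_geom docstring: the
# .geom file is written in atomic units and values are scaled with the
# CODATA2002 factors hard-coded above. A minimal sketch, not called anywhere
# in this module.
def _example_geom_unit_conversion(energy_hartree, length_bohr):
    Hartree = 27.211384565719481  # eV per Hartree (CODATA2002)
    Bohr = 0.5291772108           # Angstrom per Bohr (CODATA2002)
    return energy_hartree * Hartree, length_bohr * Bohr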
def read_seed(seed, new_seed=None, ignore_internal_keys=False):
"""A wrapper around the CASTEP Calculator in conjunction with
read_cell and read_param. Basically this can be used to reuse
a previous calculation which results in a triple of
    cell/param/castep files. The label of the calculation is pre-
    fixed with `copy_of_` and everything else will be recycled as
much as possible from the addressed calculation.
"""
directory = os.path.abspath(os.path.dirname(seed))
seed = os.path.basename(seed)
paramfile = os.path.join(directory, '%s.param' % seed)
cellfile = os.path.join(directory, '%s.cell' % seed)
castepfile = os.path.join(directory, '%s.castep' % seed)
atoms = read_cell(cellfile)
atoms.calc._directory = directory
atoms.calc._rename_existing_dir = False
atoms.calc._castep_pp_path = directory
atoms.calc.merge_param(paramfile,
ignore_internal_keys=ignore_internal_keys)
if new_seed is None:
atoms.calc._label = 'copy_of_%s' % seed
else:
atoms.calc._label = str(new_seed)
if os.path.isfile(castepfile):
# _set_atoms needs to be True here
# but we set it right back to False
atoms.calc._set_atoms = True
atoms.calc.read(castepfile)
atoms.calc._set_atoms = False
# sync the top-level object with the
# one attached to the calculator
atoms = atoms.calc.atoms
else:
        print('Corresponding .castep file not found.')
atoms.calc.push_oldstate()
return atoms
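# Hedged sketch of the reuse workflow described in the read_seed docstring:
# given 'oldcalc.cell', 'oldcalc.param' and optionally 'oldcalc.castep' in one
# directory, the previous calculation is recycled under a new label. The seed
# name 'oldcalc' is hypothetical.
def _example_read_seed_usage():
    atoms = read_seed('oldcalc', new_seed='restart_of_oldcalc')
    return atoms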
| suttond/MODOI | ase/io/castep.py | Python | lgpl-3.0 | 23,925 | [
"ASE",
"CASTEP"
] | 32122a632914007ecf90a702c6e6ffb31d007d46e1e67d1357b7efa7969ea8d5 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
import webnotes.utils
from webnotes.utils import add_days, cint, cstr, flt, getdate, nowdate, _round
from webnotes.model.bean import getlist
from webnotes.model.code import get_obj
from webnotes import msgprint, _
from webnotes.model.mapper import get_mapped_doclist
from webnotes.model.doc import addchild
from controllers.selling_controller import SellingController
import time
import calendar
class DocType(SellingController):
def __init__(self, doc, doclist=None):
self.doc = doc
if not doclist: doclist = []
self.doclist = doclist
self.tname = 'Sales Order Item'
self.fname = 'sales_order_details'
self.person_tname = 'Target Detail'
self.partner_tname = 'Partner Target Detail'
self.territory_tname = 'Territory Target Detail'
def validate_mandatory(self):
# validate transaction date v/s delivery date
if self.doc.delivery_date:
if getdate(self.doc.transaction_date) > getdate(self.doc.delivery_date):
msgprint("Expected Delivery Date cannot be before Sales Order Date")
raise Exception
def validate_po(self):
# validate p.o date v/s delivery date
if self.doc.po_date and self.doc.delivery_date and getdate(self.doc.po_date) > getdate(self.doc.delivery_date):
msgprint("Expected Delivery Date cannot be before Purchase Order Date")
raise Exception
if self.doc.po_no and self.doc.customer:
so = webnotes.conn.sql("select name from `tabSales Order` \
where ifnull(po_no, '') = %s and name != %s and docstatus < 2\
and customer = %s", (self.doc.po_no, self.doc.name, self.doc.customer))
			if so and so[0][0]:
				msgprint("""Another Sales Order (%s) exists against the same PO No and Customer.
					Please make sure you are not creating a duplicate entry.""" % so[0][0])
def validate_for_items(self):
check_list, flag = [], 0
chk_dupl_itm = []
for d in getlist(self.doclist, 'sales_order_details'):
e = [d.item_code, d.description, d.reserved_warehouse, d.prevdoc_docname or '']
f = [d.item_code, d.description]
if webnotes.conn.get_value("Item", d.item_code, "is_stock_item") == 'Yes':
				if not d.reserved_warehouse:
					msgprint("""Please enter Reserved Warehouse for item %s
						as it is a stock item""" % d.item_code, raise_exception=1)
if e in check_list:
msgprint("Item %s has been entered twice." % d.item_code)
else:
check_list.append(e)
else:
if f in chk_dupl_itm:
msgprint("Item %s has been entered twice." % d.item_code)
else:
chk_dupl_itm.append(f)
# used for production plan
d.transaction_date = self.doc.transaction_date
tot_avail_qty = webnotes.conn.sql("select projected_qty from `tabBin` \
where item_code = '%s' and warehouse = '%s'" % (d.item_code,d.reserved_warehouse))
d.projected_qty = tot_avail_qty and flt(tot_avail_qty[0][0]) or 0
def validate_sales_mntc_quotation(self):
for d in getlist(self.doclist, 'sales_order_details'):
if d.prevdoc_docname:
res = webnotes.conn.sql("select name from `tabQuotation` where name=%s and order_type = %s", (d.prevdoc_docname, self.doc.order_type))
if not res:
msgprint("""Order Type (%s) should be same in Quotation: %s \
and current Sales Order""" % (self.doc.order_type, d.prevdoc_docname))
#msgprint(res)
def validate_order_type(self):
super(DocType, self).validate_order_type()
def validate_delivery_date(self):
if self.doc.order_type == 'Sales' and not self.doc.delivery_date:
msgprint("Please enter 'Expected Delivery Date'")
raise Exception
self.validate_sales_mntc_quotation()
def validate_proj_cust(self):
if self.doc.project_name and self.doc.customer_name:
res = webnotes.conn.sql("select name from `tabProject` where name = '%s' and (customer = '%s' or ifnull(customer,'')='')"%(self.doc.project_name, self.doc.customer))
if not res:
msgprint("Customer - %s does not belong to project - %s. \n\nIf you want to use project for multiple customers then please make customer details blank in project - %s."%(self.doc.customer,self.doc.project_name,self.doc.project_name))
raise Exception
def validate(self):
super(DocType, self).validate()
self.validate_order_type()
self.validate_delivery_date()
self.validate_mandatory()
self.validate_proj_cust()
self.validate_po()
self.validate_uom_is_integer("stock_uom", "qty")
self.validate_for_items()
self.validate_warehouse()
self.validate_taxentry()
from stock.doctype.packed_item.packed_item import make_packing_list
self.doclist = make_packing_list(self,'sales_order_details')
self.validate_with_previous_doc()
if not self.doc.status:
self.doc.status = "Draft"
import utilities
utilities.validate_status(self.doc.status, ["Draft", "Submitted", "Stopped",
"Cancelled"])
if not self.doc.billing_status: self.doc.billing_status = 'Not Billed'
if not self.doc.delivery_status: self.doc.delivery_status = 'Not Delivered'
def validate_warehouse(self):
from stock.utils import validate_warehouse_user, validate_warehouse_company
warehouses = list(set([d.reserved_warehouse for d in
self.doclist.get({"doctype": self.tname}) if d.reserved_warehouse]))
for w in warehouses:
validate_warehouse_user(w)
validate_warehouse_company(w, self.doc.company)
def validate_taxentry(self):
#count=0
length=len(getlist(self.doclist, 'other_charges'))
#webnotes.errprint(length)
		if length < 1:
			webnotes.msgprint("At least one entry must be specified in the tax table", raise_exception=1)
def validate_with_previous_doc(self):
super(DocType, self).validate_with_previous_doc(self.tname, {
"Quotation": {
"ref_dn_field": "prevdoc_docname",
"compare_fields": [["company", "="], ["currency", "="]]
}
})
def update_enquiry_status(self, prevdoc, flag):
enq = webnotes.conn.sql("select t2.prevdoc_docname from `tabQuotation` t1, `tabQuotation Item` t2 where t2.parent = t1.name and t1.name=%s", prevdoc)
if enq:
webnotes.conn.sql("update `tabOpportunity` set status = %s where name=%s",(flag,enq[0][0]))
def update_prevdoc_status(self, flag):
for quotation in self.doclist.get_distinct_values("prevdoc_docname"):
#webnotes.errprint(internal)
bean = webnotes.bean("Quotation", quotation)
#webnotes.errprint(bean)
if bean.doc.docstatus==2:
				webnotes.throw(quotation + ": " + webnotes._("Quotation is cancelled."))
#bean.get_controller().set_status(update=True)
def on_submit(self):
self.update_stock_ledger(update_stock = 1)
self.check_credit(self.doc.grand_total)
get_obj('Authorization Control').validate_approving_authority(self.doc.doctype, self.doc.grand_total, self)
self.update_prevdoc_status('submit')
webnotes.conn.set(self.doc, 'status', 'Submitted')
#self.cal_salestarget()
#ef get_installation_details(self):
# def cal_salestarget(self):
# webnotes.errprint("in sales target")
# #import time
# ## dd/mm/yyyy format
# r=time.strftime("%Y-%m-%d")
# webnotes.errprint(r)
# webnotes.errprint(getdate(r).month)
# m=getdate(r).month
# webnotes.errprint(m)
# m_name=calendar.month_name[m]
# webnotes.errprint(m_name)
# if m==1 or m==2 or m==3:
# webnotes.errprint("in first")
# s_date='2014-01-01'
# e_date='2014-03-31'
# webnotes.errprint(s_date)
# webnotes.errprint(e_date)
# elif m==4 or m==5 or m==6:
# webnotes.errprint("in second")
# s_date='2014-04-01'
# e_date='2014-06-30'
# elif m==7 or m==8 or m==9:
# webnotes.errprint("in third")
# s_date='2014-07-01'
# e_date='2014-09-30'
# else :
# webnotes.errprint("in forth")
# s_date='2014-07-01'
# e_date='2014-12-31'
# #webnotes.errprint([select name from `tabSales Order` where transaction_date between %s and %s ,(s_date,e_date))]
# qry1=webnotes.conn.sql("""select parent,target_amount ,variable_pay from `tabTarget Detail` where parent in (select sales_person from `tabSales Team` where parent in
# (select name from `tabSales Order` where transaction_date between %s and %s
# group by sales_person)) """,(s_date,e_date) ,debug=1)
# webnotes.errprint(qry1)
# #list1=[]
# for i in qry1:
# #webnotes.errprint(i[0])
# qr=webnotes.conn.sql("""select distribution_id from `tabSales Person` where name=%s""",i[0],as_list=1)
# #webnotes.errprint(qr)
# if m_name=='January' or m_name=='February' or m_name=='March':
# month='January-March'
# elif m_name=='April' or m_name=='May' or m_name=='June':
# month='April-June'
# elif m_name=='July' or m_name=='August' or m_name=='September':
# month='July-September'
# else:
# month='October-December'
# webnotes.errprint(month)
# qt=webnotes.conn.sql(""" select percentage_allocation/100 from `tabBudget Distribution Detail` where
# month=%s and parent=%s""",(month,qr[0][0]))
# webnotes.errprint(qt[0][0])
# #webnotes.errprint(qt[0][0]*i[1])
# amt=qt[0][0]*i[1]
# webnotes.errprint(amt)
# qry=webnotes.conn.sql(""" select sum(allocated_amount) as amount from `tabSales Team` where parent in
# (select name from `tabSales Order` where transaction_date between %s and %s )
# and sales_person=%s """,(s_date,e_date,i[0]))
# webnotes.errprint(qry)
# name=webnotes.conn.sql("""select employee from `tabSales Person` where name=%s """,i[0],as_list=1)
# webnotes.errprint(name[0][0])
# #webnotes.errprint(["pay",i[2]])
# t= ((qry[0][0]/amt)*100)/100
# webnotes.errprint(t)
# pay= i[2]*t
# webnotes.errprint(pay)
def on_cancel(self):
# Cannot cancel stopped SO
if self.doc.status == 'Stopped':
msgprint("Sales Order : '%s' cannot be cancelled as it is Stopped. Unstop it for any further transactions" %(self.doc.name))
raise Exception
self.check_nextdoc_docstatus()
self.update_stock_ledger(update_stock = -1)
self.update_prevdoc_status('cancel')
webnotes.conn.set(self.doc, 'status', 'Cancelled')
def check_nextdoc_docstatus(self):
# Checks Delivery Note
submit_dn = webnotes.conn.sql("select t1.name from `tabDelivery Note` t1,`tabDelivery Note Item` t2 where t1.name = t2.parent and t2.against_sales_order = %s and t1.docstatus = 1", self.doc.name)
if submit_dn:
msgprint("Delivery Note : " + cstr(submit_dn[0][0]) + " has been submitted against " + cstr(self.doc.doctype) + ". Please cancel Delivery Note : " + cstr(submit_dn[0][0]) + " first and then cancel "+ cstr(self.doc.doctype), raise_exception = 1)
# Checks Sales Invoice
submit_rv = webnotes.conn.sql("select t1.name from `tabSales Invoice` t1,`tabSales Invoice Item` t2 where t1.name = t2.parent and t2.sales_order = '%s' and t1.docstatus = 1" % (self.doc.name))
if submit_rv:
msgprint("Sales Invoice : " + cstr(submit_rv[0][0]) + " has already been submitted against " +cstr(self.doc.doctype)+ ". Please cancel Sales Invoice : "+ cstr(submit_rv[0][0]) + " first and then cancel "+ cstr(self.doc.doctype), raise_exception = 1)
#check maintenance schedule
submit_ms = webnotes.conn.sql("select t1.name from `tabMaintenance Schedule` t1, `tabMaintenance Schedule Item` t2 where t2.parent=t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1",self.doc.name)
if submit_ms:
msgprint("Maintenance Schedule : " + cstr(submit_ms[0][0]) + " has already been submitted against " +cstr(self.doc.doctype)+ ". Please cancel Maintenance Schedule : "+ cstr(submit_ms[0][0]) + " first and then cancel "+ cstr(self.doc.doctype), raise_exception = 1)
# check maintenance visit
submit_mv = webnotes.conn.sql("select t1.name from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2 where t2.parent=t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1",self.doc.name)
if submit_mv:
msgprint("Maintenance Visit : " + cstr(submit_mv[0][0]) + " has already been submitted against " +cstr(self.doc.doctype)+ ". Please cancel Maintenance Visit : " + cstr(submit_mv[0][0]) + " first and then cancel "+ cstr(self.doc.doctype), raise_exception = 1)
# check production order
pro_order = webnotes.conn.sql("""select name from `tabProduction Order` where sales_order = %s and docstatus = 1""", self.doc.name)
if pro_order:
msgprint("""Production Order: %s exists against this sales order.
Please cancel production order first and then cancel this sales order""" %
pro_order[0][0], raise_exception=1)
def check_modified_date(self):
mod_db = webnotes.conn.sql("select modified from `tabSales Order` where name = '%s'" % self.doc.name)
date_diff = webnotes.conn.sql("select TIMEDIFF('%s', '%s')" % ( mod_db[0][0],cstr(self.doc.modified)))
if date_diff and date_diff[0][0]:
msgprint("%s: %s has been modified after you have opened. Please Refresh"
% (self.doc.doctype, self.doc.name), raise_exception=1)
def stop_sales_order(self):
self.check_modified_date()
self.update_stock_ledger(-1)
webnotes.conn.set(self.doc, 'status', 'Stopped')
msgprint("""%s: %s has been Stopped. To make transactions against this Sales Order
you need to Unstop it.""" % (self.doc.doctype, self.doc.name))
def unstop_sales_order(self):
self.check_modified_date()
self.update_stock_ledger(1)
webnotes.conn.set(self.doc, 'status', 'Submitted')
msgprint("%s: %s has been Unstopped" % (self.doc.doctype, self.doc.name))
def update_stock_ledger(self, update_stock):
from stock.utils import update_bin
for d in self.get_item_list():
if webnotes.conn.get_value("Item", d['item_code'], "is_stock_item") == "Yes":
args = {
"item_code": d['item_code'],
"warehouse": d['reserved_warehouse'],
"reserved_qty": flt(update_stock) * flt(d['reserved_qty']),
"posting_date": self.doc.transaction_date,
"voucher_type": self.doc.doctype,
"voucher_no": self.doc.name,
"is_amended": self.doc.amended_from and 'Yes' or 'No'
}
update_bin(args)
def on_update(self):
pass
def get_portal_page(self):
return "order" if self.doc.docstatus==1 else None
def get_accessories_details(self,item):
#webnotes.errprint(item)
qry=webnotes.conn.sql("select default_warehouse,stock_uom from `tabItem` where name='"+item+"'")
#webnotes.errprint(qry[0][1])
q=webnotes.conn.sql("select description from `tabItem` where name='"+item+"'")
#webnotes.errprint(q[0][0])
qr=webnotes.conn.sql("select sum(actual_qty) from `tabStock Ledger Entry` where item_code='"+item+"' and warehouse='"+qry[0][0]+"'")
#webnotes.errprint(qr)
ch = addchild(self.doc, 'sales_order_details',
'Sales Order Item', self.doclist)
#webnotes.errprint(ch)
ch.item_code= item
ch.item_name= item
ch.description= q[0][0]
ch.qty= 1.00
ch.export_rate=0.00
ch.reserved_warehouse= qry[0][0]
ch.stock_uom=qry[0][1]
ch.actual_qty=qr[0][0]
ch.adj_rate=0.00
ch.save(new=1)
def set_missing_values(source, target):
bean = webnotes.bean(target)
bean.run_method("onload_post_render")
@webnotes.whitelist()
def make_internal_order(source_name, target_doclist=None):
#webnotes.errprint("in internal order")
return _make_internal_order(source_name, target_doclist)
def _make_internal_order(source_name, target_doclist=None, ignore_permissions=False):
#webnotes.errprint("in make internal order 2")
from webnotes.model.mapper import get_mapped_doclist
customer = _make_customer(source_name, ignore_permissions)
def set_missing_values(source, target):
if customer:
target[0].customer = customer.doc.name
target[0].customer_name = customer.doc.customer_name
si = webnotes.bean(target)
si.run_method("onload_post_render")
doclist = get_mapped_doclist("Sales Order", source_name, {
"Sales Order": {
"doctype": "Internal Order Form",
"validation": {
"docstatus": ["=", 1]
}
},
"Accessories Details": {
"doctype": "Accessories Details",
"field_map": {
"parent": "prevdoc_docname"
}
},
"Customer Details": {
"doctype": "Internal Order Customer Details",
"field_map": {
"parent": "prevdoc_docname"
}
},
"Consignee Details": {
"doctype": "Internal Order Consignee Details",
"field_map": {
"parent": "prevdoc_docname"
}
},
"Sales Order Item": {
"doctype": "Internal Order Item Details",
"field_map": {
"parent": "prevdoc_docname"
}
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"add_if_empty": True
}
}, target_doclist, set_missing_values, ignore_permissions=ignore_permissions)
# postprocess: fetch shipping address, set missing values
return [d.fields for d in doclist]
def _make_customer(source_name, ignore_permissions=False):
quotation = webnotes.conn.get_value("Quotation", source_name, ["lead", "order_type"])
if quotation and quotation[0]:
lead_name = quotation[0]
customer_name = webnotes.conn.get_value("Customer", {"lead_name": lead_name})
if not customer_name:
from selling.doctype.lead.lead import _make_customer
customer_doclist = _make_customer(lead_name, ignore_permissions=ignore_permissions)
customer = webnotes.bean(customer_doclist)
customer.ignore_permissions = ignore_permissions
if quotation[1] == "Shopping Cart":
customer.doc.customer_group = webnotes.conn.get_value("Shopping Cart Settings", None,
"default_customer_group")
try:
customer.insert()
return customer
except NameError, e:
if webnotes.defaults.get_global_default('cust_master_name') == "Customer Name":
customer.run_method("autoname")
customer.doc.name += "-" + lead_name
customer.insert()
return customer
else:
raise
except webnotes.MandatoryError:
from webnotes.utils import get_url_to_form
webnotes.throw(_("Before proceeding, please create Customer from Lead") + \
(" - %s" % get_url_to_form("Lead", lead_name)))
@webnotes.whitelist()
def make_material_request(source_name, target_doclist=None):
def postprocess(source, doclist):
doclist[0].material_request_type = "Purchase"
doclist = get_mapped_doclist("Sales Order", source_name, {
"Sales Order": {
"doctype": "Material Request",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Material Request Item",
"field_map": {
"parent": "sales_order_no",
"reserved_warehouse": "warehouse",
"stock_uom": "uom"
}
}
}, target_doclist, postprocess)
return [(d if isinstance(d, dict) else d.fields) for d in doclist]
@webnotes.whitelist()
def make_delivery_note(source_name, target_doclist=None):
def update_item(obj, target, source_parent):
target.amount = (flt(obj.qty) - flt(obj.delivered_qty)) * flt(obj.basic_rate)
target.export_amount = (flt(obj.qty) - flt(obj.delivered_qty)) * flt(obj.export_rate)
target.qty = flt(obj.qty) - flt(obj.delivered_qty)
doclist = get_mapped_doclist("Sales Order", source_name, {
"Sales Order": {
"doctype": "Delivery Note",
"field_map": {
"shipping_address": "address_display",
"shipping_address_name": "customer_address",
},
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Delivery Note Item",
"field_map": {
"export_rate": "export_rate",
"name": "prevdoc_detail_docname",
"parent": "against_sales_order",
"reserved_warehouse": "warehouse"
},
"postprocess": update_item,
"condition": lambda doc: doc.delivered_qty < doc.qty
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"add_if_empty": True
}
}, target_doclist, set_missing_values)
return [d.fields for d in doclist]
@webnotes.whitelist()
def make_sales_invoice(source_name, target_doclist=None):
def set_missing_values(source, target):
bean = webnotes.bean(target)
bean.doc.is_pos = 0
bean.run_method("onload_post_render")
def update_item(obj, target, source_parent):
target.export_amount = flt(obj.export_amount) - flt(obj.billed_amt)
target.amount = target.export_amount * flt(source_parent.conversion_rate)
target.qty = obj.export_rate and target.export_amount / flt(obj.export_rate) or obj.qty
doclist = get_mapped_doclist("Sales Order", source_name, {
"Sales Order": {
"doctype": "Sales Invoice",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Sales Invoice Item",
"field_map": {
"name": "so_detail",
"parent": "sales_order",
"reserved_warehouse": "warehouse"
},
"postprocess": update_item,
"condition": lambda doc: doc.amount==0 or doc.billed_amt < doc.export_amount
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"add_if_empty": True
}
}, target_doclist, set_missing_values)
return [d.fields for d in doclist]
@webnotes.whitelist()
def make_maintenance_schedule(source_name, target_doclist=None):
maint_schedule = webnotes.conn.sql("""select t1.name
from `tabMaintenance Schedule` t1, `tabMaintenance Schedule Item` t2
where t2.parent=t1.name and t2.prevdoc_docname=%s and t1.docstatus=1""", source_name)
if not maint_schedule:
doclist = get_mapped_doclist("Sales Order", source_name, {
"Sales Order": {
"doctype": "Maintenance Schedule",
"field_map": {
"name": "sales_order_no"
},
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Maintenance Schedule Item",
"field_map": {
"parent": "prevdoc_docname"
},
"add_if_empty": True
}
}, target_doclist)
return [d.fields for d in doclist]
@webnotes.whitelist()
def make_maintenance_visit(source_name, target_doclist=None):
visit = webnotes.conn.sql("""select t1.name
from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
where t2.parent=t1.name and t2.prevdoc_docname=%s
and t1.docstatus=1 and t1.completion_status='Fully Completed'""", source_name)
if not visit:
doclist = get_mapped_doclist("Sales Order", source_name, {
"Sales Order": {
"doctype": "Maintenance Visit",
"field_map": {
"name": "sales_order_no"
},
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Maintenance Visit Purpose",
"field_map": {
"parent": "prevdoc_docname",
"parenttype": "prevdoc_doctype"
},
"add_if_empty": True
}
}, target_doclist)
return [d.fields for d in doclist]
| Tejal011089/med2-app | selling/doctype/sales_order/sales_order.py | Python | agpl-3.0 | 23,576 | [
"VisIt"
] | e940111e17eb96515eb2750d870dad09df04a2216feecfe82aa882b7fa50317a |
"""
Provides rolling statistical moments and related descriptive
statistics implemented in Cython
"""
from __future__ import division
from functools import wraps
from collections import defaultdict
from numpy import NaN
import numpy as np
from pandas.core.api import DataFrame, Series, Panel, notnull
import pandas.algos as algos
import pandas.core.common as pdcom
from pandas.util.decorators import Substitution, Appender
__all__ = ['rolling_count', 'rolling_max', 'rolling_min',
'rolling_sum', 'rolling_mean', 'rolling_std', 'rolling_cov',
'rolling_corr', 'rolling_var', 'rolling_skew', 'rolling_kurt',
'rolling_quantile', 'rolling_median', 'rolling_apply',
'rolling_corr_pairwise', 'rolling_window',
'ewma', 'ewmvar', 'ewmstd', 'ewmvol', 'ewmcorr', 'ewmcov',
'expanding_count', 'expanding_max', 'expanding_min',
'expanding_sum', 'expanding_mean', 'expanding_std',
'expanding_cov', 'expanding_corr', 'expanding_var',
'expanding_skew', 'expanding_kurt', 'expanding_quantile',
'expanding_median', 'expanding_apply', 'expanding_corr_pairwise']
#------------------------------------------------------------------------------
# Docs
# The order of arguments for the _doc_template is:
# (header, args, kwargs, returns, notes)
_doc_template = """
%s
Parameters
----------
%s%s
Returns
-------
%s
%s
"""
_roll_kw = """window : int
Size of the moving window. This is the number of observations used for
calculating the statistic.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
center : boolean, default False
Set the labels at the center of the window.
how : string, default '%s'
Method for down- or re-sampling
"""
_roll_notes = r"""
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
_ewm_kw = r"""com : float, optional
Center of mass: :math:`\alpha = 1 / (1 + com)`,
span : float, optional
Specify decay in terms of span, :math:`\alpha = 2 / (span + 1)`
halflife : float, optional
Specify decay in terms of halflife, :math:`\alpha = 1 - exp(log(0.5) / halflife)`
min_periods : int, default 0
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : None or string alias / date offset object, default=None
Frequency to conform to before computing statistic
adjust : boolean, default True
Divide by decaying adjustment factor in beginning periods to account for
imbalance in relative weightings (viewing EWMA as a moving average)
how : string, default 'mean'
Method for down- or re-sampling
ignore_na : boolean, default False
Ignore missing values when calculating weights;
specify True to reproduce pre-0.15.0 behavior
"""
_ewm_notes = r"""
Notes
-----
Either center of mass or span must be specified
EWMA is sometimes specified using a "span" parameter `s`, we have that the
decay parameter :math:`\alpha` is related to the span as
:math:`\alpha = 2 / (s + 1) = 1 / (1 + c)`
where `c` is the center of mass. Given a span, the associated center of mass is
:math:`c = (s - 1) / 2`
So a "20-day EWMA" would have center 9.5.
When adjust is True (default), weighted averages are calculated using weights
(1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1.
When adjust is False, weighted averages are calculated recursively as:
weighted_average[0] = arg[0];
weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i].
When ignore_na is False (default), weights are based on absolute positions.
For example, the weights of x and y used in calculating the final weighted
average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and
(1-alpha)**2 and alpha (if adjust is False).
When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based on
relative positions. For example, the weights of x and y used in calculating
the final weighted average of [x, None, y] are 1-alpha and 1 (if adjust is
True), and 1-alpha and alpha (if adjust is False).
"""
_expanding_kw = """min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
"""
_type_of_input_retval = "y : type of input argument"
_flex_retval = """y : type depends on inputs
DataFrame / DataFrame -> DataFrame (matches on columns) or Panel (pairwise)
DataFrame / Series -> Computes result for each column
Series / Series -> Series"""
_pairwise_retval = "y : Panel whose items are df1.index values"
_unary_arg = "arg : Series, DataFrame\n"
_binary_arg_flex = """arg1 : Series, DataFrame, or ndarray
arg2 : Series, DataFrame, or ndarray, optional
if not supplied then will default to arg1 and produce pairwise output
"""
_binary_arg = """arg1 : Series, DataFrame, or ndarray
arg2 : Series, DataFrame, or ndarray
"""
_pairwise_arg = """df1 : DataFrame
df2 : DataFrame
"""
_pairwise_kw = """pairwise : bool, default False
If False then only matching columns between arg1 and arg2 will be used and
the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the output
will be a Panel in the case of DataFrame inputs. In the case of missing
elements, only complete pairwise observations will be used.
"""
_ddof_kw = """ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
"""
_bias_kw = r"""bias : boolean, default False
Use a standard estimation bias correction
"""
def rolling_count(arg, window, freq=None, center=False, how=None):
"""
Rolling count of number of non-NaN observations inside provided window.
Parameters
----------
arg : DataFrame or numpy ndarray-like
window : int
Size of the moving window. This is the number of observations used for
calculating the statistic.
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
center : boolean, default False
Whether the label should correspond with center of window
how : string, default 'mean'
Method for down- or re-sampling
Returns
-------
rolling_count : type of caller
Notes
-----
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
arg = _conv_timerule(arg, freq, how)
if not center:
window = min(window, len(arg))
return_hook, values = _process_data_structure(arg, kill_inf=False)
converted = np.isfinite(values).astype(float)
result = rolling_sum(converted, window, min_periods=0,
center=center) # already converted
# putmask here?
result[np.isnan(result)] = 0
return return_hook(result)
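# Hedged usage sketch (illustrative data only): rolling_count counts the
# non-NaN observations falling inside each window, as its docstring explains.
def _example_rolling_count():
    s = Series([1.0, np.nan, 3.0, 4.0])
    return rolling_count(s, window=2)  # expected values: 1, 1, 1, 2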
@Substitution("Unbiased moving covariance.", _binary_arg_flex,
_roll_kw%'None'+_pairwise_kw+_ddof_kw, _flex_retval, _roll_notes)
@Appender(_doc_template)
def rolling_cov(arg1, arg2=None, window=None, min_periods=None, freq=None,
center=False, pairwise=None, how=None, ddof=1):
if window is None and isinstance(arg2, (int, float)):
window = arg2
arg2 = arg1
pairwise = True if pairwise is None else pairwise # only default unset
elif arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise # only default unset
arg1 = _conv_timerule(arg1, freq, how)
arg2 = _conv_timerule(arg2, freq, how)
def _get_cov(X, Y):
mean = lambda x: rolling_mean(x, window, min_periods, center=center)
count = rolling_count(X + Y, window, center=center)
bias_adj = count / (count - ddof)
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
rs = _flex_binary_moment(arg1, arg2, _get_cov, pairwise=bool(pairwise))
return rs
@Substitution("Moving sample correlation.", _binary_arg_flex,
_roll_kw%'None'+_pairwise_kw, _flex_retval, _roll_notes)
@Appender(_doc_template)
def rolling_corr(arg1, arg2=None, window=None, min_periods=None, freq=None,
center=False, pairwise=None, how=None):
if window is None and isinstance(arg2, (int, float)):
window = arg2
arg2 = arg1
pairwise = True if pairwise is None else pairwise # only default unset
elif arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise # only default unset
arg1 = _conv_timerule(arg1, freq, how)
arg2 = _conv_timerule(arg2, freq, how)
def _get_corr(a, b):
num = rolling_cov(a, b, window, min_periods, freq=freq,
center=center)
den = (rolling_std(a, window, min_periods, freq=freq,
center=center) *
rolling_std(b, window, min_periods, freq=freq,
center=center))
return num / den
return _flex_binary_moment(arg1, arg2, _get_corr, pairwise=bool(pairwise))
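# Hedged sketch of the pairwise behaviour described in the docstrings above:
# with two DataFrame inputs, pairwise=True yields a Panel and pairwise=False a
# DataFrame of matching columns. The random data are illustrative only.
def _example_rolling_corr_pairwise():
    df = DataFrame(np.random.randn(20, 3), columns=['a', 'b', 'c'])
    panel = rolling_corr(df, df, window=5, pairwise=True)
    frame = rolling_corr(df, df, window=5, pairwise=False)
    return panel, frame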
def _flex_binary_moment(arg1, arg2, f, pairwise=False):
if not (isinstance(arg1,(np.ndarray, Series, DataFrame)) and
isinstance(arg2,(np.ndarray, Series, DataFrame))):
raise TypeError("arguments to moment function must be of type "
"np.ndarray/Series/DataFrame")
if isinstance(arg1, (np.ndarray, Series)) and \
isinstance(arg2, (np.ndarray,Series)):
X, Y = _prep_binary(arg1, arg2)
return f(X, Y)
elif isinstance(arg1, DataFrame):
def dataframe_from_int_dict(data, frame_template):
result = DataFrame(data, index=frame_template.index)
if len(result.columns) > 0:
result.columns = frame_template.columns[result.columns]
return result
results = {}
if isinstance(arg2, DataFrame):
if pairwise is False:
if arg1 is arg2:
# special case in order to handle duplicate column names
for i, col in enumerate(arg1.columns):
results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i])
return dataframe_from_int_dict(results, arg1)
else:
if not arg1.columns.is_unique:
raise ValueError("'arg1' columns are not unique")
if not arg2.columns.is_unique:
raise ValueError("'arg2' columns are not unique")
X, Y = arg1.align(arg2, join='outer')
X = X + 0 * Y
Y = Y + 0 * X
res_columns = arg1.columns.union(arg2.columns)
for col in res_columns:
if col in X and col in Y:
results[col] = f(X[col], Y[col])
return DataFrame(results, index=X.index, columns=res_columns)
elif pairwise is True:
results = defaultdict(dict)
for i, k1 in enumerate(arg1.columns):
for j, k2 in enumerate(arg2.columns):
if j<i and arg2 is arg1:
# Symmetric case
results[i][j] = results[j][i]
else:
results[i][j] = f(*_prep_binary(arg1.iloc[:, i], arg2.iloc[:, j]))
p = Panel.from_dict(results).swapaxes('items', 'major')
if len(p.major_axis) > 0:
p.major_axis = arg1.columns[p.major_axis]
if len(p.minor_axis) > 0:
p.minor_axis = arg2.columns[p.minor_axis]
return p
else:
raise ValueError("'pairwise' is not True/False")
else:
results = {}
for i, col in enumerate(arg1.columns):
results[i] = f(*_prep_binary(arg1.iloc[:, i], arg2))
return dataframe_from_int_dict(results, arg1)
else:
return _flex_binary_moment(arg2, arg1, f)
@Substitution("Deprecated. Use rolling_corr(..., pairwise=True) instead.\n\n"
"Pairwise moving sample correlation", _pairwise_arg,
_roll_kw%'None', _pairwise_retval, _roll_notes)
@Appender(_doc_template)
def rolling_corr_pairwise(df1, df2=None, window=None, min_periods=None,
freq=None, center=False):
import warnings
warnings.warn("rolling_corr_pairwise is deprecated, use rolling_corr(..., pairwise=True)", FutureWarning)
return rolling_corr(df1, df2, window=window, min_periods=min_periods,
freq=freq, center=center,
pairwise=True)
def _rolling_moment(arg, window, func, minp, axis=0, freq=None, center=False,
how=None, args=(), kwargs={}, **kwds):
"""
Rolling statistical measure using supplied function. Designed to be
used with passed-in Cython array-based functions.
Parameters
----------
arg : DataFrame or numpy ndarray-like
window : Number of observations used for calculating statistic
func : Cython function to compute rolling statistic on raw series
minp : int
Minimum number of observations required to have a value
axis : int, default 0
freq : None or string alias / date offset object, default=None
Frequency to conform to before computing statistic
center : boolean, default False
Whether the label should correspond with center of window
how : string, default 'mean'
Method for down- or re-sampling
args : tuple
Passed on to func
kwargs : dict
Passed on to func
Returns
-------
y : type of input
"""
arg = _conv_timerule(arg, freq, how)
return_hook, values = _process_data_structure(arg)
if values.size == 0:
result = values.copy()
else:
# actually calculate the moment. Faster way to do this?
offset = int((window - 1) / 2.) if center else 0
additional_nans = np.array([np.NaN] * offset)
calc = lambda x: func(np.concatenate((x, additional_nans)) if center else x,
window, minp=minp, args=args, kwargs=kwargs,
**kwds)
if values.ndim > 1:
result = np.apply_along_axis(calc, axis, values)
else:
result = calc(values)
if center:
result = _center_window(result, window, axis)
return return_hook(result)
def _center_window(rs, window, axis):
if axis > rs.ndim-1:
        raise ValueError("Requested axis is larger than the number of "
                         "argument dimensions")
offset = int((window - 1) / 2.)
if offset > 0:
if isinstance(rs, (Series, DataFrame, Panel)):
rs = rs.slice_shift(-offset, axis=axis)
else:
lead_indexer = [slice(None)] * rs.ndim
lead_indexer[axis] = slice(offset, None)
rs = np.copy(rs[tuple(lead_indexer)])
return rs
def _process_data_structure(arg, kill_inf=True):
if isinstance(arg, DataFrame):
return_hook = lambda v: type(arg)(v, index=arg.index,
columns=arg.columns)
values = arg.values
elif isinstance(arg, Series):
values = arg.values
return_hook = lambda v: Series(v, arg.index)
else:
return_hook = lambda v: v
values = arg
if not issubclass(values.dtype.type, float):
values = values.astype(float)
if kill_inf:
values = values.copy()
values[np.isinf(values)] = np.NaN
return return_hook, values
#------------------------------------------------------------------------------
# Exponential moving moments
def _get_center_of_mass(com, span, halflife):
valid_count = len([x for x in [com, span, halflife] if x is not None])
if valid_count > 1:
raise Exception("com, span, and halflife are mutually exclusive")
if span is not None:
# convert span to center of mass
com = (span - 1) / 2.
elif halflife is not None:
# convert halflife to center of mass
decay = 1 - np.exp(np.log(0.5) / halflife)
com = 1 / decay - 1
elif com is None:
raise Exception("Must pass one of com, span, or halflife")
return float(com)
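# Quick worked check of the conversions above (a sketch, not a test): a span
# of 20 corresponds to a center of mass of 9.5, matching the "20-day EWMA"
# example in _ewm_notes, and a halflife of 1 gives a center of mass of 1.0.
def _example_center_of_mass():
    assert _get_center_of_mass(None, 20, None) == 9.5
    return _get_center_of_mass(None, None, 1.0)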
@Substitution("Exponentially-weighted moving average", _unary_arg, _ewm_kw,
_type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewma(arg, com=None, span=None, halflife=None, min_periods=0, freq=None,
adjust=True, how=None, ignore_na=False):
arg = _conv_timerule(arg, freq, how)
com = _get_center_of_mass(com, span, halflife)
def _ewma(v):
return algos.ewma(v, com, int(adjust), int(ignore_na), int(min_periods))
return_hook, values = _process_data_structure(arg)
if values.size == 0:
output = values.copy()
else:
output = np.apply_along_axis(_ewma, 0, values)
return return_hook(output)
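# Worked illustration of the adjust/ignore_na weighting rules documented in
# _ewm_notes above: for the series [x, NaN, y] with adjust=True and
# ignore_na=False the weights are (1-alpha)**2 and 1. A minimal sketch, not
# used elsewhere in this module.
def _example_ewma_weights(alpha=0.5):
    x, y = 1.0, 3.0
    w_x, w_y = (1 - alpha) ** 2, 1.0
    return (w_x * x + w_y * y) / (w_x + w_y)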
@Substitution("Exponentially-weighted moving variance", _unary_arg,
_ewm_kw+_bias_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmvar(arg, com=None, span=None, halflife=None, min_periods=0, bias=False,
freq=None, how=None, ignore_na=False, adjust=True):
arg = _conv_timerule(arg, freq, how)
com = _get_center_of_mass(com, span, halflife)
def _ewmvar(v):
return algos.ewmcov(v, v, com, int(adjust), int(ignore_na), int(min_periods), int(bias))
return_hook, values = _process_data_structure(arg)
if values.size == 0:
output = values.copy()
else:
output = np.apply_along_axis(_ewmvar, 0, values)
return return_hook(output)
@Substitution("Exponentially-weighted moving std", _unary_arg,
_ewm_kw+_bias_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmstd(arg, com=None, span=None, halflife=None, min_periods=0, bias=False,
ignore_na=False, adjust=True):
result = ewmvar(arg, com=com, span=span, halflife=halflife,
min_periods=min_periods, bias=bias, adjust=adjust, ignore_na=ignore_na)
return _zsqrt(result)
ewmvol = ewmstd
@Substitution("Exponentially-weighted moving covariance", _binary_arg_flex,
_ewm_kw+_pairwise_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmcov(arg1, arg2=None, com=None, span=None, halflife=None, min_periods=0,
bias=False, freq=None, pairwise=None, how=None, ignore_na=False, adjust=True):
if arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise
elif isinstance(arg2, (int, float)) and com is None:
com = arg2
arg2 = arg1
pairwise = True if pairwise is None else pairwise
arg1 = _conv_timerule(arg1, freq, how)
arg2 = _conv_timerule(arg2, freq, how)
com = _get_center_of_mass(com, span, halflife)
def _get_ewmcov(X, Y):
# X and Y have the same structure (and NaNs) when called from _flex_binary_moment()
return_hook, x_values = _process_data_structure(X)
return_hook, y_values = _process_data_structure(Y)
cov = algos.ewmcov(x_values, y_values, com, int(adjust), int(ignore_na), int(min_periods), int(bias))
return return_hook(cov)
result = _flex_binary_moment(arg1, arg2, _get_ewmcov,
pairwise=bool(pairwise))
return result
@Substitution("Exponentially-weighted moving correlation", _binary_arg_flex,
_ewm_kw+_pairwise_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmcorr(arg1, arg2=None, com=None, span=None, halflife=None, min_periods=0,
freq=None, pairwise=None, how=None, ignore_na=False, adjust=True):
if arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise
elif isinstance(arg2, (int, float)) and com is None:
com = arg2
arg2 = arg1
pairwise = True if pairwise is None else pairwise
arg1 = _conv_timerule(arg1, freq, how)
arg2 = _conv_timerule(arg2, freq, how)
com = _get_center_of_mass(com, span, halflife)
def _get_ewmcorr(X, Y):
# X and Y have the same structure (and NaNs) when called from _flex_binary_moment()
return_hook, x_values = _process_data_structure(X)
return_hook, y_values = _process_data_structure(Y)
cov = algos.ewmcov(x_values, y_values, com, int(adjust), int(ignore_na), int(min_periods), 1)
x_var = algos.ewmcov(x_values, x_values, com, int(adjust), int(ignore_na), int(min_periods), 1)
y_var = algos.ewmcov(y_values, y_values, com, int(adjust), int(ignore_na), int(min_periods), 1)
corr = cov / _zsqrt(x_var * y_var)
return return_hook(corr)
result = _flex_binary_moment(arg1, arg2, _get_ewmcorr,
pairwise=bool(pairwise))
return result
def _zsqrt(x):
result = np.sqrt(x)
mask = x < 0
if isinstance(x, DataFrame):
if mask.values.any():
result[mask] = 0
else:
if mask.any():
result[mask] = 0
return result
def _prep_binary(arg1, arg2):
if not isinstance(arg2, type(arg1)):
raise Exception('Input arrays must be of the same type!')
# mask out values, this also makes a common index...
X = arg1 + 0 * arg2
Y = arg2 + 0 * arg1
return X, Y
#----------------------------------------------------------------------
# Python interface to Cython functions
def _conv_timerule(arg, freq, how):
types = (DataFrame, Series)
if freq is not None and isinstance(arg, types):
# Conform to whatever frequency needed.
arg = arg.resample(freq, how=how)
return arg
def _require_min_periods(p):
def _check_func(minp, window):
if minp is None:
return window
else:
return max(p, minp)
return _check_func
def _use_window(minp, window):
if minp is None:
return window
else:
return minp
def _rolling_func(func, desc, check_minp=_use_window, how=None, additional_kw=''):
if how is None:
how_arg_str = 'None'
else:
        how_arg_str = "'%s'" % how
@Substitution(desc, _unary_arg, _roll_kw%how_arg_str + additional_kw,
_type_of_input_retval, _roll_notes)
@Appender(_doc_template)
@wraps(func)
def f(arg, window, min_periods=None, freq=None, center=False, how=how,
**kwargs):
def call_cython(arg, window, minp, args=(), kwargs={}, **kwds):
minp = check_minp(minp, window)
return func(arg, window, minp, **kwds)
return _rolling_moment(arg, window, call_cython, min_periods, freq=freq,
center=center, how=how, **kwargs)
return f
rolling_max = _rolling_func(algos.roll_max2, 'Moving maximum.', how='max')
rolling_min = _rolling_func(algos.roll_min2, 'Moving minimum.', how='min')
rolling_sum = _rolling_func(algos.roll_sum, 'Moving sum.')
rolling_mean = _rolling_func(algos.roll_mean, 'Moving mean.')
rolling_median = _rolling_func(algos.roll_median_cython, 'Moving median.',
how='median')
_ts_std = lambda *a, **kw: _zsqrt(algos.roll_var(*a, **kw))
rolling_std = _rolling_func(_ts_std, 'Moving standard deviation.',
check_minp=_require_min_periods(1),
additional_kw=_ddof_kw)
rolling_var = _rolling_func(algos.roll_var, 'Moving variance.',
check_minp=_require_min_periods(1),
additional_kw=_ddof_kw)
rolling_skew = _rolling_func(algos.roll_skew, 'Unbiased moving skewness.',
check_minp=_require_min_periods(3))
rolling_kurt = _rolling_func(algos.roll_kurt, 'Unbiased moving kurtosis.',
check_minp=_require_min_periods(4))
def rolling_quantile(arg, window, quantile, min_periods=None, freq=None,
center=False):
"""Moving quantile.
Parameters
----------
arg : Series, DataFrame
window : int
Size of the moving window. This is the number of observations used for
calculating the statistic.
quantile : float
0 <= quantile <= 1
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
center : boolean, default False
Whether the label should correspond with center of window
Returns
-------
y : type of input argument
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
def call_cython(arg, window, minp, args=(), kwargs={}):
minp = _use_window(minp, window)
return algos.roll_quantile(arg, window, minp, quantile)
return _rolling_moment(arg, window, call_cython, min_periods, freq=freq,
center=center)
def rolling_apply(arg, window, func, min_periods=None, freq=None,
center=False, args=(), kwargs={}):
"""Generic moving function application.
Parameters
----------
arg : Series, DataFrame
window : int
Size of the moving window. This is the number of observations used for
calculating the statistic.
func : function
Must produce a single value from an ndarray input
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
center : boolean, default False
Whether the label should correspond with center of window
args : tuple
Passed on to func
kwargs : dict
Passed on to func
Returns
-------
y : type of input argument
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
offset = int((window - 1) / 2.) if center else 0
def call_cython(arg, window, minp, args, kwargs):
minp = _use_window(minp, window)
return algos.roll_generic(arg, window, minp, offset, func, args, kwargs)
return _rolling_moment(arg, window, call_cython, min_periods, freq=freq,
center=False, args=args, kwargs=kwargs)
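# Hedged usage sketch for rolling_apply: any function reducing an ndarray to a
# single value can be rolled over the data. The peak-to-peak example below is
# illustrative only.
def _example_rolling_apply():
    s = Series(np.arange(6, dtype=float))
    return rolling_apply(s, 3, lambda x: x.max() - x.min())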
def rolling_window(arg, window=None, win_type=None, min_periods=None,
freq=None, center=False, mean=True,
axis=0, how=None, **kwargs):
"""
Applies a moving window of type ``window_type`` and size ``window``
on the data.
Parameters
----------
arg : Series, DataFrame
window : int or ndarray
Weighting window specification. If the window is an integer, then it is
treated as the window length and win_type is required
win_type : str, default None
Window type (see Notes)
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
center : boolean, default False
Whether the label should correspond with center of window
mean : boolean, default True
If True computes weighted mean, else weighted sum
axis : {0, 1}, default 0
how : string, default 'mean'
Method for down- or re-sampling
Returns
-------
y : type of input argument
Notes
-----
The recognized window types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width).
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
if isinstance(window, (list, tuple, np.ndarray)):
if win_type is not None:
raise ValueError(('Do not specify window type if using custom '
'weights'))
window = pdcom._asarray_tuplesafe(window).astype(float)
elif pdcom.is_integer(window): # window size
if win_type is None:
raise ValueError('Must specify window type')
try:
import scipy.signal as sig
except ImportError:
raise ImportError('Please install scipy to generate window weight')
win_type = _validate_win_type(win_type, kwargs) # may pop from kwargs
window = sig.get_window(win_type, window).astype(float)
else:
raise ValueError('Invalid window %s' % str(window))
minp = _use_window(min_periods, len(window))
arg = _conv_timerule(arg, freq, how)
return_hook, values = _process_data_structure(arg)
if values.size == 0:
result = values.copy()
else:
offset = int((len(window) - 1) / 2.) if center else 0
additional_nans = np.array([np.NaN] * offset)
f = lambda x: algos.roll_window(np.concatenate((x, additional_nans)) if center else x,
window, minp, avg=mean)
result = np.apply_along_axis(f, axis, values)
if center:
result = _center_window(result, len(window), axis)
return return_hook(result)
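# Two minimal sketches of ``rolling_window`` usage; the Series and parameter
# values are illustrative. The first relies on scipy's window generator (``std``
# is the extra keyword a gaussian window needs), the second passes explicit
# weights, in which case ``win_type`` must be omitted.
def _example_gaussian_smooth(series):
    return rolling_window(series, 7, 'gaussian', std=2.0, min_periods=1)
def _example_custom_weights(series):
    return rolling_window(series, [0.25, 0.5, 0.25])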
def _validate_win_type(win_type, kwargs):
# may pop from kwargs
arg_map = {'kaiser': ['beta'],
'gaussian': ['std'],
'general_gaussian': ['power', 'width'],
'slepian': ['width']}
if win_type in arg_map:
return tuple([win_type] +
_pop_args(win_type, arg_map[win_type], kwargs))
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = '%s window requires %%s' % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
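# A small sketch of how the two helpers above cooperate: for parametrised
# window types the required keyword is popped out of ``kwargs`` and folded
# into the tuple form that ``scipy.signal.get_window`` accepts. The value
# used here is illustrative.
def _example_win_type_tuple():
    kwargs = {'std': 2.0}
    spec = _validate_win_type('gaussian', kwargs)  # -> ('gaussian', 2.0)
    assert kwargs == {}  # 'std' has been consumed
    return spec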
def _expanding_func(func, desc, check_minp=_use_window, additional_kw=''):
@Substitution(desc, _unary_arg, _expanding_kw + additional_kw,
_type_of_input_retval, "")
@Appender(_doc_template)
@wraps(func)
def f(arg, min_periods=1, freq=None, **kwargs):
window = max(len(arg), min_periods) if min_periods else len(arg)
def call_cython(arg, window, minp, args=(), kwargs={}, **kwds):
minp = check_minp(minp, window)
return func(arg, window, minp, **kwds)
return _rolling_moment(arg, window, call_cython, min_periods, freq=freq,
**kwargs)
return f
expanding_max = _expanding_func(algos.roll_max2, 'Expanding maximum.')
expanding_min = _expanding_func(algos.roll_min2, 'Expanding minimum.')
expanding_sum = _expanding_func(algos.roll_sum, 'Expanding sum.')
expanding_mean = _expanding_func(algos.roll_mean, 'Expanding mean.')
expanding_median = _expanding_func(algos.roll_median_cython, 'Expanding median.')
expanding_std = _expanding_func(_ts_std, 'Expanding standard deviation.',
check_minp=_require_min_periods(1),
additional_kw=_ddof_kw)
expanding_var = _expanding_func(algos.roll_var, 'Expanding variance.',
check_minp=_require_min_periods(1),
additional_kw=_ddof_kw)
expanding_skew = _expanding_func(algos.roll_skew, 'Unbiased expanding skewness.',
check_minp=_require_min_periods(3))
expanding_kurt = _expanding_func(algos.roll_kurt, 'Unbiased expanding kurtosis.',
check_minp=_require_min_periods(4))
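# Rough sketch of what the factory above produces: each ``expanding_*``
# statistic is the corresponding rolling statistic evaluated with a window
# spanning the whole input, so for an illustrative Series ``s`` the two
# results below should match.
def _example_expanding_vs_rolling(s):
    return expanding_mean(s), rolling_mean(s, len(s), min_periods=1)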
def expanding_count(arg, freq=None):
"""
Expanding count of number of non-NaN observations.
Parameters
----------
arg : DataFrame or numpy ndarray-like
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
Returns
-------
expanding_count : type of caller
Notes
-----
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
return rolling_count(arg, len(arg), freq=freq)
def expanding_quantile(arg, quantile, min_periods=1, freq=None):
"""Expanding quantile.
Parameters
----------
arg : Series, DataFrame
quantile : float
0 <= quantile <= 1
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
Returns
-------
y : type of input argument
Notes
-----
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
return rolling_quantile(arg, len(arg), quantile, min_periods=min_periods,
freq=freq)
@Substitution("Unbiased expanding covariance.", _binary_arg_flex,
_expanding_kw+_pairwise_kw+_ddof_kw, _flex_retval, "")
@Appender(_doc_template)
def expanding_cov(arg1, arg2=None, min_periods=1, freq=None, pairwise=None, ddof=1):
if arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise
elif isinstance(arg2, (int, float)) and min_periods is None:
min_periods = arg2
arg2 = arg1
pairwise = True if pairwise is None else pairwise
window = max((len(arg1) + len(arg2)), min_periods) if min_periods else (len(arg1) + len(arg2))
return rolling_cov(arg1, arg2, window,
min_periods=min_periods, freq=freq,
pairwise=pairwise, ddof=ddof)
@Substitution("Expanding sample correlation.", _binary_arg_flex,
_expanding_kw+_pairwise_kw, _flex_retval, "")
@Appender(_doc_template)
def expanding_corr(arg1, arg2=None, min_periods=1, freq=None, pairwise=None):
if arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise
elif isinstance(arg2, (int, float)) and min_periods is None:
min_periods = arg2
arg2 = arg1
pairwise = True if pairwise is None else pairwise
window = max((len(arg1) + len(arg2)), min_periods) if min_periods else (len(arg1) + len(arg2))
return rolling_corr(arg1, arg2, window,
min_periods=min_periods,
freq=freq, pairwise=pairwise)
@Substitution("Deprecated. Use expanding_corr(..., pairwise=True) instead.\n\n"
"Pairwise expanding sample correlation", _pairwise_arg,
_expanding_kw, _pairwise_retval, "")
@Appender(_doc_template)
def expanding_corr_pairwise(df1, df2=None, min_periods=1, freq=None):
import warnings
warnings.warn("expanding_corr_pairwise is deprecated, use expanding_corr(..., pairwise=True)", FutureWarning)
return expanding_corr(df1, df2, min_periods=min_periods,
freq=freq, pairwise=True)
def expanding_apply(arg, func, min_periods=1, freq=None,
args=(), kwargs={}):
"""Generic expanding function application.
Parameters
----------
arg : Series, DataFrame
func : function
Must produce a single value from an ndarray input
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
args : tuple
Passed on to func
kwargs : dict
Passed on to func
Returns
-------
y : type of input argument
Notes
-----
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
window = max(len(arg), min_periods) if min_periods else len(arg)
return rolling_apply(arg, window, func, min_periods=min_periods, freq=freq,
args=args, kwargs=kwargs)
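# A minimal usage sketch for ``expanding_apply``; the Series and the reducer
# are illustrative. Each output point is ``func`` applied to every observation
# seen so far.
def _example_expanding_mad(series):
    import numpy as np
    return expanding_apply(series, lambda x: np.median(np.abs(x - np.median(x))))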
| bdh1011/wau | venv/lib/python2.7/site-packages/pandas/stats/moments.py | Python | mit | 39,028 | [
"Gaussian"
] | 21a76d8d4038162fd5019ed4028287ddcec2bb0035f24e4373a9b57dfa2cac45 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""psi4doc_sptheme.ext.relbar_toc - adds TOC entry and icons to relbar.
Modeled from cloud_sptheme.ext.relbar_toc
"""
import os.path
import logging; log = logging.getLogger(__name__)
def insert_toc(app, pagename, templatename, ctx, event_arg):
links = ctx['rellinks']
# remove any existing toc (present on some pages)
for idx, entry in enumerate(links):
#if entry[3].lower() == "toc":
if entry[2] == 'C':
del links[idx]
break
# add arrows and caps to existing links
for idx, entry in enumerate(links):
if entry[3] == 'next':
#newlink = u'next \u2192'
#newlink = u'<i class="fa fa-arrow-circle-right fa-lg"></i>'
newlink = u'<i class="fa fa-long-arrow-right fa-lg"></i>'
elif entry[3] == 'previous':
#newlink = u'\u2190 previous'
#newlink = u'<i class="fa fa-arrow-circle-left fa-lg"></i>'
newlink = u'<i class="fa fa-long-arrow-left fa-lg"></i>'
elif entry[3] == 'index':
newlink = 'Index'
else:
continue
del links[idx]
links.insert(idx, (entry[0], entry[1], entry[2], newlink))
# insert our toc entry
#FIXME: there's probably a MUCH better / less broken way to do this
path = os.path.split(os.path.splitext(ctx['pathto']('index'))[0])[1]
#links.insert(len(links), (path, 'Table Of Contents', 'C', 'TOC'))
links.insert(len(links), (path, 'Table Of Contents', 'C', '<i class="fa fa-book fa-lg"></i>'))
def setup(app):
app.connect('html-page-context', insert_toc)
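# A minimal sketch of what ``insert_toc`` does to a page context. The fake
# ``rellinks`` tuples assume Sphinx's (docname, title, accesskey, label)
# layout and the names are illustrative; in a real build Sphinx supplies the
# context and invokes this via the 'html-page-context' event wired up in
# ``setup`` above.
def _example_insert_toc():
    ctx = {
        'rellinks': [('genindex', 'General Index', 'I', 'index'),
                     ('nextpage', 'Next page', 'N', 'next')],
        'pathto': lambda name: name + '.html',
    }
    insert_toc(None, 'somepage', 'page.html', ctx, None)
    return ctx['rellinks']  # arrow icons swapped in, TOC entry appended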
| rmcgibbo/psi4public | doc/sphinxman/source/psi4doc/ext/relbar_toc.py | Python | lgpl-3.0 | 2,511 | [
"Psi4"
] | 3a0045740158c51f12ba99adb082270d9bc3c48c7192ac43998d4cded44adefa |
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper
import numpy as np
sample, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
"@SAMPLES_DIR@/lb_profile.py")
@skipIfMissingFeatures
class Sample(ut.TestCase):
system = sample.system
def test_fit(self):
np.testing.assert_allclose(sample.lb_fluid_profile[:, 0, 0, 2],
sample.expected_profile, atol=5e-2)
if __name__ == "__main__":
ut.main()
| espressomd/espresso | testsuite/scripts/samples/test_lb_profile.py | Python | gpl-3.0 | 1,170 | [
"ESPResSo"
] | ab54e0ec07f27b7f7cc8ea3b12ec1c303881cddc529e0986c6a703072234ff82 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# LAMMPS documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 6 14:20:08 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.mathjax',
]
# 2017-12-07: commented out, since this package is broken with Sphinx 1.6.x,
#             yet we can no longer use Sphinx 1.5.x, since that breaks with
#             newer versions of the multiprocessing module.
# 'sphinxcontrib.images',
images_config = {
'default_image_width' : '25%',
'default_group' : 'default'
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'Manual'
# General information about the project.
project = 'LAMMPS'
copyright = '2013 Sandia Corporation'
def get_lammps_version():
import os
script_dir = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(script_dir, '../../../src/version.h'), 'r') as f:
line = f.readline()
start_pos = line.find('"')+1
end_pos = line.find('"', start_pos)
return line[start_pos:end_pos]
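# The helper above simply grabs the first double-quoted string on the first
# line of src/version.h. A minimal sketch of that slicing on an assumed header
# line (the exact version string differs per checkout):
def _example_parse_version(line='#define LAMMPS_VERSION "7 Aug 2019"'):
    start_pos = line.find('"') + 1
    end_pos = line.find('"', start_pos)
    return line[start_pos:end_pos]  # -> '7 Aug 2019'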
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = get_lammps_version()
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'lammps_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "LAMMPS documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'LAMMPSdoc'
html_add_permalinks = ''
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('Manual', 'LAMMPS.tex', 'LAMMPS Documentation',
'Steve Plimpton', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('Manual', 'liggghts', 'LAMMPS Documentation',
['Steve Plimpton'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('Manual', 'LAMMPS', 'LAMMPS Documentation',
'LAMMPS', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for ePUB output ----------------------------------------------
epub_title = 'LAMMPS Documentation - ' + get_lammps_version()
epub_cover = ('lammps-logo.png', '')
epub_description = """
This is the Manual for the LAMMPS software package.
LAMMPS stands for Large-scale Atomic/Molecular Massively Parallel
Simulator and is a classical molecular dynamics simulation code
designed to run efficiently on parallel computers. It was developed
at Sandia National Laboratories, a US Department of Energy facility,
with funding from the DOE. It is an open-source code, distributed
freely under the terms of the GNU Public License (GPL).
The primary author of the code is Steve Plimpton, who can be emailed
at sjplimp@sandia.gov. The LAMMPS WWW Site at lammps.sandia.gov has
more information about the code and its uses.
"""
epub_author = 'The LAMMPS Developers'
# configure spelling extension if present
import importlib.util
spelling_spec = importlib.util.find_spec("sphinxcontrib.spelling")
if spelling_spec:
extensions.append('sphinxcontrib.spelling')
spelling_lang='en_US'
spelling_word_list_filename='false_positives.txt'
| ramisetti/lammps | doc/utils/sphinx-config/conf.py | Python | gpl-2.0 | 10,029 | [
"LAMMPS"
] | a2eddb24ea4974b2f3118c724fc2cf81daffc98002dfc78ce690f38d5216b90b |
import types
import os
import sys
import imp
from DIRAC.Core.Utilities import List
from DIRAC import gConfig, S_ERROR, S_OK, gLogger
from DIRAC.ConfigurationSystem.Client.Helpers.CSGlobals import Extensions
from DIRAC.ConfigurationSystem.Client import PathFinder
class ModuleLoader( object ):
def __init__( self, importLocation, sectionFinder, superClass, csSuffix = False, moduleSuffix = False ):
self.__modules = {}
self.__loadedModules = {}
self.__superClass = superClass
    #Function to find the CS section for a given module
self.__sectionFinder = sectionFinder
#Import from where? <Ext>.<System>System.<importLocation>.<module>
self.__importLocation = importLocation
#Where to look in the CS for the module? /Systems/<System>/<Instance>/<csSuffix>
if not csSuffix:
csSuffix = "%ss" % importLocation
self.__csSuffix = csSuffix
#Module suffix (for Handlers)
self.__modSuffix = moduleSuffix
#Helper
self.__exts = Extensions()
def getModules( self ):
data = dict( self.__modules )
for k in data:
data[ k ][ 'standalone' ] = len( data ) == 1
return data
def loadModules( self, modulesList, hideExceptions = False ):
"""
Load all modules required in moduleList
"""
for modName in modulesList:
gLogger.verbose( "Checking %s" % modName )
      #if it's an executor module name just load it and be done with it
if modName.find( "/" ) > -1:
gLogger.verbose( "Module %s seems to be a valid name. Try to load it!" % modName )
result = self.loadModule( modName, hideExceptions = hideExceptions )
if not result[ 'OK' ]:
return result
continue
#Check if it's a system name
#Look in the CS
system = modName
#Can this be generated with sectionFinder?
csPath = "%s/Executors" % PathFinder.getSystemSection ( system, ( system, ) )
gLogger.verbose( "Exploring %s to discover modules" % csPath )
result = gConfig.getSections( csPath )
if result[ 'OK' ]:
#Add all modules in the CS :P
for modName in result[ 'Value' ]:
result = self.loadModule( "%s/%s" % ( system, modName ), hideExceptions = hideExceptions )
if not result[ 'OK' ]:
return result
#Look what is installed
parentModule = False
for rootModule in self.__exts.getInstalledExtensions():
if system.find( "System" ) != len( system ) - 6:
parentImport = "%s.%sSystem.%s" % ( rootModule, system, self.__csSuffix )
else:
parentImport = "%s.%s.%s" % ( rootModule, system, self.__csSuffix )
#HERE!
result = self.__recurseImport( parentImport )
if not result[ 'OK' ]:
return result
parentModule = result[ 'Value' ]
if parentModule:
break
if not parentModule:
continue
parentPath = parentModule.__path__[0]
gLogger.notice( "Found modules path at %s" % parentImport )
for entry in os.listdir( parentPath ):
if entry[-3:] != ".py" or entry == "__init__.py":
continue
if not os.path.isfile( os.path.join( parentPath, entry ) ):
continue
modName = "%s/%s" % ( system, entry[:-3] )
gLogger.verbose( "Trying to import %s" % modName )
result = self.loadModule( modName,
hideExceptions = hideExceptions,
parentModule = parentModule )
return S_OK()
def loadModule( self, modName, hideExceptions = False, parentModule = False ):
"""
Load module name.
name must take the form [DIRAC System Name]/[DIRAC module]
"""
while modName and modName[0] == "/":
modName = modName[1:]
if modName in self.__modules:
return S_OK()
modList = modName.split( "/" )
if len( modList ) != 2:
return S_ERROR( "Can't load %s: Invalid module name" % ( modName ) )
csSection = self.__sectionFinder( modName )
loadGroup = gConfig.getValue( "%s/Load" % csSection, [] )
#Check if it's a load group
if loadGroup:
gLogger.info( "Found load group %s. Will load %s" % ( modName, ", ".join( loadGroup ) ) )
for loadModName in loadGroup:
if loadModName.find( "/" ) == -1:
loadModName = "%s/%s" % ( modList[0], loadModName )
result = self.loadModule( loadModName, hideExceptions = hideExceptions, parentModule = False )
if not result[ 'OK' ]:
return result
return S_OK()
#Normal load
loadName = gConfig.getValue( "%s/Module" % csSection, "" )
if not loadName:
loadName = modName
gLogger.info( "Loading %s" % ( modName ) )
else:
if loadName.find( "/" ) == -1:
loadName = "%s/%s" % ( modList[0], loadName )
gLogger.info( "Loading %s (%s)" % ( modName, loadName ) )
#If already loaded, skip
loadList = loadName.split( "/" )
if len( loadList ) != 2:
return S_ERROR( "Can't load %s: Invalid module name" % ( loadName ) )
system, module = loadList
#Load
className = module
if self.__modSuffix:
className = "%s%s" % ( className, self.__modSuffix )
if loadName not in self.__loadedModules:
#Check if handler is defined
loadCSSection = self.__sectionFinder( loadName )
handlerPath = gConfig.getValue( "%s/HandlerPath" % loadCSSection, "" )
if handlerPath:
gLogger.info( "Trying to %s from CS defined path %s" % ( loadName, handlerPath ) )
gLogger.verbose( "Found handler for %s: %s" % ( loadName, handlerPath ) )
handlerPath = handlerPath.replace( "/", "." )
if handlerPath.find( ".py", len( handlerPath ) -3 ) > -1:
handlerPath = handlerPath[ :-3 ]
className = List.fromChar( handlerPath, "." )[-1]
result = self.__recurseImport( handlerPath )
if not result[ 'OK' ]:
return S_ERROR( "Cannot load user defined handler %s: %s" % ( handlerPath, result[ 'Message' ] ) )
gLogger.verbose( "Loaded %s" % handlerPath )
elif parentModule:
gLogger.info( "Trying to autodiscover %s from parent" % loadName )
#If we've got a parent module, load from there.
modImport = module
if self.__modSuffix:
modImport = "%s%s" % ( modImport, self.__modSuffix )
result = self.__recurseImport( modImport, parentModule, hideExceptions = hideExceptions )
else:
#Check to see if the module exists in any of the root modules
gLogger.info( "Trying to autodiscover %s" % loadName )
rootModulesToLook = self.__exts.getInstalledExtensions()
for rootModule in rootModulesToLook:
importString = '%s.%sSystem.%s.%s' % ( rootModule, system, self.__importLocation, module )
if self.__modSuffix:
importString = "%s%s" % ( importString, self.__modSuffix )
gLogger.verbose( "Trying to load %s" % importString )
result = self.__recurseImport( importString, hideExceptions = hideExceptions )
#Error while loading
if not result[ 'OK' ]:
return result
#Something has been found! break :)
if result[ 'Value' ]:
gLogger.verbose( "Found %s" % importString )
break
#Nothing found
if not result[ 'Value' ]:
return S_ERROR( "Could not find %s" % loadName )
modObj = result[ 'Value' ]
try:
#Try to get the class from the module
modClass = getattr( modObj, className )
except AttributeError:
location = ""
if '__file__' in dir( modObj ):
location = modObj.__file__
else:
location = modObj.__path__
gLogger.exception( "%s module does not have a %s class!" % ( location, module ) )
return S_ERROR( "Cannot load %s" % module )
#Check if it's subclass
if not issubclass( modClass, self.__superClass ):
return S_ERROR( "%s has to inherit from %s" % ( loadName, self.__superClass.__name__ ) )
self.__loadedModules[ loadName ] = { 'classObj' : modClass, 'moduleObj' : modObj }
#End of loading of 'loadName' module
#A-OK :)
self.__modules[ modName ] = self.__loadedModules[ loadName ].copy()
#keep the name of the real code module
self.__modules[ modName ][ 'modName' ] = modName
self.__modules[ modName ][ 'loadName' ] = loadName
gLogger.notice( "Loaded module %s" % modName )
return S_OK()
def __recurseImport( self, modName, parentModule = False, hideExceptions = False ):
#Is it already loaded?
fName = modName
if parentModule:
fName = "%s.%s" % ( parentModule.__name__, fName )
try:
return S_OK( sys.modules[ fName ] )
except KeyError:
pass
#Recurse time!
if type( modName ) in types.StringTypes:
modName = List.fromChar( modName, "." )
#Is the first already loaded?
impModule = False
fName = modName[0]
if parentModule:
fName = "%s.%s" % ( parentModule.__name__, fName )
try:
impModule = sys.modules[ fName ]
except KeyError:
pass
#Not loaded. Try to do so
if not impModule:
try:
if parentModule:
impData = imp.find_module( modName[0], parentModule.__path__ )
else:
impData = imp.find_module( modName[0] )
impModule = imp.load_module( modName[0], *impData )
if impData[0]:
impData[0].close()
except ImportError, excp:
strExcp = str( excp )
if strExcp.find( "No module named" ) == 0 and strExcp.find( modName[0] ) == len( strExcp ) - len( modName[0] ):
return S_OK()
errMsg = "Can't load %s" % ".".join( modName )
if not hideExceptions:
gLogger.exception( errMsg )
return S_ERROR( errMsg )
if len( modName ) == 1:
return S_OK( impModule )
return self.__recurseImport( modName[1:], impModule )
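# A minimal usage sketch for ModuleLoader. The base class, section finder and
# module name below are placeholders supplied by the caller: base_class must be
# whatever framework class the target modules inherit from, and section_finder
# a callable mapping "System/Module" to its CS section.
def _exampleLoadModules( base_class, section_finder ):
  loader = ModuleLoader( "Agent", section_finder, base_class, csSuffix = "Agents" )
  result = loader.loadModules( [ "Framework/SomeAgent" ] )
  if not result[ 'OK' ]:
    return result
  return S_OK( loader.getModules() )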
| avedaee/DIRAC | Core/Base/private/ModuleLoader.py | Python | gpl-3.0 | 9,914 | [
"DIRAC"
] | a242f08f9b6aeac5279e96934be57efdd58f14d451a7ed1d60260f196a131231 |
from __future__ import unicode_literals
import unittest
import os
import json
from pymatgen.core.periodic_table import Element
from pymatgen.phonon.dos import PhononDos, CompletePhononDos
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
import scipy
class DosTest(PymatgenTest):
def setUp(self):
with open(os.path.join(test_dir, "NaCl_ph_dos.json"), "r") as f:
self.dos = PhononDos.from_dict(json.load(f))
def test_properties(self):
self.assertAlmostEqual(self.dos.densities[15], 0.0001665998)
self.assertAlmostEqual(self.dos.frequencies[20], 0.0894965119)
self.assertAlmostEqual(self.dos.get_interpolated_value(3.), 1.2915532670115628)
self.assertEqual(len(self.dos.frequencies), 201)
self.assertEqual(len(self.dos.densities), 201)
def test_get_smeared_densities(self):
smeared = self.dos.get_smeared_densities(0.01)
self.assertAlmostEqual(smeared[20], 0.00084171007635058825)
dens = self.dos.densities
self.assertAlmostEqual(sum(dens), sum(smeared))
def test_dict_methods(self):
s = json.dumps(self.dos.as_dict())
self.assertIsNotNone(s)
self.assertMSONable(self.dos)
class CompleteDosTest(PymatgenTest):
def setUp(self):
with open(os.path.join(test_dir, "NaCl_complete_ph_dos.json"), "r") as f:
self.cdos = CompletePhononDos.from_dict(json.load(f))
def test_properties(self):
site_Na = self.cdos.structure[0]
site_Cl = self.cdos.structure[1]
self.assertEqual(len(self.cdos.frequencies), 201)
self.assertAlmostEqual(self.cdos.pdos[site_Na][30], 0.008058208)
self.assertAlmostEqual(self.cdos.get_site_dos(site_Na).densities[30], 0.008058208)
self.assertAlmostEqual(self.cdos.pdos[site_Cl][30], 0.0119040783)
self.assertIn(Element.Na, self.cdos.get_element_dos())
self.assertIn(Element.Cl, self.cdos.get_element_dos())
sum_dos = self.cdos.get_element_dos()[Element.Na] + self.cdos.get_element_dos()[Element.Cl]
self.assertArrayAlmostEqual(sum_dos.frequencies, self.cdos.frequencies)
def test_dict_methods(self):
s = json.dumps(self.cdos.as_dict())
self.assertIsNotNone(s)
self.assertMSONable(self.cdos)
def test_str(self):
self.assertIsNotNone(str(self.cdos))
if __name__ == '__main__':
unittest.main()
| xhqu1981/pymatgen | pymatgen/phonon/tests/test_dos.py | Python | mit | 2,515 | [
"pymatgen"
] | 4a53705227cfe1c9a5fb840fc9bca8a38e79b6b87d4b693e0b7218e1ed0e2dbd |
"""
Student Views
"""
import datetime
import logging
import uuid
import json
import warnings
from collections import defaultdict
from urlparse import urljoin, urlsplit, parse_qs, urlunsplit
from django.views.generic import TemplateView
from pytz import UTC
from requests import HTTPError
from ipware.ip import get_ip
import edx_oauth2_provider
from django.conf import settings
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import password_reset_confirm
from django.contrib import messages
from django.core.context_processors import csrf
from django.core import mail
from django.core.urlresolvers import reverse, NoReverseMatch, reverse_lazy
from django.core.validators import validate_email, ValidationError
from django.db import IntegrityError, transaction
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden, HttpResponseServerError, Http404
from django.shortcuts import redirect
from django.utils.encoding import force_bytes, force_text
from django.utils.translation import ungettext
from django.utils.http import base36_to_int, urlsafe_base64_encode, urlencode
from django.utils.translation import ugettext as _, get_language
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
from django.views.decorators.http import require_POST, require_GET
from django.db.models.signals import post_save
from django.dispatch import receiver, Signal
from django.template.response import TemplateResponse
from provider.oauth2.models import Client
from ratelimitbackend.exceptions import RateLimitException
from social.apps.django_app import utils as social_utils
from social.backends import oauth as social_oauth
from social.exceptions import AuthException, AuthAlreadyAssociated
from edxmako.shortcuts import render_to_response, render_to_string
from util.enterprise_helpers import data_sharing_consent_requirement_at_login
from course_modes.models import CourseMode
from shoppingcart.api import order_history
from student.models import (
Registration, UserProfile,
PendingEmailChange, CourseEnrollment, CourseEnrollmentAttribute, unique_id_for_user,
CourseEnrollmentAllowed, UserStanding, LoginFailures,
create_comments_service_user, PasswordHistory, UserSignupSource,
DashboardConfiguration, LinkedInAddToProfileConfiguration, ManualEnrollmentAudit, ALLOWEDTOENROLL_TO_ENROLLED,
LogoutViewConfiguration, RegistrationCookieConfiguration)
from student.forms import AccountCreationForm, PasswordResetFormNoActive, get_registration_extension_form
from student.tasks import send_activation_email
from lms.djangoapps.commerce.utils import EcommerceService # pylint: disable=import-error
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification # pylint: disable=import-error
from bulk_email.models import Optout, BulkEmailFlag # pylint: disable=import-error
from certificates.models import CertificateStatuses, certificate_status_for_student
from certificates.api import ( # pylint: disable=import-error
get_certificate_url,
has_html_certificates_enabled,
)
from lms.djangoapps.grades.new.course_grade import CourseGradeFactory
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import CourseLocator
from collections import namedtuple
from courseware.courses import get_courses, sort_by_announcement, sort_by_start_date # pylint: disable=import-error
from courseware.access import has_access
from django_comment_common.models import Role
from openedx.core.djangoapps.external_auth.models import ExternalAuthMap
import openedx.core.djangoapps.external_auth.views
from openedx.core.djangoapps.external_auth.login_and_register import (
login as external_auth_login,
register as external_auth_register
)
import track.views
import dogstats_wrapper as dog_stats_api
from util.db import outer_atomic
from util.json_request import JsonResponse
from util.bad_request_rate_limiter import BadRequestRateLimiter
from util.milestones_helpers import (
get_pre_requisite_courses_not_completed,
)
from util.password_policy_validators import validate_password_strength
import third_party_auth
from third_party_auth import pipeline, provider
from student.helpers import (
check_verify_status_by_course,
auth_pipeline_urls, get_next_url_for_login_page,
DISABLE_UNENROLL_CERT_STATES,
destroy_oauth_tokens
)
from student.cookies import set_logged_in_cookies, delete_logged_in_cookies, set_user_info_cookie
from student.models import anonymous_id_for_user, UserAttribute, EnrollStatusChange
from shoppingcart.models import DonationConfiguration, CourseRegistrationCode
from openedx.core.djangoapps.embargo import api as embargo_api
import analytics
from eventtracking import tracker
# Note that this lives in LMS, so this dependency should be refactored.
from notification_prefs.views import enable_notifications
from openedx.core.djangoapps.credit.email_utils import get_credit_provider_display_names, make_providers_strings
from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY
from openedx.core.djangoapps.programs import utils as programs_utils
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.theming import helpers as theming_helpers
from openedx.core.djangoapps.user_api.preferences import api as preferences_api
from openedx.core.djangoapps.catalog.utils import get_programs_data
log = logging.getLogger("edx.student")
AUDIT_LOG = logging.getLogger("audit")
ReverifyInfo = namedtuple('ReverifyInfo', 'course_id course_name course_number date status display') # pylint: disable=invalid-name
SETTING_CHANGE_INITIATED = 'edx.user.settings.change_initiated'
# Used as the name of the user attribute for tracking affiliate registrations
REGISTRATION_AFFILIATE_ID = 'registration_affiliate_id'
REGISTRATION_UTM_PARAMETERS = {
'utm_source': 'registration_utm_source',
'utm_medium': 'registration_utm_medium',
'utm_campaign': 'registration_utm_campaign',
'utm_term': 'registration_utm_term',
'utm_content': 'registration_utm_content',
}
REGISTRATION_UTM_CREATED_AT = 'registration_utm_created_at'
# used to announce a registration
REGISTER_USER = Signal(providing_args=["user", "profile"])
# Disable this warning because it doesn't make sense to completely refactor tests to appease Pylint
# pylint: disable=logging-format-interpolation
def csrf_token(context):
"""A csrf token that can be included in a form."""
token = context.get('csrf_token', '')
if token == 'NOTPROVIDED':
return ''
return (u'<div style="display:none"><input type="hidden"'
' name="csrfmiddlewaretoken" value="%s" /></div>' % (token))
# NOTE: This view is not linked to directly--it is called from
# branding/views.py:index(), which is cached for anonymous users.
# This means that it should always return the same thing for anon
# users. (in particular, no switching based on query params allowed)
def index(request, extra_context=None, user=AnonymousUser()):
"""
Render the edX main page.
extra_context is used to allow immediate display of certain modal windows, eg signup,
as used by external_auth.
"""
if extra_context is None:
extra_context = {}
programs_list = []
courses = get_courses(user)
if configuration_helpers.get_value(
"ENABLE_COURSE_SORTING_BY_START_DATE",
settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"],
):
courses = sort_by_start_date(courses)
else:
courses = sort_by_announcement(courses)
context = {'courses': courses}
context['homepage_overlay_html'] = configuration_helpers.get_value('homepage_overlay_html')
# This appears to be an unused context parameter, at least for the master templates...
context['show_partners'] = configuration_helpers.get_value('show_partners', True)
# TO DISPLAY A YOUTUBE WELCOME VIDEO
# 1) Change False to True
context['show_homepage_promo_video'] = configuration_helpers.get_value('show_homepage_promo_video', False)
# 2) Add your video's YouTube ID (11 chars, eg "123456789xX"), or specify via site configuration
# Note: This value should be moved into a configuration setting and plumbed-through to the
# context via the site configuration workflow, versus living here
youtube_video_id = configuration_helpers.get_value('homepage_promo_video_youtube_id', "your-youtube-id")
context['homepage_promo_video_youtube_id'] = youtube_video_id
# allow for theme override of the courses list
context['courses_list'] = theming_helpers.get_template_path('courses_list.html')
# Insert additional context for use in the template
context.update(extra_context)
# Getting all the programs from course-catalog service. The programs_list is being added to the context but it's
# not being used currently in lms/templates/index.html. To use this list, you need to create a custom theme that
# overrides index.html. The modifications to index.html to display the programs will be done after the support
# for edx-pattern-library is added.
if configuration_helpers.get_value("DISPLAY_PROGRAMS_ON_MARKETING_PAGES",
settings.FEATURES.get("DISPLAY_PROGRAMS_ON_MARKETING_PAGES")):
programs_list = get_programs_data(user)
context["programs_list"] = programs_list
return render_to_response('index.html', context)
def process_survey_link(survey_link, user):
"""
If {UNIQUE_ID} appears in the link, replace it with a unique id for the user.
Currently, this is sha1(user.username). Otherwise, return survey_link.
"""
return survey_link.format(UNIQUE_ID=unique_id_for_user(user))
def cert_info(user, course_overview, course_mode):
"""
Get the certificate info needed to render the dashboard section for the given
student and course.
Arguments:
user (User): A user.
course_overview (CourseOverview): A course.
course_mode (str): The enrollment mode (honor, verified, audit, etc.)
Returns:
dict: Empty dict if certificates are disabled or hidden, or a dictionary with keys:
'status': one of 'generating', 'ready', 'notpassing', 'processing', 'restricted'
'show_download_url': bool
'download_url': url, only present if show_download_url is True
'show_disabled_download_button': bool -- true if state is 'generating'
'show_survey_button': bool
'survey_url': url, only if show_survey_button is True
'grade': if status is not 'processing'
'can_unenroll': if status allows for unenrollment
"""
if not course_overview.may_certify():
return {}
return _cert_info(
user,
course_overview,
certificate_status_for_student(user, course_overview.id),
course_mode
)
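def _example_cert_button_visible(user, enrollment):
    """
    Minimal sketch of consuming cert_info(): the function returns an empty dict
    for courses that may not certify, so callers treat {} as "render nothing".
    The helper name here is illustrative and not used elsewhere in this module.
    """
    info = cert_info(user, enrollment.course_overview, enrollment.mode)
    return bool(info) and info.get('show_download_url', False)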
def reverification_info(statuses):
"""
Returns reverification-related information for *all* of user's enrollments whose
reverification status is in statuses.
Args:
statuses (list): a list of reverification statuses we want information for
example: ["must_reverify", "denied"]
Returns:
dictionary of lists: dictionary with one key per status, e.g.
dict["must_reverify"] = []
dict["must_reverify"] = [some information]
"""
reverifications = defaultdict(list)
# Sort the data by the reverification_end_date
for status in statuses:
if reverifications[status]:
reverifications[status].sort(key=lambda x: x.date)
return reverifications
def get_course_enrollments(user, org_to_include, orgs_to_exclude):
"""
Given a user, return a filtered set of his or her course enrollments.
Arguments:
user (User): the user in question.
org_to_include (str): If not None, ONLY courses of this org will be returned.
orgs_to_exclude (list[str]): If org_to_include is not None, this
argument is ignored. Else, courses of this org will be excluded.
Returns:
generator[CourseEnrollment]: a sequence of enrollments to be displayed
on the user's dashboard.
"""
for enrollment in CourseEnrollment.enrollments_for_user(user):
# If the course is missing or broken, log an error and skip it.
course_overview = enrollment.course_overview
if not course_overview:
log.error(
"User %s enrolled in broken or non-existent course %s",
user.username,
enrollment.course_id
)
continue
# Filter out anything that is not attributed to the current ORG.
if org_to_include and course_overview.location.org != org_to_include:
continue
# Conversely, filter out any enrollments with courses attributed to current ORG.
elif course_overview.location.org in orgs_to_exclude:
continue
# Else, include the enrollment.
else:
yield enrollment
def _cert_info(user, course_overview, cert_status, course_mode): # pylint: disable=unused-argument
"""
Implements the logic for cert_info -- split out for testing.
Arguments:
user (User): A user.
course_overview (CourseOverview): A course.
course_mode (str): The enrollment mode (honor, verified, audit, etc.)
"""
# simplify the status for the template using this lookup table
template_state = {
CertificateStatuses.generating: 'generating',
CertificateStatuses.downloadable: 'ready',
CertificateStatuses.notpassing: 'notpassing',
CertificateStatuses.restricted: 'restricted',
CertificateStatuses.auditing: 'auditing',
CertificateStatuses.audit_passing: 'auditing',
CertificateStatuses.audit_notpassing: 'auditing',
CertificateStatuses.unverified: 'unverified',
}
default_status = 'processing'
default_info = {
'status': default_status,
'show_disabled_download_button': False,
'show_download_url': False,
'show_survey_button': False,
'can_unenroll': True,
}
if cert_status is None:
return default_info
is_hidden_status = cert_status['status'] in ('unavailable', 'processing', 'generating', 'notpassing', 'auditing')
if course_overview.certificates_display_behavior == 'early_no_info' and is_hidden_status:
return {}
status = template_state.get(cert_status['status'], default_status)
status_dict = {
'status': status,
'show_download_url': status == 'ready',
'show_disabled_download_button': status == 'generating',
'mode': cert_status.get('mode', None),
'linked_in_url': None,
'can_unenroll': status not in DISABLE_UNENROLL_CERT_STATES,
}
if (status in ('generating', 'ready', 'notpassing', 'restricted', 'auditing', 'unverified') and
course_overview.end_of_course_survey_url is not None):
status_dict.update({
'show_survey_button': True,
'survey_url': process_survey_link(course_overview.end_of_course_survey_url, user)})
else:
status_dict['show_survey_button'] = False
if status == 'ready':
# showing the certificate web view button if certificate is ready state and feature flags are enabled.
if has_html_certificates_enabled(course_overview.id, course_overview):
if course_overview.has_any_active_web_certificate:
status_dict.update({
'show_cert_web_view': True,
'cert_web_view_url': get_certificate_url(course_id=course_overview.id, uuid=cert_status['uuid'])
})
else:
# don't show download certificate button if we don't have an active certificate for course
status_dict['show_download_url'] = False
elif 'download_url' not in cert_status:
log.warning(
u"User %s has a downloadable cert for %s, but no download url",
user.username,
course_overview.id
)
return default_info
else:
status_dict['download_url'] = cert_status['download_url']
# If enabled, show the LinkedIn "add to profile" button
# Clicking this button sends the user to LinkedIn where they
# can add the certificate information to their profile.
linkedin_config = LinkedInAddToProfileConfiguration.current()
# posting certificates to LinkedIn is not currently
# supported in White Labels
if linkedin_config.enabled and not theming_helpers.is_request_in_themed_site():
status_dict['linked_in_url'] = linkedin_config.add_to_profile_url(
course_overview.id,
course_overview.display_name,
cert_status.get('mode'),
cert_status['download_url']
)
if status in {'generating', 'ready', 'notpassing', 'restricted', 'auditing', 'unverified'}:
persisted_grade = CourseGradeFactory().get_persisted(user, course_overview)
if persisted_grade is not None:
status_dict['grade'] = unicode(persisted_grade.percent)
elif 'grade' in cert_status:
status_dict['grade'] = cert_status['grade']
else:
# Note: as of 11/20/2012, we know there are students in this state-- cs169.1x,
# who need to be regraded (we weren't tracking 'notpassing' at first).
# We can add a log.warning here once we think it shouldn't happen.
return default_info
return status_dict
@ensure_csrf_cookie
def signin_user(request):
"""Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`."""
external_auth_response = external_auth_login(request)
if external_auth_response is not None:
return external_auth_response
# Determine the URL to redirect to following login:
redirect_to = get_next_url_for_login_page(request)
if request.user.is_authenticated():
return redirect(redirect_to)
third_party_auth_error = None
for msg in messages.get_messages(request):
if msg.extra_tags.split()[0] == "social-auth":
# msg may or may not be translated. Try translating [again] in case we are able to:
third_party_auth_error = _(unicode(msg)) # pylint: disable=translation-of-non-string
break
context = {
'login_redirect_url': redirect_to, # This gets added to the query string of the "Sign In" button in the header
# Bool injected into JS to submit form if we're inside a running third-
# party auth pipeline; distinct from the actual instance of the running
# pipeline, if any.
'pipeline_running': 'true' if pipeline.running(request) else 'false',
'pipeline_url': auth_pipeline_urls(pipeline.AUTH_ENTRY_LOGIN, redirect_url=redirect_to),
'platform_name': configuration_helpers.get_value(
'platform_name',
settings.PLATFORM_NAME
),
'third_party_auth_error': third_party_auth_error
}
return render_to_response('login.html', context)
@ensure_csrf_cookie
def register_user(request, extra_context=None):
"""Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`."""
# Determine the URL to redirect to following login:
redirect_to = get_next_url_for_login_page(request)
if request.user.is_authenticated():
return redirect(redirect_to)
external_auth_response = external_auth_register(request)
if external_auth_response is not None:
return external_auth_response
context = {
'login_redirect_url': redirect_to, # This gets added to the query string of the "Sign In" button in the header
'email': '',
'name': '',
'running_pipeline': None,
'pipeline_urls': auth_pipeline_urls(pipeline.AUTH_ENTRY_REGISTER, redirect_url=redirect_to),
'platform_name': configuration_helpers.get_value(
'platform_name',
settings.PLATFORM_NAME
),
'selected_provider': '',
'username': '',
}
if extra_context is not None:
context.update(extra_context)
if context.get("extauth_domain", '').startswith(
openedx.core.djangoapps.external_auth.views.SHIBBOLETH_DOMAIN_PREFIX
):
return render_to_response('register-shib.html', context)
# If third-party auth is enabled, prepopulate the form with data from the
# selected provider.
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
current_provider = provider.Registry.get_from_pipeline(running_pipeline)
if current_provider is not None:
overrides = current_provider.get_register_form_data(running_pipeline.get('kwargs'))
overrides['running_pipeline'] = running_pipeline
overrides['selected_provider'] = current_provider.name
context.update(overrides)
return render_to_response('register.html', context)
def complete_course_mode_info(course_id, enrollment, modes=None):
"""
We would like to compute some more information from the given course modes
and the user's current enrollment
Returns the given information:
- whether to show the course upsell information
- numbers of days until they can't upsell anymore
"""
if modes is None:
modes = CourseMode.modes_for_course_dict(course_id)
mode_info = {'show_upsell': False, 'days_for_upsell': None}
# we want to know if the user is already enrolled as verified or credit and
# if verified is an option.
if CourseMode.VERIFIED in modes and enrollment.mode in CourseMode.UPSELL_TO_VERIFIED_MODES:
mode_info['show_upsell'] = True
mode_info['verified_sku'] = modes['verified'].sku
mode_info['verified_bulk_sku'] = modes['verified'].bulk_sku
# if there is an expiration date, find out how long from now it is
if modes['verified'].expiration_datetime:
today = datetime.datetime.now(UTC).date()
mode_info['days_for_upsell'] = (modes['verified'].expiration_datetime.date() - today).days
return mode_info
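def _example_upsell_days(course_id, enrollment):
    """
    Minimal sketch of using complete_course_mode_info() to decide whether to
    render an upsell notice; the helper name is illustrative. Returns the days
    left to upgrade, or None when no upsell should be shown (or the verified
    mode never expires).
    """
    mode_info = complete_course_mode_info(course_id, enrollment)
    if mode_info['show_upsell']:
        return mode_info['days_for_upsell']
    return None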
def is_course_blocked(request, redeemed_registration_codes, course_key):
"""Checking either registration is blocked or not ."""
blocked = False
for redeemed_registration in redeemed_registration_codes:
# registration codes may be generated via Bulk Purchase Scenario
# we have to check only for the invoice generated registration codes
# that their invoice is valid or not
if redeemed_registration.invoice_item:
if not redeemed_registration.invoice_item.invoice.is_valid:
blocked = True
# disabling email notifications for unpaid registration courses
Optout.objects.get_or_create(user=request.user, course_id=course_key)
log.info(
u"User %s (%s) opted out of receiving emails from course %s",
request.user.username,
request.user.email,
course_key,
)
track.views.server_track(
request,
"change-email1-settings",
{"receive_emails": "no", "course": course_key.to_deprecated_string()},
page='dashboard',
)
break
return blocked
@login_required
@ensure_csrf_cookie
def dashboard(request):
"""
Provides the LMS dashboard view
TODO: This is lms specific and does not belong in common code.
Arguments:
request: The request object.
Returns:
The dashboard response.
"""
user = request.user
platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME)
enable_verified_certificates = configuration_helpers.get_value(
'ENABLE_VERIFIED_CERTIFICATES',
settings.FEATURES.get('ENABLE_VERIFIED_CERTIFICATES')
)
display_course_modes_on_dashboard = configuration_helpers.get_value(
'DISPLAY_COURSE_MODES_ON_DASHBOARD',
settings.FEATURES.get('DISPLAY_COURSE_MODES_ON_DASHBOARD', True)
)
# we want to filter and only show enrollments for courses within
# the 'ORG' defined in configuration.
course_org_filter = configuration_helpers.get_value('course_org_filter')
# Let's filter out any courses in an "org" that has been declared to be
# in a configuration
org_filter_out_set = configuration_helpers.get_all_orgs()
# remove our current org from the "filter out" list, if applicable
if course_org_filter:
org_filter_out_set.remove(course_org_filter)
# Build our (course, enrollment) list for the user, but ignore any courses that no
# longer exist (because the course IDs have changed). Still, we don't delete those
# enrollments, because it could have been a data push snafu.
course_enrollments = list(get_course_enrollments(user, course_org_filter, org_filter_out_set))
# sort the enrollment pairs by the enrollment date
course_enrollments.sort(key=lambda x: x.created, reverse=True)
# Retrieve the course modes for each course
enrolled_course_ids = [enrollment.course_id for enrollment in course_enrollments]
__, unexpired_course_modes = CourseMode.all_and_unexpired_modes_for_courses(enrolled_course_ids)
course_modes_by_course = {
course_id: {
mode.slug: mode
for mode in modes
}
for course_id, modes in unexpired_course_modes.iteritems()
}
# Check to see if the student has recently enrolled in a course.
# If so, display a notification message confirming the enrollment.
enrollment_message = _create_recent_enrollment_message(
course_enrollments, course_modes_by_course
)
course_optouts = Optout.objects.filter(user=user).values_list('course_id', flat=True)
message = ""
if not user.is_active:
message = render_to_string(
'registration/activate_account_notice.html',
{'email': user.email, 'platform_name': platform_name}
)
# Global staff can see what courses errored on their dashboard
staff_access = False
errored_courses = {}
if has_access(user, 'staff', 'global'):
# Show any courses that errored on load
staff_access = True
errored_courses = modulestore().get_errored_courses()
show_courseware_links_for = frozenset(
enrollment.course_id for enrollment in course_enrollments
if has_access(request.user, 'load', enrollment.course_overview)
and has_access(request.user, 'view_courseware_with_prerequisites', enrollment.course_overview)
)
# Find programs associated with courses being displayed. This information
# is passed in the template context to allow rendering of program-related
# information on the dashboard.
meter = programs_utils.ProgramProgressMeter(user, enrollments=course_enrollments)
programs_by_run = meter.engaged_programs(by_run=True)
# Construct a dictionary of course mode information
# used to render the course list. We re-use the course modes dict
# we loaded earlier to avoid hitting the database.
course_mode_info = {
enrollment.course_id: complete_course_mode_info(
enrollment.course_id, enrollment,
modes=course_modes_by_course[enrollment.course_id]
)
for enrollment in course_enrollments
}
# Determine the per-course verification status
# This is a dictionary in which the keys are course locators
# and the values are one of:
#
# VERIFY_STATUS_NEED_TO_VERIFY
# VERIFY_STATUS_SUBMITTED
# VERIFY_STATUS_APPROVED
# VERIFY_STATUS_MISSED_DEADLINE
#
# Each of which correspond to a particular message to display
# next to the course on the dashboard.
#
# If a course is not included in this dictionary,
# there is no verification messaging to display.
verify_status_by_course = check_verify_status_by_course(user, course_enrollments)
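# Illustrative mapping (hypothetical course key; the status value is one of the
# constants listed above):
#
#     verify_status_by_course == {
#         CourseKey.from_string("edX/DemoX/Demo_Course"): VERIFY_STATUS_NEED_TO_VERIFY,
#     }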
cert_statuses = {
enrollment.course_id: cert_info(request.user, enrollment.course_overview, enrollment.mode)
for enrollment in course_enrollments
}
# only show email settings for Mongo courses, and only when bulk email is turned on
show_email_settings_for = frozenset(
enrollment.course_id for enrollment in course_enrollments if (
BulkEmailFlag.feature_enabled(enrollment.course_id)
)
)
# Verification Attempts
# Used to generate the "you must reverify for course x" banner
verification_status, verification_msg = SoftwareSecurePhotoVerification.user_status(user)
# Gets data for midcourse reverifications, if any are necessary or have failed
statuses = ["approved", "denied", "pending", "must_reverify"]
reverifications = reverification_info(statuses)
show_refund_option_for = frozenset(
enrollment.course_id for enrollment in course_enrollments
if enrollment.refundable()
)
block_courses = frozenset(
enrollment.course_id for enrollment in course_enrollments
if is_course_blocked(
request,
CourseRegistrationCode.objects.filter(
course_id=enrollment.course_id,
registrationcoderedemption__redeemed_by=request.user
),
enrollment.course_id
)
)
enrolled_courses_either_paid = frozenset(
enrollment.course_id for enrollment in course_enrollments
if enrollment.is_paid_course()
)
# If there are *any* denied reverifications that have not been toggled off,
# we'll display the banner
denied_banner = any(item.display for item in reverifications["denied"])
# Populate the Order History for the side-bar.
order_history_list = order_history(user, course_org_filter=course_org_filter, org_filter_out_set=org_filter_out_set)
# get list of courses having pre-requisites yet to be completed
courses_having_prerequisites = frozenset(
enrollment.course_id for enrollment in course_enrollments
if enrollment.course_overview.pre_requisite_courses
)
courses_requirements_not_met = get_pre_requisite_courses_not_completed(user, courses_having_prerequisites)
if 'notlive' in request.GET:
redirect_message = _("The course you are looking for does not start until {date}.").format(
date=request.GET['notlive']
)
elif 'course_closed' in request.GET:
redirect_message = _("The course you are looking for is closed for enrollment as of {date}.").format(
date=request.GET['course_closed']
)
else:
redirect_message = ''
context = {
'enrollment_message': enrollment_message,
'redirect_message': redirect_message,
'course_enrollments': course_enrollments,
'course_optouts': course_optouts,
'message': message,
'staff_access': staff_access,
'errored_courses': errored_courses,
'show_courseware_links_for': show_courseware_links_for,
'all_course_modes': course_mode_info,
'cert_statuses': cert_statuses,
'credit_statuses': _credit_statuses(user, course_enrollments),
'show_email_settings_for': show_email_settings_for,
'reverifications': reverifications,
'verification_status': verification_status,
'verification_status_by_course': verify_status_by_course,
'verification_msg': verification_msg,
'show_refund_option_for': show_refund_option_for,
'block_courses': block_courses,
'denied_banner': denied_banner,
'billing_email': settings.PAYMENT_SUPPORT_EMAIL,
'user': user,
'logout_url': reverse('logout'),
'platform_name': platform_name,
'enrolled_courses_either_paid': enrolled_courses_either_paid,
'provider_states': [],
'order_history_list': order_history_list,
'courses_requirements_not_met': courses_requirements_not_met,
'nav_hidden': True,
'programs_by_run': programs_by_run,
'show_program_listing': ProgramsApiConfig.current().show_program_listing,
'disable_courseware_js': True,
'display_course_modes_on_dashboard': enable_verified_certificates and display_course_modes_on_dashboard,
}
ecommerce_service = EcommerceService()
if ecommerce_service.is_enabled(request.user):
context.update({
'use_ecommerce_payment_flow': True,
'ecommerce_payment_page': ecommerce_service.payment_page_url(),
})
response = render_to_response('dashboard.html', context)
set_user_info_cookie(response, request)
return response
def _create_recent_enrollment_message(course_enrollments, course_modes): # pylint: disable=invalid-name
"""
Builds a recent course enrollment message.
Constructs a new message template based on any recent course enrollments
for the student.
Args:
course_enrollments (list[CourseEnrollment]): a list of course enrollments.
course_modes (dict): Mapping of course ID's to course mode dictionaries.
Returns:
A string representing the HTML message output from the message template.
None if there are no recently enrolled courses.
"""
recently_enrolled_courses = _get_recently_enrolled_courses(course_enrollments)
if recently_enrolled_courses:
enroll_messages = [
{
"course_id": enrollment.course_overview.id,
"course_name": enrollment.course_overview.display_name,
"allow_donation": _allow_donation(course_modes, enrollment.course_overview.id, enrollment)
}
for enrollment in recently_enrolled_courses
]
platform_name = configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME)
return render_to_string(
'enrollment/course_enrollment_message.html',
{'course_enrollment_messages': enroll_messages, 'platform_name': platform_name}
)
def _get_recently_enrolled_courses(course_enrollments):
"""
Given a list of enrollments, filter out all but recent enrollments.
Args:
course_enrollments (list[CourseEnrollment]): A list of course enrollments.
Returns:
list[CourseEnrollment]: A list of recent course enrollments.
"""
seconds = DashboardConfiguration.current().recent_enrollment_time_delta
time_delta = (datetime.datetime.now(UTC) - datetime.timedelta(seconds=seconds))
return [
enrollment for enrollment in course_enrollments
# If the enrollment has no created date, we are explicitly excluding the course
# from the list of recent enrollments.
if enrollment.is_active and enrollment.created > time_delta
]
def _allow_donation(course_modes, course_id, enrollment):
"""Determines if the dashboard will request donations for the given course.
Check if donations are configured for the platform, and if the current course is accepting donations.
Args:
course_modes (dict): Mapping of course ID's to course mode dictionaries.
course_id (str): The unique identifier for the course.
enrollment(CourseEnrollment): The enrollment object in which the user is enrolled
Returns:
True if the course is allowing donations.
"""
if course_id not in course_modes:
flat_unexpired_modes = {
unicode(course_id): [mode for mode in modes]
for course_id, modes in course_modes.iteritems()
}
flat_all_modes = {
unicode(course_id): [mode.slug for mode in modes]
for course_id, modes in CourseMode.all_modes_for_courses([course_id]).iteritems()
}
log.error(
u'Cannot find `%s` in course modes `%s`. All modes: `%s`',
course_id,
flat_unexpired_modes,
flat_all_modes
)
donations_enabled = DonationConfiguration.current().enabled
return (
donations_enabled and
enrollment.mode in course_modes[course_id] and
course_modes[course_id][enrollment.mode].min_price == 0
)
def _update_email_opt_in(request, org):
"""Helper function used to hit the profile API if email opt-in is enabled."""
email_opt_in = request.POST.get('email_opt_in')
if email_opt_in is not None:
email_opt_in_boolean = email_opt_in == 'true'
preferences_api.update_email_opt_in(request.user, org, email_opt_in_boolean)
def _credit_statuses(user, course_enrollments):
"""
Retrieve the status for credit courses.
A credit course is a course for which a user can purchase
college credit. The current flow is:
1. User becomes eligible for credit (submits verifications, passes the course, etc.)
2. User purchases credit from a particular credit provider.
3. User requests credit from the provider, usually creating an account on the provider's site.
4. The credit provider notifies us whether the user's request for credit has been accepted or rejected.
The dashboard is responsible for communicating the user's state in this flow.
Arguments:
user (User): The currently logged-in user.
course_enrollments (list[CourseEnrollment]): List of enrollments for the
user.
Returns: dict
The returned dictionary has keys that are `CourseKey`s and values that
are dictionaries with:
* eligible (bool): True if the user is eligible for credit in this course.
* deadline (datetime): The deadline for purchasing and requesting credit for this course.
* purchased (bool): Whether the user has purchased credit for this course.
* provider_name (string): The display name of the credit provider.
* provider_status_url (string): A URL the user can visit to check on their credit request status.
* request_status (string): Either "pending", "approved", or "rejected"
* error (bool): If true, an unexpected error occurred when retrieving the credit status,
so the user should contact the support team.
Example:
>>> _credit_statuses(user, course_enrollments)
{
CourseKey.from_string("edX/DemoX/Demo_Course"): {
"course_key": "edX/DemoX/Demo_Course",
"eligible": True,
"deadline": 2015-11-23 00:00:00 UTC,
"purchased": True,
"provider_name": "Hogwarts",
"provider_status_url": "http://example.com/status",
"request_status": "pending",
"error": False
}
}
"""
from openedx.core.djangoapps.credit import api as credit_api
# Feature flag off
if not settings.FEATURES.get("ENABLE_CREDIT_ELIGIBILITY"):
return {}
request_status_by_course = {
request["course_key"]: request["status"]
for request in credit_api.get_credit_requests_for_user(user.username)
}
credit_enrollments = {
enrollment.course_id: enrollment
for enrollment in course_enrollments
if enrollment.mode == "credit"
}
# When a user purchases credit in a course, the user's enrollment
# mode is set to "credit" and an enrollment attribute is set
# with the ID of the credit provider. We retrieve *all* such attributes
# here to minimize the number of database queries.
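# Illustrative attribute record (the provider id value is hypothetical):
#
#     CourseEnrollmentAttribute(
#         enrollment=<the user's credit-mode enrollment>,
#         namespace="credit",
#         name="provider_id",
#         value="hogwarts",
#     )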
purchased_credit_providers = {
attribute.enrollment.course_id: attribute.value
for attribute in CourseEnrollmentAttribute.objects.filter(
namespace="credit",
name="provider_id",
enrollment__in=credit_enrollments.values()
).select_related("enrollment")
}
provider_info_by_id = {
provider["id"]: provider
for provider in credit_api.get_credit_providers()
}
statuses = {}
for eligibility in credit_api.get_eligibilities_for_user(user.username):
course_key = CourseKey.from_string(unicode(eligibility["course_key"]))
providers_names = get_credit_provider_display_names(course_key)
status = {
"course_key": unicode(course_key),
"eligible": True,
"deadline": eligibility["deadline"],
"purchased": course_key in credit_enrollments,
"provider_name": make_providers_strings(providers_names),
"provider_status_url": None,
"provider_id": None,
"request_status": request_status_by_course.get(course_key),
"error": False,
}
# If the user has purchased credit, then include information about the credit
# provider from which the user purchased credit.
# We retrieve the provider's ID from an "enrollment attribute" set on the user's
# enrollment when the user's order for credit is fulfilled by the E-Commerce service.
if status["purchased"]:
provider_id = purchased_credit_providers.get(course_key)
if provider_id is None:
status["error"] = True
log.error(
u"Could not find credit provider associated with credit enrollment "
u"for user %s in course %s. The user will not be able to see his or her "
u"credit request status on the student dashboard. This attribute should "
u"have been set when the user purchased credit in the course.",
user.id, course_key
)
else:
provider_info = provider_info_by_id.get(provider_id, {})
status["provider_name"] = provider_info.get("display_name")
status["provider_status_url"] = provider_info.get("status_url")
status["provider_id"] = provider_id
statuses[course_key] = status
return statuses
@transaction.non_atomic_requests
@require_POST
@outer_atomic(read_committed=True)
def change_enrollment(request, check_access=True):
"""
Modify the enrollment status for the logged-in user.
TODO: This is lms specific and does not belong in common code.
The request parameter must be a POST request (other methods return 405)
that specifies course_id and enrollment_action parameters. If course_id or
enrollment_action is not specified, if course_id is not valid, if
enrollment_action is something other than "enroll" or "unenroll", if
enrollment_action is "enroll" and enrollment is closed for the course, or
if enrollment_action is "unenroll" and the user is not enrolled in the
course, a 400 error will be returned. If the user is not logged in, 403
will be returned; it is important that only this case return 403 so the
front end can redirect the user to a registration or login page when this
happens. This function should only be called from an AJAX request, so
the error messages in the responses should never actually be user-visible.
Args:
request (`Request`): The Django request object
Keyword Args:
check_access (boolean): If True, we check that an accessible course actually
exists for the given course_key before we enroll the student.
The default is True, which is what you want for any standard
enrollment flow; legacy code and code with non-standard flows
(ex. beta tester invitations) may pass False to skip the check.
Returns:
Response
"""
# Get the user
user = request.user
# Ensure the user is authenticated
if not user.is_authenticated():
return HttpResponseForbidden()
# Ensure we received a course_id
action = request.POST.get("enrollment_action")
if 'course_id' not in request.POST:
return HttpResponseBadRequest(_("Course id not specified"))
try:
course_id = SlashSeparatedCourseKey.from_deprecated_string(request.POST.get("course_id"))
except InvalidKeyError:
log.warning(
u"User %s tried to %s with invalid course id: %s",
user.username,
action,
request.POST.get("course_id"),
)
return HttpResponseBadRequest(_("Invalid course id"))
if action == "enroll":
# Make sure the course exists
# We skip this check on unenroll; otherwise an enrollment with a bad course id could never be removed
if not modulestore().has_course(course_id):
log.warning(
u"User %s tried to enroll in non-existent course %s",
user.username,
course_id
)
return HttpResponseBadRequest(_("Course id is invalid"))
# Record the user's email opt-in preference
if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'):
_update_email_opt_in(request, course_id.org)
available_modes = CourseMode.modes_for_course_dict(course_id)
# Check whether the user is blocked from enrolling in this course
# This can occur if the user's IP is on a global blacklist
# or if the user is enrolling in a country in which the course
# is not available.
redirect_url = embargo_api.redirect_if_blocked(
course_id, user=user, ip_address=get_ip(request),
url=request.path
)
if redirect_url:
return HttpResponse(redirect_url)
# Check that auto enrollment is allowed for this course
# (= the course is NOT behind a paywall)
if CourseMode.can_auto_enroll(course_id):
# Enroll the user using the default mode (audit)
# We're assuming that users of the course enrollment table
# will NOT try to look up the course enrollment model
# by its slug. If they do, it's possible (based on the state of the database)
# for no such model to exist, even though we've set the enrollment type
# to "audit".
try:
enroll_mode = CourseMode.auto_enroll_mode(course_id, available_modes)
if enroll_mode:
CourseEnrollment.enroll(user, course_id, check_access=check_access, mode=enroll_mode)
except Exception: # pylint: disable=broad-except
return HttpResponseBadRequest(_("Could not enroll"))
# If we have more than one course mode or professional ed is enabled,
# then send the user to the choose your track page.
# (In the case of no-id-professional/professional ed, this will redirect to a page that
# funnels users directly into the verification / payment flow)
if CourseMode.has_verified_mode(available_modes) or CourseMode.has_professional_mode(available_modes):
return HttpResponse(
reverse("course_modes_choose", kwargs={'course_id': unicode(course_id)})
)
# Otherwise, there is only one mode available (the default)
return HttpResponse()
elif action == "unenroll":
enrollment = CourseEnrollment.get_enrollment(user, course_id)
if not enrollment:
return HttpResponseBadRequest(_("You are not enrolled in this course"))
certificate_info = cert_info(user, enrollment.course_overview, enrollment.mode)
if certificate_info.get('status') in DISABLE_UNENROLL_CERT_STATES:
return HttpResponseBadRequest(_("Your certificate prevents you from unenrolling from this course"))
CourseEnrollment.unenroll(user, course_id)
return HttpResponse()
else:
return HttpResponseBadRequest(_("Enrollment action is invalid"))
# Need different levels of logging
@ensure_csrf_cookie
def login_user(request, error=""): # pylint: disable=too-many-statements,unused-argument
"""AJAX request to log in the user."""
backend_name = None
email = None
password = None
redirect_url = None
response = None
running_pipeline = None
third_party_auth_requested = third_party_auth.is_enabled() and pipeline.running(request)
third_party_auth_successful = False
trumped_by_first_party_auth = bool(request.POST.get('email')) or bool(request.POST.get('password'))
user = None
platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME)
if third_party_auth_requested and not trumped_by_first_party_auth:
# The user has already authenticated via third-party auth and has not
# asked to do first party auth by supplying a username or password. We
# now want to put them through the same logging and cookie calculation
# logic as with first-party auth.
running_pipeline = pipeline.get(request)
username = running_pipeline['kwargs'].get('username')
backend_name = running_pipeline['backend']
third_party_uid = running_pipeline['kwargs']['uid']
requested_provider = provider.Registry.get_from_pipeline(running_pipeline)
try:
user = pipeline.get_authenticated_user(requested_provider, username, third_party_uid)
third_party_auth_successful = True
except User.DoesNotExist:
AUDIT_LOG.warning(
u"Login failed - user with username {username} has no social auth "
"with backend_name {backend_name}".format(
username=username, backend_name=backend_name)
)
message = _(
"You've successfully logged into your {provider_name} account, "
"but this account isn't linked with an {platform_name} account yet."
).format(
platform_name=platform_name,
provider_name=requested_provider.name,
)
message += "<br/><br/>"
message += _(
"Use your {platform_name} username and password to log into {platform_name} below, "
"and then link your {platform_name} account with {provider_name} from your dashboard."
).format(
platform_name=platform_name,
provider_name=requested_provider.name,
)
message += "<br/><br/>"
message += _(
"If you don't have an {platform_name} account yet, "
"click <strong>Register</strong> at the top of the page."
).format(
platform_name=platform_name
)
return HttpResponse(message, content_type="text/plain", status=403)
else:
if 'email' not in request.POST or 'password' not in request.POST:
return JsonResponse({
"success": False,
# TODO: User error message
"value": _('There was an error receiving your login information. Please email us.'),
}) # TODO: this should be status code 400
email = request.POST['email']
password = request.POST['password']
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.warning(u"Login failed - Unknown user email")
else:
AUDIT_LOG.warning(u"Login failed - Unknown user email: {0}".format(email))
# check if the user has a linked shibboleth account, if so, redirect the user to shib-login
# This behavior is pretty much like what gmail does for shibboleth. Try entering some @stanford.edu
# address into the Gmail login.
if settings.FEATURES.get('AUTH_USE_SHIB') and user:
try:
eamap = ExternalAuthMap.objects.get(user=user)
if eamap.external_domain.startswith(openedx.core.djangoapps.external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
return JsonResponse({
"success": False,
"redirect": reverse('shib-login'),
}) # TODO: this should be status code 301 # pylint: disable=fixme
except ExternalAuthMap.DoesNotExist:
# This is actually the common case, logging in user without external linked login
AUDIT_LOG.info(u"User %s w/o external auth attempting login", user)
# see if account has been locked out due to excessive login failures
user_found_by_email_lookup = user
if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
if LoginFailures.is_user_locked_out(user_found_by_email_lookup):
lockout_message = _('This account has been temporarily locked due '
'to excessive login failures. Try again later.')
return JsonResponse({
"success": False,
"value": lockout_message,
}) # TODO: this should be status code 429 # pylint: disable=fixme
# see if the user must reset his/her password due to any policy settings
if user_found_by_email_lookup and PasswordHistory.should_user_reset_password_now(user_found_by_email_lookup):
return JsonResponse({
"success": False,
"value": _('Your password has expired due to password policy on this account. You must '
'reset your password before you can log in again. Please click the '
'"Forgot Password" link on this page to reset your password before logging in again.'),
}) # TODO: this should be status code 403 # pylint: disable=fixme
# if the user doesn't exist, we want to set the username to an invalid
# username so that authentication is guaranteed to fail and we can take
# advantage of the ratelimited backend
username = user.username if user else ""
if not third_party_auth_successful:
try:
user = authenticate(username=username, password=password, request=request)
# this occurs when there are too many attempts from the same IP address
except RateLimitException:
return JsonResponse({
"success": False,
"value": _('Too many failed login attempts. Try again later.'),
}) # TODO: this should be status code 429 # pylint: disable=fixme
if user is None:
# tick the failed login counters if the user exists in the database
if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
LoginFailures.increment_lockout_counter(user_found_by_email_lookup)
# if we didn't find this username earlier, the account for this email
# doesn't exist, and doesn't have a corresponding password
if username != "":
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
loggable_id = user_found_by_email_lookup.id if user_found_by_email_lookup else "<unknown>"
AUDIT_LOG.warning(u"Login failed - password for user.id: {0} is invalid".format(loggable_id))
else:
AUDIT_LOG.warning(u"Login failed - password for {0} is invalid".format(email))
return JsonResponse({
"success": False,
"value": _('Email or password is incorrect.'),
}) # TODO: this should be status code 400 # pylint: disable=fixme
# successful login, clear failed login attempts counters, if applicable
if LoginFailures.is_feature_enabled():
LoginFailures.clear_lockout_counter(user)
# Track the user's sign in
if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
analytics.identify(
user.id,
{
'email': email,
'username': username
},
{
# Disable MailChimp because we don't want to update the user's email
# and username in MailChimp on every page load. We only need to capture
# this data on registration/activation.
'MailChimp': False
}
)
analytics.track(
user.id,
"edx.bi.user.account.authenticated",
{
'category': "conversion",
'label': request.POST.get('course_id'),
'provider': None
},
context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
if user is not None and user.is_active:
try:
# We do not log here, because we have a handler registered
# to perform logging on successful logins.
login(request, user)
if request.POST.get('remember') == 'true':
request.session.set_expiry(604800)
log.debug("Setting user session to never expire")
else:
request.session.set_expiry(0)
except Exception as exc: # pylint: disable=broad-except
AUDIT_LOG.critical("Login failed - Could not create session. Is memcached running?")
log.critical("Login failed - Could not create session. Is memcached running?")
log.exception(exc)
raise
redirect_url = None # The AJAX method calling should know the default destination upon success
if third_party_auth_successful:
redirect_url = pipeline.get_complete_url(backend_name)
response = JsonResponse({
"success": True,
"redirect_url": redirect_url,
})
# Ensure that the external marketing site can
# detect that the user is logged in.
return set_logged_in_cookies(request, response, user)
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.warning(u"Login failed - Account not active for user.id: {0}, resending activation".format(user.id))
else:
AUDIT_LOG.warning(u"Login failed - Account not active for user {0}, resending activation".format(username))
reactivation_email_for_user(user)
not_activated_msg = _("Before you sign in, you need to activate your account. We have sent you an "
"email message with instructions for activating your account.")
return JsonResponse({
"success": False,
"value": not_activated_msg,
}) # TODO: this should be status code 400 # pylint: disable=fixme
@csrf_exempt
@require_POST
@social_utils.strategy("social:complete")
def login_oauth_token(request, backend):
"""
Authenticate the client using an OAuth access token by using the token to
retrieve information from a third party and matching that information to an
existing user.
"""
warnings.warn("Please use AccessTokenExchangeView instead.", DeprecationWarning)
backend = request.backend
if isinstance(backend, social_oauth.BaseOAuth1) or isinstance(backend, social_oauth.BaseOAuth2):
if "access_token" in request.POST:
# Tell third party auth pipeline that this is an API call
request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_LOGIN_API
user = None
try:
user = backend.do_auth(request.POST["access_token"])
except (HTTPError, AuthException):
pass
# do_auth can return a non-User object if it fails
if user and isinstance(user, User):
login(request, user)
return JsonResponse(status=204)
else:
# Ensure user does not re-enter the pipeline
request.social_strategy.clean_partial_pipeline()
return JsonResponse({"error": "invalid_token"}, status=401)
else:
return JsonResponse({"error": "invalid_request"}, status=400)
raise Http404
@require_GET
@login_required
@ensure_csrf_cookie
def manage_user_standing(request):
"""
Renders the view used to manage user standing. Also displays a table
of user accounts that have been disabled and who disabled them.
"""
if not request.user.is_staff:
raise Http404
all_disabled_accounts = UserStanding.objects.filter(
account_status=UserStanding.ACCOUNT_DISABLED
)
all_disabled_users = [standing.user for standing in all_disabled_accounts]
headers = ['username', 'account_changed_by']
rows = []
for user in all_disabled_users:
row = [user.username, user.standing.changed_by]
rows.append(row)
context = {'headers': headers, 'rows': rows}
return render_to_response("manage_user_standing.html", context)
@require_POST
@login_required
@ensure_csrf_cookie
def disable_account_ajax(request):
"""
Ajax call to change user standing. Endpoint of the form
in manage_user_standing.html
"""
if not request.user.is_staff:
raise Http404
username = request.POST.get('username')
context = {}
if username is None or username.strip() == '':
context['message'] = _('Please enter a username')
return JsonResponse(context, status=400)
account_action = request.POST.get('account_action')
if account_action is None:
context['message'] = _('Please choose an option')
return JsonResponse(context, status=400)
username = username.strip()
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
context['message'] = _("User with username {} does not exist").format(username)
return JsonResponse(context, status=400)
else:
user_account, _success = UserStanding.objects.get_or_create(
user=user, defaults={'changed_by': request.user},
)
if account_action == 'disable':
user_account.account_status = UserStanding.ACCOUNT_DISABLED
context['message'] = _("Successfully disabled {}'s account").format(username)
log.info(u"%s disabled %s's account", request.user, username)
elif account_action == 'reenable':
user_account.account_status = UserStanding.ACCOUNT_ENABLED
context['message'] = _("Successfully reenabled {}'s account").format(username)
log.info(u"%s reenabled %s's account", request.user, username)
else:
context['message'] = _("Unexpected account status")
return JsonResponse(context, status=400)
user_account.changed_by = request.user
user_account.standing_last_changed_at = datetime.datetime.now(UTC)
user_account.save()
return JsonResponse(context)
@login_required
@ensure_csrf_cookie
def change_setting(request):
"""JSON call to change a profile setting: Right now, location"""
# TODO (vshnayder): location is no longer used
u_prof = UserProfile.objects.get(user=request.user) # request.user.profile_cache
if 'location' in request.POST:
u_prof.location = request.POST['location']
u_prof.save()
return JsonResponse({
"success": True,
"location": u_prof.location,
})
class AccountValidationError(Exception):
def __init__(self, message, field):
super(AccountValidationError, self).__init__(message)
self.field = field
@receiver(post_save, sender=User)
def user_signup_handler(sender, **kwargs): # pylint: disable=unused-argument
"""
Handler that saves the user's signup source
when the user is created.
"""
if 'created' in kwargs and kwargs['created']:
site = configuration_helpers.get_value('SITE_NAME')
if site:
user_signup_source = UserSignupSource(user=kwargs['instance'], site=site)
user_signup_source.save()
log.info(u'user {} originated from a white labeled "Microsite"'.format(kwargs['instance'].id))
def _do_create_account(form, custom_form=None):
"""
Given cleaned post variables, create the User and UserProfile objects, as well as the
registration for this user.
Returns a tuple (User, UserProfile, Registration).
Note: this function is also used for creating test users.
"""
errors = {}
errors.update(form.errors)
if custom_form:
errors.update(custom_form.errors)
if errors:
raise ValidationError(errors)
user = User(
username=form.cleaned_data["username"],
email=form.cleaned_data["email"],
is_active=False
)
user.set_password(form.cleaned_data["password"])
registration = Registration()
# TODO: Rearrange so that if part of the process fails, the whole process fails.
# Right now, we can have e.g. no registration e-mail sent out and a zombie account
try:
with transaction.atomic():
user.save()
if custom_form:
custom_model = custom_form.save(commit=False)
custom_model.user = user
custom_model.save()
except IntegrityError:
# Figure out the cause of the integrity error
if len(User.objects.filter(username=user.username)) > 0:
raise AccountValidationError(
_("An account with the Public Username '{username}' already exists.").format(username=user.username),
field="username"
)
elif len(User.objects.filter(email=user.email)) > 0:
raise AccountValidationError(
_("An account with the Email '{email}' already exists.").format(email=user.email),
field="email"
)
else:
raise
# add this account creation to password history
# NOTE, this will be a NOP unless the feature has been turned on in configuration
password_history_entry = PasswordHistory()
password_history_entry.create(user)
registration.register(user)
profile_fields = [
"name", "level_of_education", "gender", "mailing_address", "city", "country", "goals",
"year_of_birth"
]
profile = UserProfile(
user=user,
**{key: form.cleaned_data.get(key) for key in profile_fields}
)
extended_profile = form.cleaned_extended_profile
if extended_profile:
profile.meta = json.dumps(extended_profile)
try:
profile.save()
except Exception: # pylint: disable=broad-except
log.exception("UserProfile creation failed for user {id}.".format(id=user.id))
raise
return (user, profile, registration)
def create_account_with_params(request, params):
"""
Given a request and a dict of parameters (which may or may not have come
from the request), create an account for the requesting user, including
creating a comments service user object and sending an activation email.
This also takes external/third-party auth into account, updates that as
necessary, and authenticates the user for the request's session.
Returns the newly created and authenticated user.
Raises AccountValidationError if an account with the username or email
specified by params already exists, or ValidationError if any of the given
parameters is invalid for any other reason.
Issues with this code:
* It is not transactional. If there is a failure part-way, an incomplete
account will be created and left in the database.
* Third-party auth passwords are not verified. There is a comment that
they are unused, but it would be helpful to have a sanity check on them.
* It is over 300 lines long (!) and includes disparate functionality, from
registration e-mails to all sorts of other things. It should be broken
up into semantically meaningful functions.
* The user-facing text is rather unfriendly (e.g. "Username must be a
minimum of two characters long" rather than "Please use a username of
at least two characters").
"""
# Copy params so we can modify it; we can't just do dict(params) because if
# params is request.POST, that results in a dict containing lists of values
params = dict(params.items())
# allow a custom set of required/optional/hidden fields to be defined via configuration
extra_fields = configuration_helpers.get_value(
'REGISTRATION_EXTRA_FIELDS',
getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
)
# Boolean of whether a 3rd party auth provider and credentials were provided in
# the API so the newly created account can link with the 3rd party account.
#
# Note: this is orthogonal to the 3rd party authentication pipeline that occurs
# when the account is created via the browser and redirect URLs.
should_link_with_social_auth = third_party_auth.is_enabled() and 'provider' in params
if should_link_with_social_auth or (third_party_auth.is_enabled() and pipeline.running(request)):
params["password"] = pipeline.make_random_password()
# Add a form requirement for data sharing consent if the EnterpriseCustomer
# for the request requires it at login
extra_fields['data_sharing_consent'] = data_sharing_consent_requirement_at_login(request)
# if doing signup for an external authorization, then get email, password, name from the eamap
# don't use the ones from the form, since the user could have hacked those
# unless originally we didn't get a valid email or name from the external auth
# TODO: We do not check whether these values meet all necessary criteria, such as email length
do_external_auth = 'ExternalAuthMap' in request.session
if do_external_auth:
eamap = request.session['ExternalAuthMap']
try:
validate_email(eamap.external_email)
params["email"] = eamap.external_email
except ValidationError:
pass
if eamap.external_name.strip() != '':
params["name"] = eamap.external_name
params["password"] = eamap.internal_password
log.debug(u'In create_account with external_auth: user = %s, email=%s', params["name"], params["email"])
extended_profile_fields = configuration_helpers.get_value('extended_profile_fields', [])
enforce_password_policy = (
settings.FEATURES.get("ENFORCE_PASSWORD_POLICY", False) and
not do_external_auth
)
# Can't have terms of service for certain SHIB users, like at Stanford
registration_fields = getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
tos_required = (
registration_fields.get('terms_of_service') != 'hidden' or
registration_fields.get('honor_code') != 'hidden'
) and (
not settings.FEATURES.get("AUTH_USE_SHIB") or
not settings.FEATURES.get("SHIB_DISABLE_TOS") or
not do_external_auth or
not eamap.external_domain.startswith(openedx.core.djangoapps.external_auth.views.SHIBBOLETH_DOMAIN_PREFIX)
)
form = AccountCreationForm(
data=params,
extra_fields=extra_fields,
extended_profile_fields=extended_profile_fields,
enforce_username_neq_password=True,
enforce_password_policy=enforce_password_policy,
tos_required=tos_required,
)
custom_form = get_registration_extension_form(data=params)
# Perform operations within a transaction that are critical to account creation
with transaction.atomic():
# first, create the account
(user, profile, registration) = _do_create_account(form, custom_form)
# next, link the account with social auth, if provided via the API.
# (If the user is using the normal register page, the social auth pipeline does the linking, not this code)
if should_link_with_social_auth:
backend_name = params['provider']
request.social_strategy = social_utils.load_strategy(request)
redirect_uri = reverse('social:complete', args=(backend_name, ))
request.backend = social_utils.load_backend(request.social_strategy, backend_name, redirect_uri)
social_access_token = params.get('access_token')
if not social_access_token:
raise ValidationError({
'access_token': [
_("An access_token is required when passing value ({}) for provider.").format(
params['provider']
)
]
})
request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_REGISTER_API
pipeline_user = None
error_message = ""
try:
pipeline_user = request.backend.do_auth(social_access_token, user=user)
except AuthAlreadyAssociated:
error_message = _("The provided access_token is already associated with another user.")
except (HTTPError, AuthException):
error_message = _("The provided access_token is not valid.")
if not pipeline_user or not isinstance(pipeline_user, User):
# Ensure user does not re-enter the pipeline
request.social_strategy.clean_partial_pipeline()
raise ValidationError({'access_token': [error_message]})
# Perform operations that are non-critical parts of account creation
preferences_api.set_user_preference(user, LANGUAGE_KEY, get_language())
if settings.FEATURES.get('ENABLE_DISCUSSION_EMAIL_DIGEST'):
try:
enable_notifications(user)
except Exception: # pylint: disable=broad-except
log.exception("Enable discussion notifications failed for user {id}.".format(id=user.id))
dog_stats_api.increment("common.student.account_created")
# If the user is registering via 3rd party auth, track which provider they use
third_party_provider = None
running_pipeline = None
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
third_party_provider = provider.Registry.get_from_pipeline(running_pipeline)
# Store received data sharing consent field values in the pipeline for use
# by any downstream pipeline elements which require them.
running_pipeline['kwargs']['data_sharing_consent'] = form.cleaned_data.get('data_sharing_consent', None)
# Track the user's registration
if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
identity_args = [
user.id, # pylint: disable=no-member
{
'email': user.email,
'username': user.username,
'name': profile.name,
# Mailchimp requires the age & yearOfBirth to be integers; we send a sane integer default if the value is falsy.
'age': profile.age or -1,
'yearOfBirth': profile.year_of_birth or datetime.datetime.now(UTC).year,
'education': profile.level_of_education_display,
'address': profile.mailing_address,
'gender': profile.gender_display,
'country': unicode(profile.country),
}
]
if hasattr(settings, 'MAILCHIMP_NEW_USER_LIST_ID'):
identity_args.append({
"MailChimp": {
"listId": settings.MAILCHIMP_NEW_USER_LIST_ID
}
})
analytics.identify(*identity_args)
analytics.track(
user.id,
"edx.bi.user.account.registered",
{
'category': 'conversion',
'label': params.get('course_id'),
'provider': third_party_provider.name if third_party_provider else None
},
context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
# Announce registration
REGISTER_USER.send(sender=None, user=user, profile=profile)
create_comments_service_user(user)
# Don't send email if we are:
#
# 1. Doing load testing.
# 2. Random user generation for other forms of testing.
# 3. External auth bypassing activation.
# 4. Have the platform configured to not require e-mail activation.
# 5. Registering a new user using a trusted third party provider (with skip_email_verification=True)
#
# Note that this feature is only tested as a flag set one way or
# the other for *new* systems. We need to be careful about
# changing settings on a running system to make sure no users are
# left in an inconsistent state (or doing a migration if they are).
send_email = (
not settings.FEATURES.get('SKIP_EMAIL_VALIDATION', None) and
not settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING') and
not (do_external_auth and settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH')) and
not (
third_party_provider and third_party_provider.skip_email_verification and
user.email == running_pipeline['kwargs'].get('details', {}).get('email')
)
)
if send_email:
dest_addr = user.email
context = {
'name': profile.name,
'key': registration.activation_key,
}
# composes activation email
subject = render_to_string('emails/activation_email_subject.txt', context)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message = render_to_string('emails/activation_email.txt', context)
from_address = configuration_helpers.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
if settings.FEATURES.get('REROUTE_ACTIVATION_EMAIL'):
dest_addr = settings.FEATURES['REROUTE_ACTIVATION_EMAIL']
message = ("Activation for %s (%s): %s\n" % (user, user.email, profile.name) +
'-' * 80 + '\n\n' + message)
send_activation_email.delay(subject, message, from_address, dest_addr)
else:
registration.activate()
_enroll_user_in_pending_courses(user) # Enroll student in any pending courses
# Immediately after a user creates an account, we log them in. They are only
# logged in until they close the browser. They can't log in again until they click
# the activation link from the email.
new_user = authenticate(username=user.username, password=params['password'])
login(request, new_user)
request.session.set_expiry(0)
try:
record_registration_attributions(request, new_user)
# Don't prevent a user from registering due to attribution errors.
except Exception: # pylint: disable=broad-except
log.exception('Error while attributing cookies to user registration.')
# TODO: there is no error checking here to see that the user actually logged in successfully,
# and is not yet an active user.
if new_user is not None:
AUDIT_LOG.info(u"Login success on new account creation - {0}".format(new_user.username))
if do_external_auth:
eamap.user = new_user
eamap.dtsignup = datetime.datetime.now(UTC)
eamap.save()
AUDIT_LOG.info(u"User registered with external_auth %s", new_user.username)
AUDIT_LOG.info(u'Updated ExternalAuthMap for %s to be %s', new_user.username, eamap)
if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'):
log.info('bypassing activation email')
new_user.is_active = True
new_user.save()
AUDIT_LOG.info(u"Login activated on extauth account - {0} ({1})".format(new_user.username, new_user.email))
return new_user
def _enroll_user_in_pending_courses(student):
"""
Enroll student in any pending courses he/she may have.
"""
ceas = CourseEnrollmentAllowed.objects.filter(email=student.email)
for cea in ceas:
if cea.auto_enroll:
enrollment = CourseEnrollment.enroll(student, cea.course_id)
manual_enrollment_audit = ManualEnrollmentAudit.get_manual_enrollment_by_email(student.email)
if manual_enrollment_audit is not None:
# get the enrolled by user and reason from the ManualEnrollmentAudit table.
# then create a new ManualEnrollmentAudit table entry for the same email
# different transition state.
ManualEnrollmentAudit.create_manual_enrollment_audit(
manual_enrollment_audit.enrolled_by, student.email, ALLOWEDTOENROLL_TO_ENROLLED,
manual_enrollment_audit.reason, enrollment
)
def record_affiliate_registration_attribution(request, user):
"""
Attribute this user's registration to the referring affiliate, if
applicable.
"""
affiliate_id = request.COOKIES.get(settings.AFFILIATE_COOKIE_NAME)
if user and affiliate_id:
UserAttribute.set_user_attribute(user, REGISTRATION_AFFILIATE_ID, affiliate_id)
def record_utm_registration_attribution(request, user):
"""
Attribute this user's registration to the latest UTM referrer, if
applicable.
"""
utm_cookie_name = RegistrationCookieConfiguration.current().utm_cookie_name
utm_cookie = request.COOKIES.get(utm_cookie_name)
if user and utm_cookie:
utm = json.loads(utm_cookie)
for utm_parameter_name in REGISTRATION_UTM_PARAMETERS:
utm_parameter = utm.get(utm_parameter_name)
if utm_parameter:
UserAttribute.set_user_attribute(
user,
REGISTRATION_UTM_PARAMETERS.get(utm_parameter_name),
utm_parameter
)
created_at_unixtime = utm.get('created_at')
if created_at_unixtime:
# We divide by 1000 here because the javascript timestamp generated is in milliseconds not seconds.
# PYTHON: time.time() => 1475590280.823698
# JS: new Date().getTime() => 1475590280823
created_at_datetime = datetime.datetime.fromtimestamp(int(created_at_unixtime) / float(1000), tz=UTC)
UserAttribute.set_user_attribute(
user,
REGISTRATION_UTM_CREATED_AT,
created_at_datetime
)
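# Illustrative UTM cookie payload (values are hypothetical):
#
#     {
#         "utm_source": "spring_newsletter",
#         "utm_medium": "email",
#         "utm_campaign": "demo-course-launch",
#         "created_at": 1475590280823
#     }
#
# Each utm_* key that appears in REGISTRATION_UTM_PARAMETERS is stored as a
# UserAttribute; created_at (a JavaScript millisecond timestamp) is converted
# to a datetime as shown above.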
def record_registration_attributions(request, user):
"""
Attribute this user's registration based on referrer cookies.
"""
record_affiliate_registration_attribution(request, user)
record_utm_registration_attribution(request, user)
@csrf_exempt
def create_account(request, post_override=None):
"""
JSON call to create new edX account.
Used by form in signup_modal.html, which is included into navigation.html
"""
warnings.warn("Please use RegistrationView instead.", DeprecationWarning)
try:
user = create_account_with_params(request, post_override or request.POST)
except AccountValidationError as exc:
return JsonResponse({'success': False, 'value': exc.message, 'field': exc.field}, status=400)
except ValidationError as exc:
field, error_list = next(exc.message_dict.iteritems())
return JsonResponse(
{
"success": False,
"field": field,
"value": error_list[0],
},
status=400
)
redirect_url = None # The AJAX method calling should know the default destination upon success
# Resume the third-party-auth pipeline if necessary.
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
redirect_url = pipeline.get_complete_url(running_pipeline['backend'])
response = JsonResponse({
'success': True,
'redirect_url': redirect_url,
})
set_logged_in_cookies(request, response, user)
return response
def auto_auth(request):
"""
Create or configure a user account, then log in as that user.
Enabled only when
settings.FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] is true.
Accepts the following querystring parameters:
* `username`, `email`, and `password` for the user account
* `full_name` for the user profile (the user's full name; defaults to the username)
* `staff`: Set to "true" to make the user global staff.
* `course_id`: Enroll the student in the course with `course_id`
* `roles`: Comma-separated list of roles to grant the student in the course with `course_id`
* `no_login`: Define this to create the user but not log in
* `redirect`: Set to "true" to redirect to the `redirect_to` value if set, or to the
course home page if `course_id` is defined; otherwise redirects to the dashboard
* `redirect_to`: will redirect to this url
* `is_active`: make/update the account with the status provided as 'is_active'
If username, email, or password are not provided, use
randomly generated credentials.
"""
# Generate a unique name to use if none provided
unique_name = uuid.uuid4().hex[0:30]
# Use the params from the request, otherwise use these defaults
username = request.GET.get('username', unique_name)
password = request.GET.get('password', unique_name)
email = request.GET.get('email', unique_name + "@example.com")
full_name = request.GET.get('full_name', username)
is_staff = request.GET.get('staff', None)
is_superuser = request.GET.get('superuser', None)
course_id = request.GET.get('course_id', None)
redirect_to = request.GET.get('redirect_to', None)
active_status = request.GET.get('is_active')
# mode has to be one of 'honor'/'professional'/'verified'/'audit'/'no-id-professional'/'credit'
enrollment_mode = request.GET.get('enrollment_mode', 'honor')
active_status = (not active_status or active_status == 'true')
course_key = None
if course_id:
course_key = CourseLocator.from_string(course_id)
role_names = [v.strip() for v in request.GET.get('roles', '').split(',') if v.strip()]
redirect_when_done = request.GET.get('redirect', '').lower() == 'true' or redirect_to
login_when_done = 'no_login' not in request.GET
form = AccountCreationForm(
data={
'username': username,
'email': email,
'password': password,
'name': full_name,
},
tos_required=False
)
# Attempt to create the account.
# If successful, this will return a tuple containing
# the new user, profile, and registration objects.
try:
user, profile, reg = _do_create_account(form)
except (AccountValidationError, ValidationError):
# Attempt to retrieve the existing user.
user = User.objects.get(username=username)
user.email = email
user.set_password(password)
user.is_active = active_status
user.save()
profile = UserProfile.objects.get(user=user)
reg = Registration.objects.get(user=user)
# Set the user's global staff bit
if is_staff is not None:
user.is_staff = (is_staff == "true")
user.save()
if is_superuser is not None:
user.is_superuser = (is_superuser == "true")
user.save()
if active_status:
reg.activate()
reg.save()
# ensure parental consent threshold is met
year = datetime.date.today().year
age_limit = settings.PARENTAL_CONSENT_AGE_LIMIT
profile.year_of_birth = (year - age_limit) - 1
profile.save()
# Enroll the user in a course
if course_key is not None:
CourseEnrollment.enroll(user, course_key, mode=enrollment_mode)
# Apply the roles
for role_name in role_names:
role = Role.objects.get(name=role_name, course_id=course_key)
user.roles.add(role)
# Log in as the user
if login_when_done:
user = authenticate(username=username, password=password)
login(request, user)
create_comments_service_user(user)
# Provide the user with a valid CSRF token
# then return a 200 response unless redirect is true
if redirect_when_done:
# Redirect to specific page if specified
if redirect_to:
redirect_url = redirect_to
# Redirect to course info page if course_id is known
elif course_id:
try:
# redirect to course info page in LMS
redirect_url = reverse(
'info',
kwargs={'course_id': course_id}
)
except NoReverseMatch:
# redirect to course outline page in Studio
redirect_url = reverse(
'course_handler',
kwargs={'course_key_string': course_id}
)
else:
try:
# redirect to dashboard for LMS
redirect_url = reverse('dashboard')
except NoReverseMatch:
# redirect to home for Studio
redirect_url = reverse('home')
return redirect(redirect_url)
elif request.META.get('HTTP_ACCEPT') == 'application/json':
response = JsonResponse({
'created_status': u"Logged in" if login_when_done else "Created",
'username': username,
'email': email,
'password': password,
'user_id': user.id, # pylint: disable=no-member
'anonymous_id': anonymous_id_for_user(user, None),
})
else:
success_msg = u"{} user {} ({}) with password {} and user_id {}".format(
u"Logged in" if login_when_done else "Created",
username, email, password, user.id # pylint: disable=no-member
)
response = HttpResponse(success_msg)
response.set_cookie('csrftoken', csrf(request)['csrf_token'])
return response
@ensure_csrf_cookie
def activate_account(request, key):
"""When link in activation e-mail is clicked"""
regs = Registration.objects.filter(activation_key=key)
if len(regs) == 1:
user_logged_in = request.user.is_authenticated()
already_active = True
if not regs[0].user.is_active:
regs[0].activate()
already_active = False
# Enroll student in any pending courses he/she may have if auto_enroll flag is set
_enroll_user_in_pending_courses(regs[0].user)
resp = render_to_response(
"registration/activation_complete.html",
{
'user_logged_in': user_logged_in,
'already_active': already_active
}
)
return resp
if len(regs) == 0:
return render_to_response(
"registration/activation_invalid.html",
{'csrf': csrf(request)['csrf_token']}
)
return HttpResponseServerError(_("Unknown error. Please e-mail us to let us know how it happened."))
@csrf_exempt
@require_POST
def password_reset(request):
""" Attempts to send a password reset e-mail. """
# Add some rate limiting here by re-using the RateLimitMixin as a helper class
limiter = BadRequestRateLimiter()
if limiter.is_rate_limit_exceeded(request):
AUDIT_LOG.warning("Rate limit exceeded in password_reset")
return HttpResponseForbidden()
form = PasswordResetFormNoActive(request.POST)
if form.is_valid():
form.save(use_https=request.is_secure(),
from_email=configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL),
request=request,
domain_override=request.get_host())
# When password change is complete, a "edx.user.settings.changed" event will be emitted.
# But because changing the password is multi-step, we also emit an event here so that we can
# track where the request was initiated.
tracker.emit(
SETTING_CHANGE_INITIATED,
{
"setting": "password",
"old": None,
"new": None,
"user_id": request.user.id,
}
)
destroy_oauth_tokens(request.user)
else:
# bad user? tick the rate limiter counter
AUDIT_LOG.info("Bad password_reset user passed in.")
limiter.tick_bad_request_counter(request)
return JsonResponse({
'success': True,
'value': render_to_string('registration/password_reset_done.html', {}),
})
def uidb36_to_uidb64(uidb36):
"""
Needed to support old password reset URLs that use base36-encoded user IDs
https://github.com/django/django/commit/1184d077893ff1bc947e45b00a4d565f3df81776#diff-c571286052438b2e3190f8db8331a92bR231
Args:
uidb36: base36-encoded user ID
Returns: the base64-encoded user ID; if the input is not a valid base36 value, a dummy, invalid ID
"""
try:
uidb64 = force_text(urlsafe_base64_encode(force_bytes(base36_to_int(uidb36))))
except ValueError:
uidb64 = '1' # dummy invalid ID (incorrect padding for base64)
return uidb64
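# Illustrative conversions (assuming urlsafe_base64_encode strips '=' padding,
# as in the Django release this module targets):
#
#     >>> uidb36_to_uidb64('abc')   # base36 'abc' -> 13368 -> bytes '13368'
#     'MTMzNjg'
#     >>> uidb36_to_uidb64('not-base36!')
#     '1'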
def validate_password(user, password):
"""
Tie in password policy enforcement as an optional level of
security protection
Args:
user: the user object whose password we're checking.
password: the user's proposed new password.
Returns:
is_valid_password: a boolean indicating if the new password
passes the validation.
err_msg: an error message if there's a violation of one of the password
checks. Otherwise, `None`.
"""
err_msg = None
if settings.FEATURES.get('ENFORCE_PASSWORD_POLICY', False):
try:
validate_password_strength(password)
except ValidationError as err:
err_msg = _('Password: ') + '; '.join(err.messages)
# also, check the password reuse policy
if not PasswordHistory.is_allowable_password_reuse(user, password):
if user.is_staff:
num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE']
else:
num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE']
# Because of how ngettext works, splitting the following into shorter lines would be ugly.
# pylint: disable=line-too-long
err_msg = ungettext(
"You are re-using a password that you have used recently. You must have {num} distinct password before reusing a previous password.",
"You are re-using a password that you have used recently. You must have {num} distinct passwords before reusing a previous password.",
num_distinct
).format(num=num_distinct)
# also, check to see if passwords are getting reset too frequently
if PasswordHistory.is_password_reset_too_soon(user):
num_days = settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS']
# Because of how ngettext works, splitting the following into shorter lines would be ugly.
# pylint: disable=line-too-long
err_msg = ungettext(
"You are resetting passwords too frequently. Due to security policies, {num} day must elapse between password resets.",
"You are resetting passwords too frequently. Due to security policies, {num} days must elapse between password resets.",
num_days
).format(num=num_days)
is_password_valid = err_msg is None
return is_password_valid, err_msg
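# Illustrative usage (hypothetical password; the extra checks only run when
# ENFORCE_PASSWORD_POLICY is enabled):
#
#     is_valid, err_msg = validate_password(user, 'new-password-candidate')
#     if not is_valid:
#         # surface err_msg to the user, as password_reset_confirm_wrapper does below
#         ...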
def password_reset_confirm_wrapper(request, uidb36=None, token=None):
"""
A wrapper around django.contrib.auth.views.password_reset_confirm.
Needed because we want to set the user as active at this step.
We also optionally do some additional password policy checks.
"""
# convert old-style base36-encoded user id to base64
uidb64 = uidb36_to_uidb64(uidb36)
platform_name = {
"platform_name": configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME)
}
try:
uid_int = base36_to_int(uidb36)
user = User.objects.get(id=uid_int)
except (ValueError, User.DoesNotExist):
# if there's any error getting a user, just let django's
# password_reset_confirm function handle it.
return password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=platform_name
)
if request.method == 'POST':
password = request.POST['new_password1']
is_password_valid, password_err_msg = validate_password(user, password)
if not is_password_valid:
# We have a password reset attempt which violates some security
# policy. Use the existing Django template to communicate that
# back to the user.
context = {
'validlink': False,
'form': None,
'title': _('Password reset unsuccessful'),
'err_msg': password_err_msg,
}
context.update(platform_name)
return TemplateResponse(
request, 'registration/password_reset_confirm.html', context
)
# remember what the old password hash is before we call down
old_password_hash = user.password
response = password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=platform_name
)
# If the password reset was unsuccessful, a template response is returned (status_code 200).
# If the form is invalid, show an error to the user.
# Note that if the password reset was successful, we get a redirect response (status_code 302).
if response.status_code == 200 and not response.context_data['form'].is_valid():
response.context_data['err_msg'] = _('Error in resetting your password. Please try again.')
return response
# get the updated user
updated_user = User.objects.get(id=uid_int)
# did the password hash change, if so record it in the PasswordHistory
if updated_user.password != old_password_hash:
entry = PasswordHistory()
entry.create(updated_user)
else:
response = password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=platform_name
)
response_was_successful = response.context_data.get('validlink')
if response_was_successful and not user.is_active:
user.is_active = True
user.save()
return response
def reactivation_email_for_user(user):
try:
reg = Registration.objects.get(user=user)
except Registration.DoesNotExist:
return JsonResponse({
"success": False,
"error": _('No inactive user with this e-mail exists'),
}) # TODO: this should be status code 400 # pylint: disable=fixme
context = {
'name': user.profile.name,
'key': reg.activation_key,
}
subject = render_to_string('emails/activation_email_subject.txt', context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/activation_email.txt', context)
from_address = configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
try:
user.email_user(subject, message, from_address)
except Exception: # pylint: disable=broad-except
log.error(
u'Unable to send reactivation email from "%s" to "%s"',
from_address,
user.email,
exc_info=True
)
return JsonResponse({
"success": False,
"error": _('Unable to send reactivation email')
}) # TODO: this should be status code 500 # pylint: disable=fixme
return JsonResponse({"success": True})
def validate_new_email(user, new_email):
"""
Given a new email for a user, does some basic verification of the new address. If any issues are encountered
during verification, a ValueError will be thrown.
"""
try:
validate_email(new_email)
except ValidationError:
raise ValueError(_('Valid e-mail address required.'))
if new_email == user.email:
raise ValueError(_('Old email is the same as the new email.'))
if User.objects.filter(email=new_email).count() != 0:
raise ValueError(_('An account with this e-mail already exists.'))
def do_email_change_request(user, new_email, activation_key=None):
"""
Given a new email for a user, does some basic verification of the new address and sends an activation message
to the new address. If any issues are encountered with verification or sending the message, a ValueError will
be thrown.
"""
pec_list = PendingEmailChange.objects.filter(user=user)
if len(pec_list) == 0:
pec = PendingEmailChange()
pec.user = user
else:
pec = pec_list[0]
# if activation_key is not passed as an argument, generate a random key
if not activation_key:
activation_key = uuid.uuid4().hex
pec.new_email = new_email
pec.activation_key = activation_key
pec.save()
context = {
'key': pec.activation_key,
'old_email': user.email,
'new_email': pec.new_email
}
subject = render_to_string('emails/email_change_subject.txt', context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/email_change.txt', context)
from_address = configuration_helpers.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
try:
mail.send_mail(subject, message, from_address, [pec.new_email])
except Exception: # pylint: disable=broad-except
log.error(u'Unable to send email activation link to user from "%s"', from_address, exc_info=True)
raise ValueError(_('Unable to send email activation link. Please try again later.'))
# When the email address change is complete, an "edx.user.settings.changed" event will be emitted.
# But because changing the email address is multi-step, we also emit an event here so that we can
# track where the request was initiated.
tracker.emit(
SETTING_CHANGE_INITIATED,
{
"setting": "email",
"old": context['old_email'],
"new": context['new_email'],
"user_id": user.id,
}
)
@ensure_csrf_cookie
def confirm_email_change(request, key): # pylint: disable=unused-argument
"""
User requested a new e-mail. This is called when the activation
link is clicked. We confirm with the old e-mail, and update the account to use the new e-mail.
"""
with transaction.atomic():
try:
pec = PendingEmailChange.objects.get(activation_key=key)
except PendingEmailChange.DoesNotExist:
response = render_to_response("invalid_email_key.html", {})
transaction.set_rollback(True)
return response
user = pec.user
address_context = {
'old_email': user.email,
'new_email': pec.new_email
}
if len(User.objects.filter(email=pec.new_email)) != 0:
response = render_to_response("email_exists.html", {})
transaction.set_rollback(True)
return response
subject = render_to_string('emails/email_change_subject.txt', address_context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/confirm_email_change.txt', address_context)
u_prof = UserProfile.objects.get(user=user)
meta = u_prof.get_meta()
if 'old_emails' not in meta:
meta['old_emails'] = []
meta['old_emails'].append([user.email, datetime.datetime.now(UTC).isoformat()])
u_prof.set_meta(meta)
u_prof.save()
# Send it to the old email...
try:
user.email_user(
subject,
message,
configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
)
except Exception: # pylint: disable=broad-except
log.warning('Unable to send confirmation email to old address', exc_info=True)
response = render_to_response("email_change_failed.html", {'email': user.email})
transaction.set_rollback(True)
return response
user.email = pec.new_email
user.save()
pec.delete()
# And send it to the new email...
try:
user.email_user(
subject,
message,
configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
)
except Exception: # pylint: disable=broad-except
log.warning('Unable to send confirmation email to new address', exc_info=True)
response = render_to_response("email_change_failed.html", {'email': pec.new_email})
transaction.set_rollback(True)
return response
response = render_to_response("email_change_successful.html", address_context)
return response
@require_POST
@login_required
@ensure_csrf_cookie
def change_email_settings(request):
"""Modify logged-in user's setting for receiving emails from a course."""
user = request.user
course_id = request.POST.get("course_id")
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
receive_emails = request.POST.get("receive_emails")
if receive_emails:
optout_object = Optout.objects.filter(user=user, course_id=course_key)
if optout_object:
optout_object.delete()
log.info(
u"User %s (%s) opted in to receive emails from course %s",
user.username,
user.email,
course_id,
)
track.views.server_track(
request,
"change-email-settings",
{"receive_emails": "yes", "course": course_id},
page='dashboard',
)
else:
Optout.objects.get_or_create(user=user, course_id=course_key)
log.info(
u"User %s (%s) opted out of receiving emails from course %s",
user.username,
user.email,
course_id,
)
track.views.server_track(
request,
"change-email-settings",
{"receive_emails": "no", "course": course_id},
page='dashboard',
)
return JsonResponse({"success": True})
class LogoutView(TemplateView):
"""
Logs out user and redirects.
The template should load iframes to log the user out of OpenID Connect services.
See http://openid.net/specs/openid-connect-logout-1_0.html.
"""
oauth_client_ids = []
template_name = 'logout.html'
# Keep track of the page to which the user should ultimately be redirected.
target = reverse_lazy('cas-logout') if settings.FEATURES.get('AUTH_USE_CAS') else '/'
def dispatch(self, request, *args, **kwargs): # pylint: disable=missing-docstring
# We do not log here, because we have a handler registered to perform logging on successful logouts.
request.is_from_logout = True
# Get the list of authorized clients before we clear the session.
self.oauth_client_ids = request.session.get(edx_oauth2_provider.constants.AUTHORIZED_CLIENTS_SESSION_KEY, [])
logout(request)
# If we don't need to deal with OIDC logouts, just redirect the user.
if LogoutViewConfiguration.current().enabled and self.oauth_client_ids:
response = super(LogoutView, self).dispatch(request, *args, **kwargs)
else:
response = redirect(self.target)
# Clear the cookie used by the edx.org marketing site
delete_logged_in_cookies(response)
return response
def _build_logout_url(self, url):
"""
Builds a logout URL with the `no_redirect` query string parameter.
Args:
url (str): IDA logout URL
Returns:
str
"""
scheme, netloc, path, query_string, fragment = urlsplit(url)
query_params = parse_qs(query_string)
query_params['no_redirect'] = 1
new_query_string = urlencode(query_params, doseq=True)
return urlunsplit((scheme, netloc, path, new_query_string, fragment))
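# Illustrative sketch (URL hypothetical; query parameter order may vary):
#
#   self._build_logout_url('https://ida.example.com/logout?next=%2F')
#   # -> 'https://ida.example.com/logout?next=%2F&no_redirect=1'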
def get_context_data(self, **kwargs):
context = super(LogoutView, self).get_context_data(**kwargs)
# Create a list of URIs that must be called to log the user out of all of the IDAs.
uris = Client.objects.filter(client_id__in=self.oauth_client_ids,
logout_uri__isnull=False).values_list('logout_uri', flat=True)
referrer = self.request.META.get('HTTP_REFERER', '').strip('/')
logout_uris = []
for uri in uris:
if not referrer or (referrer and not uri.startswith(referrer)):
logout_uris.append(self._build_logout_url(uri))
context.update({
'target': self.target,
'logout_uris': logout_uris,
})
return context
| amir-qayyum-khan/edx-platform | common/djangoapps/student/views.py | Python | agpl-3.0 | 109,839 | [
"VisIt"
] | ff0e395b02cab72f9b1609b38cfc1abd6eeecd0f2f44e16e290c4597fbe7a5ca |
from __future__ import absolute_import, division, print_function
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import TestCase, main
import numpy as np
from skbio import TreeNode
from skbio.tree import majority_rule
from skbio.tree._majority_rule import (_walk_clades, _filter_clades,
_build_trees)
class MajorityRuleTests(TestCase):
def test_majority_rule(self):
trees = [
TreeNode.from_newick("(A,(B,(H,(D,(J,(((G,E),(F,I)),C))))));"),
TreeNode.from_newick("(A,(B,(D,((J,H),(((G,E),(F,I)),C)))));"),
TreeNode.from_newick("(A,(B,(D,(H,(J,(((G,E),(F,I)),C))))));"),
TreeNode.from_newick("(A,(B,(E,(G,((F,I),((J,(H,D)),C))))));"),
TreeNode.from_newick("(A,(B,(E,(G,((F,I),(((J,H),D),C))))));"),
TreeNode.from_newick("(A,(B,(E,((F,I),(G,((J,(H,D)),C))))));"),
TreeNode.from_newick("(A,(B,(E,((F,I),(G,(((J,H),D),C))))));"),
TreeNode.from_newick("(A,(B,(E,((G,(F,I)),((J,(H,D)),C)))));"),
TreeNode.from_newick("(A,(B,(E,((G,(F,I)),(((J,H),D),C)))));")]
exp = TreeNode.from_newick("(((E,(G,(F,I),(C,(D,J,H)))),B),A);")
obs = majority_rule(trees)
self.assertEqual(exp.compare_subsets(obs[0]), 0.0)
self.assertEqual(len(obs), 1)
tree = obs[0]
exp_supports = sorted([9.0, 9.0, 9.0, 6.0, 6.0, 6.0])
obs_supports = sorted([n.support for n in tree.non_tips()])
self.assertEqual(obs_supports, exp_supports)
obs = majority_rule(trees, weights=np.ones(len(trees)) * 2)
self.assertEqual(exp.compare_subsets(obs[0]), 0.0)
self.assertEqual(len(obs), 1)
tree = obs[0]
exp_supports = sorted([18.0, 18.0, 12.0, 18.0, 12.0, 12.0])
obs_supports = sorted([n.support for n in tree.non_tips()])
with self.assertRaises(ValueError):
majority_rule(trees, weights=[1, 2])
def test_majority_rule_multiple_trees(self):
trees = [
TreeNode.from_newick("((a,b),(c,d),(e,f))"),
TreeNode.from_newick("(a,(c,d),b,(e,f))"),
TreeNode.from_newick("((c,d),(e,f),b)"),
TreeNode.from_newick("(a,(c,d),(e,f))")]
trees = majority_rule(trees)
self.assertEqual(len(trees), 4)
exp = set([
frozenset(['a']),
frozenset(['b']),
frozenset([None, 'c', 'd']),
frozenset([None, 'e', 'f'])])
obs = set([frozenset([n.name for n in t.traverse()]) for t in trees])
self.assertEqual(obs, exp)
def test_walk_clades(self):
trees = [TreeNode.from_newick("((A,B),(D,E));"),
TreeNode.from_newick("((A,B),(D,(E,X)));")]
exp_clades = [
(frozenset(['A']), 2.0),
(frozenset(['B']), 2.0),
(frozenset(['A', 'B']), 2.0),
(frozenset(['D', 'E']), 1.0),
(frozenset(['D', 'E', 'A', 'B']), 1.0),
(frozenset(['D']), 2.0),
(frozenset(['E']), 2.0),
(frozenset(['X']), 1.0),
(frozenset(['E', 'X']), 1.0),
(frozenset(['D', 'E', 'X']), 1.0),
(frozenset(['A', 'B', 'D', 'E', 'X']), 1.0)]
exp_lengths_nolength = {
frozenset(['A']): None,
frozenset(['B']): None,
frozenset(['A', 'B']): None,
frozenset(['D', 'E']): None,
frozenset(['D', 'E', 'A', 'B']): None,
frozenset(['D']): None,
frozenset(['E']): None,
frozenset(['X']): None,
frozenset(['E', 'X']): None,
frozenset(['D', 'E', 'X']): None,
frozenset(['A', 'B', 'D', 'E', 'X']): None}
exp_lengths = {
frozenset(['A']): 2.0,
frozenset(['B']): 2.0,
frozenset(['A', 'B']): 2.0,
frozenset(['D', 'E']): 1.0,
frozenset(['D', 'E', 'A', 'B']): 1.0,
frozenset(['D']): 2.0,
frozenset(['E']): 2.0,
frozenset(['X']): 1.0,
frozenset(['E', 'X']): 1.0,
frozenset(['D', 'E', 'X']): 1.0,
frozenset(['A', 'B', 'D', 'E', 'X']): 1.0}
obs_clades, obs_lengths = _walk_clades(trees, np.ones(len(trees)))
self.assertEqual(set(obs_clades), set(exp_clades))
self.assertEqual(obs_lengths, exp_lengths_nolength)
for t in trees:
for n in t.traverse(include_self=True):
n.length = 2.0
obs_clades, obs_lengths = _walk_clades(trees, np.ones(len(trees)))
self.assertEqual(set(obs_clades), set(exp_clades))
self.assertEqual(obs_lengths, exp_lengths)
def test_filter_clades(self):
clade_counts = [(frozenset(['A', 'B']), 8),
(frozenset(['A', 'C']), 7),
(frozenset(['A']), 6),
(frozenset(['B']), 5)]
obs = _filter_clades(clade_counts, 2)
exp = {frozenset(['A', 'B']): 8,
frozenset(['A']): 6,
frozenset(['B']): 5}
self.assertEqual(obs, exp)
clade_counts = [(frozenset(['A']), 8),
(frozenset(['B']), 7),
(frozenset(['C']), 7),
(frozenset(['A', 'B']), 6),
(frozenset(['A', 'B', 'C']), 5),
(frozenset(['D']), 2)]
obs = _filter_clades(clade_counts, 4)
exp = {frozenset(['A']): 8,
frozenset(['B']): 7,
frozenset(['C']): 7,
frozenset(['A', 'B']): 6,
frozenset(['A', 'B', 'C']): 5}
self.assertEqual(obs, exp)
def test_build_trees(self):
clade_counts = {frozenset(['A', 'B']): 6,
frozenset(['A']): 7,
frozenset(['B']): 8}
edge_lengths = {frozenset(['A', 'B']): 1,
frozenset(['A']): 2,
frozenset(['B']): 3}
tree = _build_trees(clade_counts, edge_lengths, 'foo')[0]
self.assertEqual(tree.foo, 6)
tree_foos = set([c.foo for c in tree.children])
tree_lens = set([c.length for c in tree.children])
self.assertEqual(tree_foos, set([7, 8]))
self.assertEqual(tree_lens, set([2, 3]))
if __name__ == '__main__':
main()
| JWDebelius/scikit-bio | skbio/tree/tests/test_majority_rule.py | Python | bsd-3-clause | 6,695 | [
"scikit-bio"
] | 402821596829665c197a49f7354951cfc74013d059a80cb8121c716673aab037 |
#!/usr/bin/python
from __future__ import division
import numpy as np
import argparse, itertools
import matplotlib.pyplot as plt
from sklearn import svm
from template_speech_rec import configParserWrapper
from scipy.spatial.distance import pdist, squareform, cdist
def main(args):
"""
"""
config_d = configParserWrapper.load_settings(open(args.config,'r'))
y = []
for fpath_id,fpath in enumerate(args.input_data_matrices):
X0 = np.load(fpath)
X0_shape = X0.shape[1:]
y.extend(fpath_id * np.ones(len(X0)))
y = np.array(y)
# add in two coordinates for the length
if args.input_lengths is not None:
X = np.zeros((len(y),np.prod(X0_shape)+2),dtype=float)
else:
X = np.zeros((len(y),np.prod(X0_shape)),dtype=float)
cur_idx = 0
for fpath_id,fpath in enumerate(args.input_data_matrices):
X0 = np.load(fpath)
X[cur_idx:cur_idx+len(X0),:np.prod(X0_shape)] = X0.reshape(len(X0),
np.prod(X0_shape))
cur_idx += len(X0)
if args.input_lengths is not None:
train_ls = []
for fpath in args.input_lengths:
ls = np.loadtxt(fpath,dtype=int)
train_ls.extend(ls[:,2])
train_ls = np.log(np.tile(np.array(train_ls),
(2,1)).T)
train_ls[:,1] *= train_ls[:,1]
X[:,-2:] = train_ls
X_test = []
y_test = []
for fpath_id,fpath in enumerate(args.input_test_data_matrices):
X0 = np.load(fpath).astype(np.float)
X0_shape = X0.shape[1:]
X_test.extend(X0.reshape(len(X0),
np.prod(X0_shape)))
y_test.extend(fpath_id * np.ones(len(X0)))
X_test = np.array(X_test)
y_test = np.array(y_test)
if args.input_test_lengths is not None:
test_ls = []
for fpath in args.input_test_lengths:
ls = np.loadtxt(fpath,dtype=int)
test_ls.extend(ls[:,2])
test_ls = np.log(np.tile(np.array(test_ls),
(2,1)).T)
test_ls[:,1] *= test_ls[:,1]
X_test = np.hstack((X_test,test_ls))
if args.use_weights is not None:
weights = np.load(args.use_weights)
weights = weights.reshape(weights.size)
X *= weights
X_test *= weights
if args.do_kernel_dist_features:
X_test = np.exp(-cdist(X_test,X, 'euclidean'))
X = np.exp(-squareform(pdist(X, 'euclidean')))
penalty_names = config_d['SVM']['penalty_list'][::2]
penalty_values = tuple( float(k) for k in config_d['SVM']['penalty_list'][1::2])
dev_results = ()
exp_descriptions = ()
exp_description_id = 0
for penalty_name, penalty_value in itertools.izip(penalty_names,penalty_values):
if args.v:
print '%s %s' % ('linear', penalty_name)
if config_d['SVM']['kernel'] == 'linear':
clf = svm.LinearSVC(C=penalty_value,
loss='l1')
clf.fit(X,y)
elif config_d['SVM']['kernel'] == 'polynomial':
import pdb; pdb.set_trace()
np.save('%s_%s_%s_coef.npy' % (args.output_fls_prefix,config_d['SVM']['kernel'],
penalty_name),
clf.coef_[0])
np.save('%s_%s_%s_intercept.npy' % (args.output_fls_prefix,config_d['SVM']['kernel'],
penalty_name),
clf.intercept_)
y_test_hat = clf.predict(X_test)
exp_descriptions += ((config_d['SVM']['kernel'],penalty_name),)
dev_results += ( (exp_description_id,
np.abs(y_test_hat-y_test).sum()/len(y_test), # error rate
np.abs(y_test_hat[y_test==0]-y_test[y_test==0]).sum()/len(y_test[y_test==0]), # mistakes by class 0
np.abs(y_test_hat[y_test==1]-y_test[y_test==1]).sum()/len(y_test[y_test==1]) # mistakes by class 1
),)
if args.v:
print '\t'.join(tuple( str(k) for k in dev_results[-1]))
exp_description_id +=1
open('%s_exp_descriptions' % args.output_fls_prefix,
'w').write('\n'.join(tuple(
'%d %s' % (k,
' '.join(d))
for k,d in enumerate(exp_descriptions))))
np.save('%s_dev_results.npy' % args.output_fls_prefix,
np.array(dev_results))
if __name__=="__main__":
parser = argparse.ArgumentParser("""
Train the SVM and save the statistics about the
filters to a directory. This is a wrapper for the linear
SVM provided by scikit-learn.
""")
parser.add_argument('--input_data_matrices',
type=str,
nargs='+',
help='list of paths to input matrices, generally assumed to only accept two')
parser.add_argument('--input_test_data_matrices',
type=str,
nargs='+',
help='list of paths to input test matrices, generally assumed to only accept two; should be in the same order as the --input_data_matrices arguments')
parser.add_argument('--input_lengths',
type=str,
nargs='+',
default=None,
help='list of paths to the lengths of the input examples of training data, Default is None in which case nothing is included')
parser.add_argument('--input_test_lengths',
type=str,
nargs='+',
default=None,
help='list of paths to the lengths of the input examples of testing data, Default is None in which case nothing is included')
parser.add_argument('--output_fls_prefix',
type=str,
help='prefix for where all the different output files are going to be saved')
parser.add_argument('--config',
type=str,
default='conf/main.config',
help='configuration file')
parser.add_argument('--train_config',
type=str,
default='conf/train.config',
help='train config that will be read and written to')
parser.add_argument('--use_weights',
type=str,
default=None,
help='weights for the feature vector')
parser.add_argument('--do_kernel_dist_features',
action='store_true',
help='whether to use the Gaussian kernel to get the SVM features, generally for use with a weight vector')
parser.add_argument('-v',
action='store_true',
help='verbosity flag')
main(parser.parse_args())
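# Example invocation (all paths are hypothetical placeholders):
#
#   python local/CSVMTrain.py \
#       --input_data_matrices class0_train.npy class1_train.npy \
#       --input_test_data_matrices class0_dev.npy class1_dev.npy \
#       --config conf/main.config \
#       --output_fls_prefix exp/svm_run1 \
#       -v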
| markstoehr/phoneclassification | local/CSVMTrain.py | Python | gpl-3.0 | 6,956 | [
"Gaussian"
] | 69e0c4f58c0d56f0c6383de66c0460a1c1462174f82073f3e63557308d9bbbd4 |
# Copyright 2012 by Eric Talevich. All rights reserved.
# This code is part of the Biopython distribution and governed by its license.
# Please see the LICENSE file that should have been included as part of this
# package.
"""Command-line wrapper for the tree inference program RAxML.
Derived from the help page for RAxML version 7.3 by Alexandros Stamatakis, but
should work for any version 7.X (and probably earlier for most options).
"""
__docformat__ = "restructuredtext en"
from Bio.Application import _Option, _Switch, AbstractCommandline
class RaxmlCommandline(AbstractCommandline):
"""Command-line wrapper for the tree inference program RAxML.
The required parameters are 'sequences' (-s), 'model' (-m) and 'name' (-n).
The parameter 'parsimony_seed' (-p) must also be set for RAxML, but if you
do not specify it, this wrapper will set the seed to 10000 for you.
Example:
>>> from Bio.Phylo.Applications import RaxmlCommandline
>>> raxml_cline = RaxmlCommandline(sequences="Tests/Phylip/interlaced2.phy",
... model="PROTCATWAG", name="interlaced2")
>>> print raxml_cline
raxmlHPC -m PROTCATWAG -n interlaced2 -p 10000 -s Tests/Phylip/interlaced2.phy
You would typically run the command line with raxml_cline() or via
the Python subprocess module, as described in the Biopython tutorial.
Citation:
Stamatakis A.
RAxML-VI-HPC: Maximum Likelihood-based Phylogenetic Analyses with
Thousands of Taxa and Mixed Models.
Bioinformatics 2006, 22(21):2688-2690.
Homepage: http://sco.h-its.org/exelixis/software.html
"""
def __init__(self, cmd='raxmlHPC', **kwargs):
self.parameters = [
_Option(['-a', 'weight_filename'],
"Name of a column weight file to assign individual weights "
"to each column of the alignment. Those weights must be "
"integers separated by any type and number of whitespaces "
"within a separate file.",
filename=True,
equate=False,
),
_Option(['-b', 'bootstrap_seed'],
"Random seed for bootstrapping.",
equate=False,
),
_Option(['-c', 'num_categories'],
"Number of distinct rate categories for RAxML when "
"evolution model is set to GTRCAT or GTRMIX."
"Individual per-site rates are categorized into this "
"many rate categories to accelerate computations. "
"Default: 25.",
equate=False,
),
_Switch(['-d', 'random_starting_tree'],
"Start ML optimization from random starting tree."
),
_Option(['-e', 'epsilon'],
"Set model optimization precision in log likelihood units "
"for final optimization of tree topology under MIX/MIXI "
"or GAMMA/GAMMAI."
"Default: 0.1 for models not using proportion of "
"invariant sites estimate; 0.001 for models using "
"proportion of invariant sites estimate.",
equate=False,
),
_Option(['-E', 'exclude_filename'],
"An exclude file name, containing a specification of "
"alignment positions you wish to exclude. Format is "
"similar to Nexus, the file shall contain entries like "
"'100-200 300-400'; to exclude a single column write, "
"e.g., '100-100'. If you use a mixed model, an "
"appropriately adapted model file will be written.",
filename=True,
equate=False,
),
_Option(['-f', 'algorithm'],
"""Select algorithm:
a: Rapid Bootstrap analysis and search for best-scoring ML
tree in one program run.
b: Draw bipartition information on a tree provided with '-t'
based on multiple trees (e.g. from a bootstrap) in a file
specified by '-z'.
c: Check if the alignment can be properly read by RAxML.
d: New rapid hill-climbing (DEFAULT).
e: Optimize model+branch lengths for given input tree under
GAMMA/GAMMAI only.
g: Compute per-site log likelihoods for one or more trees
passed via '-z' and write them to a file that can be read
by CONSEL.
h: Compute log likelihood test (SH-test) between best tree
passed via '-t' and a bunch of other trees passed via '-z'.
i: Perform a really thorough bootstrap, refinement of final
bootstrap tree under GAMMA and a more exhaustive algorithm.
j: Generate a bunch of bootstrapped alignment files from an
original alignment file.
m: Compare bipartitions between two bunches of trees passed
via '-t' and '-z' respectively. This will return the
Pearson correlation between all bipartitions found in the
two tree files. A file called
RAxML_bipartitionFrequencies.outputFileName will be
printed that contains the pair-wise bipartition
frequencies of the two sets.
n: Compute the log likelihood score of all trees contained
in a tree file provided by '-z' under GAMMA or
GAMMA+P-Invar.
o: Old and slower rapid hill-climbing.
p: Perform pure stepwise MP addition of new sequences to an
incomplete starting tree.
s: Split up a multi-gene partitioned alignment into the
respective subalignments.
t: Do randomized tree searches on one fixed starting tree.
w: Compute ELW test on a bunch of trees passed via '-z'.
x: Compute pair-wise ML distances, ML model parameters will
be estimated on an MP starting tree or a user-defined
tree passed via '-t', only allowed for GAMMA-based models
of rate heterogeneity.""",
checker_function=(lambda x:
isinstance(x, basestring) and len(x) == 1),
equate=False,
),
_Option(['-g', 'grouping_constraint'],
"File name of a multifurcating constraint tree. "
"this tree does not need to be comprehensive, i.e. "
"contain all taxa.",
filename=True,
equate=False,
),
_Option(['-i', 'rearrangements'],
"Initial rearrangement setting for the subsequent "
"application of topological changes phase.",
equate=False,
),
_Switch(['-j', 'checkpoints'],
"Write checkpoints (intermediate tree topologies)."
),
_Switch(['-k', 'bootstrap_branch_lengths'],
"Print bootstrapped trees with branch lengths. "
"The bootstraps will run a bit longer, because model "
"parameters will be optimized at the end of each run. "
"Use with CATMIX/PROTMIX or GAMMA/GAMMAI."
),
_Option(['-l', 'cluster_threshold'],
"Threshold for sequence similarity clustering. "
"RAxML will then print out an alignment to a file "
"called sequenceFileName.reducedBy.threshold that "
"only contains sequences <= the specified threshold "
"that must be between 0.0 and 1.0. RAxML uses the "
"QT-clustering algorithm to perform this task. "
"In addition, a file called "
"RAxML_reducedList.outputFileName will be written "
"that contains clustering information.",
equate=False,
),
_Option(['-L', 'cluster_threshold_fast'],
"Same functionality as '-l', but uses a less "
"exhaustive and thus faster clustering algorithm. "
"This is intended for very large datasets with more "
"than 20,000-30,000 sequences.",
equate=False,
),
_Option(['-m', 'model'],
"""Model of Nucleotide or Amino Acid Substitution:
NUCLEOTIDES:
GTRCAT : GTR + Optimization of substitution rates + Optimization of site-specific
evolutionary rates which are categorized into numberOfCategories distinct
rate categories for greater computational efficiency
if you do a multiple analysis with '-#' or '-N' but without bootstrapping the program
will use GTRMIX instead
GTRGAMMA : GTR + Optimization of substitution rates + GAMMA model of rate
heterogeneity (alpha parameter will be estimated)
GTRMIX : Inference of the tree under GTRCAT
and thereafter evaluation of the final tree topology under GTRGAMMA
GTRCAT_GAMMA : Inference of the tree with site-specific evolutionary rates.
However, here rates are categorized using the 4 discrete GAMMA rates.
Evaluation of the final tree topology under GTRGAMMA
GTRGAMMAI : Same as GTRGAMMA, but with estimate of proportion of invariable sites
GTRMIXI : Same as GTRMIX, but with estimate of proportion of invariable sites
GTRCAT_GAMMAI : Same as GTRCAT_GAMMA, but with estimate of proportion of invariable sites
AMINO ACIDS:
PROTCATmatrixName[F] : specified AA matrix + Optimization of substitution rates + Optimization of site-specific
evolutionary rates which are categorized into numberOfCategories distinct
rate categories for greater computational efficiency
if you do a multiple analysis with '-#' or '-N' but without bootstrapping the program
will use PROTMIX... instead
PROTGAMMAmatrixName[F] : specified AA matrix + Optimization of substitution rates + GAMMA model of rate
heterogeneity (alpha parameter will be estimated)
PROTMIXmatrixName[F] : Inference of the tree under specified AA matrix + CAT
and thereafter evaluation of the final tree topology under specified AA matrix + GAMMA
PROTCAT_GAMMAmatrixName[F] : Inference of the tree under specified AA matrix and site-specific evolutionary rates.
However, here rates are categorized using the 4 discrete GAMMA rates.
Evaluation of the final tree topology under specified AA matrix + GAMMA
PROTGAMMAImatrixName[F] : Same as PROTGAMMAmatrixName[F], but with estimate of proportion of invariable sites
PROTMIXImatrixName[F] : Same as PROTMIXmatrixName[F], but with estimate of proportion of invariable sites
PROTCAT_GAMMAImatrixName[F] : Same as PROTCAT_GAMMAmatrixName[F], but with estimate of proportion of invariable sites
Available AA substitution models: DAYHOFF, DCMUT, JTT, MTREV, WAG, RTREV, CPREV, VT, BLOSUM62, MTMAM, GTR
With the optional 'F' appendix you can specify if you want to use empirical base frequencies
Please note that for mixed models you can in addition specify the per-gene AA model in
the mixed model file (see manual for details)
""",
equate=False,
),
_Switch(['-M', 'partition_branch_lengths'],
"Switch on estimation of individual per-partition "
"branch lengths. Only has effect when used in "
"combination with 'partition_filename' ('-q'). "
"Branch lengths for individual partitions will be "
"printed to separate files. A weighted average of the "
"branch lengths is computed by using the respective "
"partition lengths. "
),
_Option(['-n', 'name'],
"Name used in the output files.",
filename=True,
equate=False,
),
_Option(['-o', 'outgroup'],
"Name of a single outgroup or a comma-separated list "
"of outgroups, eg '-o Rat' or '-o Rat,Mouse'. In case "
"that multiple outgroups are not monophyletic the "
"first name in the list will be selected as outgroup. "
"Don't leave spaces between taxon names!",
checker_function=lambda x: len(x.split()) == 1,
equate=False,
),
_Option(['-q', 'partition_filename'],
"File name containing the assignment of models to "
"alignment partitions for multiple models of "
"substitution. For the syntax of this file please "
"consult the RAxML manual.",
filename=True,
equate=False,
),
_Option(['-p', 'parsimony_seed'],
"Random number seed for the parsimony inferences. "
"This allows you to reproduce your results and will "
"help developers debug the program. This option HAS "
"NO EFFECT in the parallel MPI version.",
equate=False,
),
_Option(['-P', 'protein_model'],
"File name of a user-defined AA (Protein) substitution "
"model. This file must contain 420 entries, the first "
"400 being the AA substitution rates (this must be a "
"symmetric matrix) and the last 20 are the empirical "
"base frequencies.",
filename=True,
equate=False,
),
_Option(['-r', 'binary_constraint'],
"File name of a binary constraint tree. "
"This tree does not need to be comprehensive, i.e. "
"contain all taxa.",
filename=True,
equate=False,
),
_Option(['-s', 'sequences'],
"Name of the alignment data file, in PHYLIP format.",
filename=True,
equate=False,
),
_Option(['-t', 'starting_tree'],
"File name of a user starting tree, in Newick format.",
filename=True,
equate=False,
),
_Option(['-T', 'threads'],
"Number of threads to run. "
"PTHREADS VERSION ONLY! "
"Make sure to set this at most the number of CPUs "
"you have on your machine, otherwise, there will be "
"a huge performance decrease!",
equate=False,
),
_Option(['-u', 'num_bootstrap_searches'],
"Number of multiple bootstrap searches per replicate. "
"Use this to obtain better ML trees for each "
"replicate. Default: 1 ML search per bootstrap "
"replicate.",
equate=False,
),
_Switch(['-v', 'version'],
"Display version information."
),
_Option(['-w', 'working_dir'],
"Name of the working directory where RAxML will "
"write its output files. Default: current directory.",
filename=True,
),
_Option(['-x', 'rapid_bootstrap_seed'],
"Random seed for rapid bootstrapping.",
equate=False,
),
_Switch(['-y', 'parsimony'],
"Only compute a parsimony starting tree, then exit."
),
_Option(['-z', 'bipartition_filename'],
"Name of a file containing multiple trees, e.g. from "
"a bootstrap run, that shall be used to draw "
"bipartition values onto a tree provided with '-t'. "
"It can also be used to compute per-site log "
"likelihoods in combination with '-f g', and to read "
"a bunch of trees for a couple of other options "
"('-f h', '-f m', '-f n').",
filename=True,
equate=False,
),
_Option(['-N', '-#', 'num_replicates'],
"Number of alternative runs on distinct starting trees. "
"In combination with the '-b' option, this will invoke a "
"multiple boostrap analysis. "
"DEFAULT: 1 single analysis."
"Note that '-N' has been added as an alternative since "
"'-#' sometimes caused problems with certain MPI job "
"submission systems, since '-#' is often used to start "
"comments. "
,
equate=False,
),
]
AbstractCommandline.__init__(self, cmd, **kwargs)
# ENH: enforce -s, -n and -m
if not self.parsimony_seed:
self.parsimony_seed = 10000
| bryback/quickseq | genescript/Bio/Phylo/Applications/_Raxml.py | Python | mit | 19,768 | [
"Biopython"
] | 24ac08cc61fcd3c658ae99ca93f76cb19a65015e45145bd2cdb98e0270591edf |
# Copyright 2012 by Wibowo Arindrarto. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.SearchIO support for Exonerate output formats.
This module adds support for handling Exonerate outputs. Exonerate is a generic
tool for pairwise sequence comparison that allows you to align sequences using
several different models.
Bio.SearchIO.ExonerateIO was tested on the following Exonerate versions and
models:
- version: 2.2
- models:
- affine:local - cdna2genome
- coding2coding - est2genome
- genome2genome - ner
- protein2dna - protein2genome
- ungapped - ungapped:translated
Although model testing were not exhaustive, ExonerateIO should be able to cope
with all Exonerate models. Please file a bug report if you stumble upon an
unparseable file.
More information on Exonerate is available on its home page at
www.ebi.ac.uk/~guy/exonerate/
Supported Formats
=================
- Plain text alignment - 'exonerate-text' - parsing, indexing
- Vulgar line - 'exonerate-vulgar' - parsing, indexing
- Cigar line - 'exonerate-cigar' - parsing, indexing
On Exonerate, these output formats are not exclusive to one another. For
example, you may have both plain text and vulgar output in the same file.
ExonerateIO can only handle one of these at a time, however. If you have a file
containing both plain text and vulgar lines, for example, you have to pick
either 'exonerate-text' or 'exonerate-vulgar' to parse it.
Due to the cigar format specification, many features of the alignments such as
introns or frameshifts may be collapsed into a single feature (in this case,
they are labelled 'D' for 'deletion'). The parser does not attempt to guess
whether the D label it encounters is a real deletion or a collapsed feature.
As such, parsing or indexing using 'exonerate-cigar' may yield different results
compared to 'exonerate-text' or 'exonerate-vulgar'.
exonerate-text
==============
The plain text output / C4 alignment is the output triggered by the
'--showalignment' flag. Compared to the two other output formats, this format
contains the most information, having the complete query and hit sequences of
the alignment.
Here are some examples of the C4 output alignment that ExonerateIO can handle
(coordinates not written in scale)::
1. simple ungapped alignments
1 : ATGGGCAATATCCTTCGGAAAGGTCAGCAAAT : 56
||||||||||||||||||||||||||||||||
1319275 : ATGGGCAATATCCTTCGGAAAGGTCAGCAAAT : 1319220
2. alignments with frameshifts:
129 : -TGCCGTTACCAT----GACGAAAGTATTAAT : 160
-CysArgTyrHis----AspGluSerIleAsn
#||||||||||||####|||||||||||||||
#CysArgTyrHis####AspGluSerIleAsn
1234593 : GTGCCGTTACCATCGGTGACGAAAGTATTAAT : 1234630
3. alignments with introns and split codons:
382 : {A} {CC}AAA : 358
AAA{T} >>>> Target Intron 3 >>>> {hr}LysATGAGCGATGAAAATA
|| { }++ 55423 bp ++{ } ! ||| ||||||||||
AAC{L}gt.........................ag{eu}AspTTGAATGATGAAAATA
42322 : {C} {TG}GAT : 97769
4. alignments with NER blocks
111 : CAGAAAA--< 31 >--CTGCCCAGAAT--< 10 >--AACGAGCGTTCCG- : 184
| |||||--< NER 1 >--| ||||| | |--< NER 2 >--||| | ||||||-
297911 : CTGAAAA--< 29 >--CCGCCCAAAGT--< 13 >--AACTGGAGTTCCG- : 297993
ExonerateIO utilizes the HSPFragment model quite extensively to deal with non-
ungapped alignments. For any single HSPFragment, if ExonerateIO sees an intron,
a NER block, or a frameshift, it will break the fragment into two HSPFragment
objects and adjust each of their start and end coordinate appropriately.
You may notice that Exonerate always uses the three letter amino acid codes to
display protein sequences. If the protein itself is part of the query sequence,
such as in the protein2dna model, ExonerateIO will transform the protein
sequence into using one letter codes. This is because the SeqRecord objects that
store the sequences are designed for single-letter sequences only. If Exonerate
also outputs the underlying nucleotide sequence, it will be saved into an
``aln_annotation`` entry as a list of triplets.
If the protein sequence is not part of the actual alignment, such as in the
est2genome or genome2genome models, ExonerateIO will keep the three letter codes
and store them as ``aln_annotation`` entries. In these cases, the hit and
query sequences may be used directly as SeqRecord objects as they are one-letter
nucleotide codes. The three-letter protein sequences are then stored as entries
in the ``aln_annotation`` dictionary.
For 'exonerate-text', ExonerateIO provides the following object attributes:
+-----------------+-------------------------+----------------------------------+
| Object | Attribute | Value |
+=================+=========================+==================================+
| QueryResult | description | query sequence description |
| +-------------------------+----------------------------------+
| | id | query sequence ID |
| +-------------------------+----------------------------------+
| | model | alignment model |
| +-------------------------+----------------------------------+
| | program | 'exonerate' |
+-----------------+-------------------------+----------------------------------+
| Hit | description | hit sequence description |
| +-------------------------+----------------------------------+
| | id | hit sequence ID |
+-----------------+-------------------------+----------------------------------+
| HSP | hit_split_codons | list of split codon coordinates |
| | | in the hit sequence |
| +-------------------------+----------------------------------+
| | score | alignment score |
| +-------------------------+----------------------------------+
| | query_split_codons | list of split codon coordinates |
| | | in the query sequence |
+-----------------+-------------------------+----------------------------------+
| HSPFragment | aln_annotation | alignment similarity string, hit |
| | | sequence annotation, and/or |
| | | query sequence annotation |
| +-------------------------+----------------------------------+
| | hit | hit sequence |
| +-------------------------+----------------------------------+
| | hit_end | hit sequence end coordinate |
| +-------------------------+----------------------------------+
| | hit_frame | hit sequence reading frame |
| +-------------------------+----------------------------------+
| | hit_start | hit sequence start coordinate |
| +-------------------------+----------------------------------+
| | hit_strand | hit sequence strand |
| +-------------------------+----------------------------------+
| | query | query sequence |
| +-------------------------+----------------------------------+
| | query_end | query sequence end coordinate |
| +-------------------------+----------------------------------+
| | query_frame | query sequence reading frame |
| +-------------------------+----------------------------------+
| | query_start | query sequence start coordinate |
| +-------------------------+----------------------------------+
| | query_strand | query sequence strand |
+-----------------+-------------------------+----------------------------------+
Note that you can also use the default HSP or HSPFragment properties. For
example, to check the intron coordinates of your result you can use the
``query_inter_ranges`` or ``hit_inter_ranges`` properties:
>>> from Bio import SearchIO
>>> fname = 'Exonerate/exn_22_m_genome2genome.exn'
>>> all_qresult = list(SearchIO.parse(fname, 'exonerate-text'))
>>> hsp = all_qresult[-1][-1][-1] # last qresult, last hit, last hsp
>>> hsp
HSP(...)
>>> hsp.query_inter_ranges
[(388, 449), (284, 319), (198, 198), (114, 161)]
>>> hsp.hit_inter_ranges
[(487387, 641682), (386207, 487327), (208677, 386123), (71917, 208639)]
Here you can see that for both query and hit introns, the coordinates
in each tuple is always (start, end) where start <= end. But when you compare
each tuple to the next, the coordinates decrease. This is an indication that
both the query and hit sequences lie on the minus strand. Exonerate outputs
minus strand results in a decreasing manner; the start coordinate is always
bigger than the end coordinate. ExonerateIO preserves the fragment ordering as a
whole, but uses its own standard to store an individual fragment's start and end
coordinates.
You may also notice that the third tuple in ``query_inter_ranges`` is (198, 198),
two identical numbers. This means that the query sequence does not have any
gaps at that position. The gap is only present in the hit sequence, where we see
that the third tuple contains (208677, 386123), a gap of about 177k bases.
Another example is to use the ``hit_frame_all`` and ``query_frame_all`` to see if
there are any frameshifts in your alignment:
>>> from Bio import SearchIO
>>> fname = 'Exonerate/exn_22_m_coding2coding_fshifts.exn'
>>> qresult = next(SearchIO.parse(fname, 'exonerate-text'))
>>> hsp = qresult[0][0] # first hit, first hsp
>>> hsp
HSP(...)
>>> hsp.query_frame_all
[1, 2, 2, 2]
>>> hsp.hit_frame_all
[1, 1, 3, 1]
Here you can see that the alignment as a whole has three frameshifts. The first
one occurs in the query sequence, after the first fragment (1 -> 2 shift), the
second one occurs in the hit sequence, after the second fragment (1 -> 3 shift),
and the last one also occurs in the hit sequence, before the last fragment (3 ->
1 shift).
There are other default HSP properties that you can use to ease your workflow.
Please refer to the HSP object documentation for more details.
exonerate-vulgar
================
The vulgar format provides a compact way of representing alignments created by
Exonerate. In general, it contains the same information as the plain text output
except for the 'model' information and the actual sequences themselves. You can
expect the coordinates obtained from using 'exonerate-text' and
'exonerate-vulgar' to be the same. Both formats also create HSPFragment objects using
the same triggers: introns, NER blocks, and/or frameshifts.
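A minimal parsing sketch (file name hypothetical; assumes the output was
generated with '--showvulgar yes' so that vulgar lines are present)::

    from Bio import SearchIO
    qresult = next(SearchIO.parse('my_output.exn', 'exonerate-vulgar'))
    hsp = qresult[0][0]  # first hit, first HSP
    print(hsp.query_range, hsp.hit_range)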
exonerate-cigar
===============
The cigar format provides an even more compact representation of Exonerate
alignments. However, this comes with a cost of losing information. In the cigar
format, for example, introns are treated as simple deletions. This makes it
impossible for the parser to distinguish between simple deletions or intron
regions. As such, 'exonerate-cigar' may produce different sets of coordinates
and fragments compared to 'exonerate-vulgar' or 'exonerate-text'.
"""
# Known issues & gotchas:
# - The cigar parser does not use the extended cigar string; only supports MID
# - Cigar and vulgar parsing results will most likely be different, due to the
# different type of data stored by both formats
from .exonerate_text import ExonerateTextParser, ExonerateTextIndexer
from .exonerate_vulgar import ExonerateVulgarParser, ExonerateVulgarIndexer
from .exonerate_cigar import ExonerateCigarParser, ExonerateCigarIndexer
# if not used as a module, run the doctest
if __name__ == "__main__":
import sys
# Add path to Bio
sys.path.append('../../..')
from Bio._utils import run_doctest
sys.path.append('Bio/SearchIO/ExonerateIO')
run_doctest()
| Ambuj-UF/ConCat-1.0 | src/Utils/Bio/SearchIO/ExonerateIO/__init__.py | Python | gpl-2.0 | 13,014 | [
"Biopython"
] | 78fe17b09c6fb9e4c82c3b6bf8848ba74c812d2815525df12cceff1600696b2a |
#!/usr/bin/env python
from __future__ import division
__author__ = "Jai Ram Rideout"
__copyright__ = "Copyright 2012, The QIIME project"
__credits__ = ["Jai Ram Rideout", "Greg Caporaso", "Meg Pirrung"]
__license__ = "GPL"
__version__ = "1.5.0-dev"
__maintainer__ = "Jai Ram Rideout"
__email__ = "jai.rideout@gmail.com"
__status__ = "Development"
"""Contains functions used in the most_wanted_otus.py script."""
from collections import defaultdict
from itertools import cycle
from operator import itemgetter
from os import makedirs
from os.path import basename, join, normpath, splitext
from pickle import dump
from tempfile import NamedTemporaryFile
from pylab import axes, figlegend, figure, legend, pie, savefig
from biom.parse import parse_biom_table
from cogent import DNA, LoadSeqs
from cogent.app.blast import blast_seqs, Blastall
from cogent.app.formatdb import build_blast_db_from_fasta_path
from cogent.parse.blast import BlastResult
from cogent.parse.fasta import MinimalFastaParser
from cogent.util.misc import remove_files
from qiime.colors import data_colors, data_color_order
from qiime.parse import parse_mapping_file_to_dict
from qiime.util import (add_filename_suffix, parse_command_line_parameters,
get_options_lookup, make_option, qiime_system_call)
from qiime.workflow.util import generate_log_fp, WorkflowError, WorkflowLogger
html_header = '<html lang="en"><head><meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <title>Most Wanted OTUs</title><link rel="stylesheet" type="text/css" href="most_wanted_otus.css"></head><body>'
html_footer = '</body></html>'
def generate_most_wanted_list(output_dir, otu_table_fps, rep_set_fp, gg_fp,
nt_fp, mapping_fp, mapping_category, top_n, min_abundance,
max_abundance, min_categories, num_categories_to_plot,
max_gg_similarity, max_nt_similarity, e_value, word_size,
merged_otu_table_fp, suppress_taxonomic_output, jobs_to_start,
command_handler, status_update_callback, force):
try:
makedirs(output_dir)
except OSError:
if not force:
raise WorkflowError("Output directory '%s' already exists. Please "
"choose a different directory, or force overwrite with -f."
% output_dir)
logger = WorkflowLogger(generate_log_fp(output_dir))
commands, blast_results_fp, rep_set_cands_failures_fp, \
master_otu_table_ms_fp = _get_most_wanted_filtering_commands(
output_dir, otu_table_fps,
rep_set_fp, gg_fp, nt_fp, mapping_fp, mapping_category,
min_abundance, max_abundance, min_categories, max_gg_similarity,
e_value, word_size, merged_otu_table_fp, jobs_to_start)
# Execute the commands, but keep the logger open because
# we're going to write additional status updates as we process the data.
command_handler(commands, status_update_callback, logger,
close_logger_on_success=False)
commands = []
# We'll sort the BLAST results by percent identity (ascending) and pick the
# top n.
logger.write("Reading in BLAST results, sorting by percent identity, "
"and picking the top %d OTUs.\n\n" % top_n)
top_n_mw = _get_top_n_blast_results(open(blast_results_fp, 'U'), top_n,
max_nt_similarity)
# Read in our filtered down candidate seqs file and latest filtered and
# collapsed OTU table. We'll need to compute some stats on these to include
# in our report.
logger.write("Reading in filtered candidate sequences and latest filtered "
"and collapsed OTU table.\n\n")
mw_seqs = _get_rep_set_lookup(open(rep_set_cands_failures_fp, 'U'))
master_otu_table_ms = parse_biom_table(open(master_otu_table_ms_fp, 'U'))
# Write results out to tsv and HTML table.
logger.write("Writing most wanted OTUs results to TSV and HTML "
"tables.\n\n")
output_img_dir = join(output_dir, 'img')
try:
makedirs(output_img_dir)
except OSError:
# It already exists, which is okay since we already know we are in
# 'force' mode from above.
pass
tsv_lines, html_table_lines, mw_fasta_lines, plot_fps, plot_data_fps = \
_format_top_n_results_table(top_n_mw,
mw_seqs, master_otu_table_ms, output_img_dir, mapping_category,
suppress_taxonomic_output, num_categories_to_plot)
mw_tsv_rel_fp = 'most_wanted_otus.txt'
mw_tsv_fp = join(output_dir, mw_tsv_rel_fp)
mw_tsv_f = open(mw_tsv_fp, 'w')
mw_tsv_f.write(tsv_lines)
mw_tsv_f.close()
mw_fasta_rel_fp = 'most_wanted_otus.fasta'
mw_fasta_fp = join(output_dir, mw_fasta_rel_fp)
mw_fasta_f = open(mw_fasta_fp, 'w')
mw_fasta_f.write(mw_fasta_lines)
mw_fasta_f.close()
html_dl_links = ('<a href="%s" target="_blank">Download table in tab-'
'separated value (TSV) format</a><br /><a href="%s" '
'target="_blank">Download OTU sequence data in FASTA format</a>' %
(mw_tsv_rel_fp, mw_fasta_rel_fp))
html_lines = '%s<div>%s<br /><br />%s<br />%s</div>%s' % (html_header, html_dl_links,
html_table_lines, html_dl_links, html_footer)
mw_html_f = open(join(output_dir, 'most_wanted_otus.html'), 'w')
mw_html_f.write(html_lines)
mw_html_f.close()
logger.close()
def _get_most_wanted_filtering_commands(output_dir, otu_table_fps, rep_set_fp,
gg_fp, nt_fp, mapping_fp, mapping_category, min_abundance,
max_abundance, min_categories, max_gg_similarity, e_value, word_size,
merged_otu_table_fp, jobs_to_start):
commands = []
otu_tables_to_merge = []
if merged_otu_table_fp is None:
for otu_table_fp in otu_table_fps:
# First filter to keep only new (non-GG) OTUs.
novel_otu_table_fp = join(output_dir, add_filename_suffix(otu_table_fp,
'_novel'))
commands.append([('Filtering out all GG reference OTUs',
'filter_otus_from_otu_table.py -i %s -o %s -e %s' %
(otu_table_fp, novel_otu_table_fp, gg_fp))])
# Next filter to keep only abundant otus in the specified range
# (looking only at extremely abundant OTUs has the problem of yielding
# too many that are similar to stuff in the nt database).
novel_abund_otu_table_fp = join(output_dir,
add_filename_suffix(novel_otu_table_fp, '_min%d_max%d' %
(min_abundance, max_abundance)))
commands.append([('Filtering out all OTUs that do not fall within the '
'specified abundance threshold',
'filter_otus_from_otu_table.py -i %s -o %s -n %d -x %d' %
(novel_otu_table_fp, novel_abund_otu_table_fp, min_abundance,
max_abundance))])
# Remove samples from the table that aren't in the mapping file.
novel_abund_filtered_otu_table_fp = join(output_dir,
add_filename_suffix(novel_abund_otu_table_fp,
'_known_samples'))
commands.append([('Filtering out samples that are not in the mapping '
'file',
'filter_samples_from_otu_table.py -i %s -o %s '
'--sample_id_fp %s' % (novel_abund_otu_table_fp,
novel_abund_filtered_otu_table_fp, mapping_fp))])
# Next, collapse by mapping_category.
otu_table_by_samp_type_fp = join(output_dir,
add_filename_suffix(novel_abund_filtered_otu_table_fp, '_%s' %
mapping_category))
commands.append([('Collapsing OTU table by %s' % mapping_category,
'summarize_otu_by_cat.py -c %s -o %s -m %s -i %s' %
(novel_abund_filtered_otu_table_fp, otu_table_by_samp_type_fp,
mapping_category, mapping_fp))])
otu_tables_to_merge.append(otu_table_by_samp_type_fp)
# Merge all collapsed OTU tables.
master_otu_table_fp = join(output_dir,
'master_otu_table_novel_min%d_max%d_%s.biom' %
(min_abundance, max_abundance, mapping_category))
commands.append([('Merging collapsed OTU tables',
'merge_otu_tables.py -i %s -o %s' %
(','.join(otu_tables_to_merge), master_otu_table_fp))])
else:
master_otu_table_fp = merged_otu_table_fp
# Filter to contain only otus in the specified minimum number of sample
# types.
master_otu_table_ms_fp = join(output_dir, add_filename_suffix(
master_otu_table_fp, '_ms%d' % min_categories))
commands.append([('Filtering OTU table to include only OTUs that appear '
'in at least %d sample groups' % min_categories,
'filter_otus_from_otu_table.py -i %s -o %s -s %d' %
(master_otu_table_fp, master_otu_table_ms_fp, min_categories))])
# Now that we have a filtered down OTU table of good candidate OTUs, filter
# the corresponding representative set to include only these candidate
# sequences.
rep_set_cands_fp = join(output_dir,
add_filename_suffix(rep_set_fp, '_candidates'))
commands.append([('Filtering representative set to include only the '
'latest candidate OTUs',
'filter_fasta.py -f %s -o %s -b %s' %
(rep_set_fp, rep_set_cands_fp, master_otu_table_ms_fp))])
# Find the otus that don't hit GG at a certain maximum similarity
# threshold.
uclust_output_dir = join(output_dir, 'most_wanted_candidates_%s_%s' %
(basename(gg_fp), str(max_gg_similarity)))
commands.append([('Running uclust to get list of sequences that don\'t '
'hit the maximum GG similarity threshold',
'parallel_pick_otus_uclust_ref.py -i %s -o %s -r %s -s %s -O %d' %
(rep_set_cands_fp, uclust_output_dir, gg_fp,
str(max_gg_similarity), jobs_to_start))])
# Filter the rep set to only include the failures from uclust.
rep_set_cands_failures_fp = join(output_dir,
add_filename_suffix(rep_set_cands_fp, '_failures'))
commands.append([('Filtering candidate sequences to only include uclust '
'failures',
'filter_fasta.py -f %s -s %s -o %s' %
(rep_set_cands_fp, join(uclust_output_dir,
splitext(basename(rep_set_cands_fp))[0] + '_failures.txt'),
rep_set_cands_failures_fp))])
# BLAST the failures against nt.
blast_output_dir = join(output_dir, 'blast_output')
commands.append([('BLASTing filtered candidate sequences against nt '
'database',
'parallel_blast.py -i %s -o %s -r %s -D -e %f -w %d -O %d' %
(rep_set_cands_failures_fp, blast_output_dir, nt_fp, e_value,
word_size, jobs_to_start))])
blast_results_fp = join(blast_output_dir,
splitext(basename(rep_set_cands_failures_fp))[0] +
'_blast_out.txt')
return commands, blast_results_fp, rep_set_cands_failures_fp, \
master_otu_table_ms_fp
def _get_top_n_blast_results(blast_results_f, top_n, max_nt_similarity):
"""blast_results should only contain a single hit per query sequence"""
result = []
seen_otus = {}
for line in blast_results_f:
# Skip headers and comments.
line = line.strip()
if line and not line.startswith('#'):
otu_id, subject_id, percent_identity = line.split('\t')[:3]
percent_identity = float(percent_identity)
# Skip otus that are too similar to their subject, and skip
# duplicate query hits.
if ((percent_identity / 100.0) <= max_nt_similarity and
otu_id not in seen_otus):
result.append((otu_id, subject_id, percent_identity))
seen_otus[otu_id] = True
return sorted(result, key=itemgetter(2))[:top_n]
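# Illustrative sketch (values hypothetical): with max_nt_similarity=0.97 and
# top_n=2, tab-separated BLAST lines like
#
#   OTU_1<tab>gi|1|gb|X1.1|<tab>99.0<tab>...   (skipped: > 97% similar)
#   OTU_2<tab>gi|2|gb|X2.1|<tab>88.0<tab>...
#   OTU_3<tab>gi|3|gb|X3.1|<tab>92.5<tab>...
#
# would yield [('OTU_2', 'gi|2|gb|X2.1|', 88.0), ('OTU_3', 'gi|3|gb|X3.1|', 92.5)],
# i.e. one hit per OTU, sorted by ascending percent identity.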
def _get_rep_set_lookup(rep_set_f):
result = {}
for seq_id, seq in MinimalFastaParser(rep_set_f):
seq_id = seq_id.strip().split()[0]
result[seq_id] = seq
return result
def _format_top_n_results_table(top_n_mw, mw_seqs, master_otu_table_ms,
output_img_dir, mapping_category,
suppress_taxonomic_output,
num_categories_to_plot):
tsv_lines = ''
html_lines = ''
mw_fasta_lines = ''
plot_fps = []
plot_data_fps = []
tsv_lines += '#\tOTU ID\tSequence\t'
if not suppress_taxonomic_output:
tsv_lines += 'Greengenes taxonomy\t'
tsv_lines += 'NCBI nt closest match\tNCBI nt % identity\n'
html_lines += ('<table id="most_wanted_otus_table" border="border">'
'<tr><th>#</th><th>OTU</th>')
if not suppress_taxonomic_output:
html_lines += '<th>Greengenes taxonomy</th>'
html_lines += ('<th>NCBI nt closest match</th>'
'<th>Abundance by %s</th></tr>' % mapping_category)
for mw_num, (otu_id, subject_id, percent_identity) in enumerate(top_n_mw):
# Grab all necessary information to be included in our report.
seq = mw_seqs[otu_id]
mw_fasta_lines += '>%s\n%s\n' % (otu_id, seq)
# Splitting code taken from
# http://code.activestate.com/recipes/496784-split-string-into-n-
# size-pieces/
split_seq = [seq[i:i+40] for i in range(0, len(seq), 40)]
if not suppress_taxonomic_output:
tax = master_otu_table_ms.ObservationMetadata[
master_otu_table_ms.getObservationIndex(otu_id)]['taxonomy']
gb_id = subject_id.split('|')[3]
ncbi_link = 'http://www.ncbi.nlm.nih.gov/nuccore/%s' % gb_id
# Compute the abundance of each most wanted OTU in each sample
# grouping and create a pie chart to go in the HTML table.
samp_types = master_otu_table_ms.SampleIds
counts = master_otu_table_ms.observationData(otu_id)
plot_data = _format_pie_chart_data(samp_types, counts,
num_categories_to_plot)
# Piechart code based on:
# http://matplotlib.sourceforge.net/examples/pylab_examples/
# pie_demo.html
# http://www.saltycrane.com/blog/2006/12/example-pie-charts-using-
# python-and/
figure(figsize=(8,8))
ax = axes([0.1, 0.1, 0.8, 0.8])
patches = pie(plot_data[0], colors=plot_data[2], shadow=True)
# We need a relative path to the image.
pie_chart_filename = 'abundance_by_%s_%s.png' % (mapping_category,
otu_id)
pie_chart_rel_fp = join(basename(normpath(output_img_dir)),
pie_chart_filename)
pie_chart_abs_fp = join(output_img_dir, pie_chart_filename)
savefig(pie_chart_abs_fp, transparent=True)
plot_fps.append(pie_chart_abs_fp)
# Write out pickled data for easy plot editing post-creation.
plot_data_fp = join(output_img_dir, 'abundance_by_%s_%s.p' %
(mapping_category, otu_id))
dump(plot_data, open(plot_data_fp, 'wb'))
plot_data_fps.append(plot_data_fp)
tsv_lines += '%d\t%s\t%s\t' % (mw_num + 1, otu_id, seq)
if not suppress_taxonomic_output:
tsv_lines += '%s\t' % tax
tsv_lines += '%s\t%s\n' % (gb_id, percent_identity)
html_lines += '<tr><td>%d</td><td><pre>>%s\n%s</pre></td>' % (
mw_num + 1, otu_id, '\n'.join(split_seq))
if not suppress_taxonomic_output:
html_lines += '<td>%s</td>' % tax
html_lines += ('<td><a href="%s" target="_blank">%s</a> '
'(%s%% sim.)</td>' % (ncbi_link, gb_id, percent_identity))
        # Create the legend as a table -- couldn't get mpl to correctly plot
        # the legend side-by-side with the pie chart, and don't have time to
        # mess with it anymore.
legend_html = _format_legend_html(plot_data)
html_lines += ('<td><table><tr><td><img src="%s" width="300" '
'height="300" /></td><td>%s</td></tr></table></tr>' % (
pie_chart_rel_fp, legend_html))
html_lines += '</table>'
return tsv_lines, html_lines, mw_fasta_lines, plot_fps, plot_data_fps
def _format_pie_chart_data(labels, data, max_count):
if len(labels) != len(data):
raise ValueError("The number of labels does not match the number "
"of counts.")
colors = cycle([data_colors[color].toHex() for color in data_color_order])
result = [(val, label, colors.next()) for val, label in zip(data, labels)]
result = sorted(result, key=itemgetter(0), reverse=True)[:max_count]
total = sum([e[0] for e in result])
result = [(val / total, label, color) for val, label, color in result]
return ([e[0] for e in result],
['%s (%.2f%%)' % (e[1], e[0] * 100.0) for e in result],
[e[2] for e in result])
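# --- Editorial sketch, not part of the original module ---------------------
# _format_pie_chart_data() keeps the `max_count` most abundant groups and
# renormalises their counts to fractions. The labels and counts below are made
# up; `cycle`, `data_colors` and `data_color_order` are assumed to be
# imported/defined earlier in this module (not shown in this excerpt), so the
# returned colours depend on those definitions.
def _demo_format_pie_chart_data():
    fracs, labels, colors = _format_pie_chart_data(
        ['gut', 'skin', 'oral'], [60.0, 30.0, 10.0], max_count=2)
    return fracs, labels
    # fracs  -> [0.666..., 0.333...]
    # labels -> ['gut (66.67%)', 'skin (33.33%)']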
def _format_legend_html(plot_data):
result = '<ul class="most_wanted_otus_legend">'
for val, label, color in zip(plot_data[0], plot_data[1], plot_data[2]):
result += ('<li><div class="key" style="background-color:%s"></div>%s</li>' % (color,label))
return result + '</ul>'
# def _format_legend_html(plot_data):
# result = '<table class="most_wanted_otus_legend">'
# for val, label, color in zip(plot_data[0], plot_data[1], plot_data[2]):
# result += ('<tr><td bgcolor="%s" width="50"> </td>'
# '<td>%s</td></tr>' % (color, label))
# return result + '</table>'
| cuttlefishh/emp | legacy/code/emp/most_wanted_otus.py | Python | bsd-3-clause | 17,837 | [
"BLAST"
] | efffb3917d06faee469a185802703b69ec6f280a52db82da55ead1649336ccf2 |
'''
Show how to do box generation on the fly.
I try to use as few dependencies as possible, but some still remain.
If this does not work by default, please run:
pip install biopython pyparsing numpy scipy matplotlib prody
in your clean python environment.
'''
from vector import vector_generator,Box
def generate_box_onfly(receptor_filename,ligand_filename,OUT=False,verbose=False):
'''
:param receptor_filename:
:param ligand_filename:
    :param OUT: This is for debugging purposes; it will output a complex pdb file with the ligand and receptor in the cubic box.
    :return: the generated box vector
'''
    # Later you can even move the box when you want to rotate it.
    # I think someday this will be needed, so I put it in the backend.
    # For now it will first try to rotate; if that fails more than 100 times (ligand out of the border), it will try shifting the box.
whatever = vector_generator(receptor_filename, Boxsize=20, Boxrange=1)
Tag, Vec= whatever.generate_vector_from_file(ligand_filename, OUT=OUT,verbose=verbose)
if Tag==False:
        print 'Unfortunately, something went wrong in the script!'
else:
print 'You will get a result'
return Vec
if __name__=='__main__':
rname = 'example.pdb'
    # Supports mol2 and pdb, I suppose
lname = 'example_ligand.mol'
'''
    Here is how to use it. It is very simple: just prepare a ligand file and a pdb file and you will get the result.
    If you want to see the pdb results, set OUT=True; otherwise leave it off (simply do not pass OUT=xxx).
    If you want verbose printing in Python, set verbose=True; otherwise leave it off.
'''
Vec = generate_box_onfly(rname,lname,OUT=True)
print Vec
'''
This is a test module to check if everything goes correctly.
'''
#B= Box(center=[12.5,45.36,22.52],Boxsize=20,Boxrange=1)
#B.transform(rotation=[0.2,0.3,0.5],transition=[0.12,0.05,-0.98])
#B.self_test() | Reimilia/pdb_sth | vector_generation/example.py | Python | mit | 1,890 | [
"Biopython"
] | 1f7c5210fba444dba2b7d4c444cfaaf9274b6fc7dc0e4640c7c5c339bcf0e368 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unparse meta AST node into a dict"""
# pylint: disable=invalid-name
from typed_ast import ast3 as ast
class MetaUnparser(ast.NodeVisitor):
"""Python AST Visitor to unparse meta AST node into a dict"""
def visit_Dict(self, node):
keys = [self.visit(key) for key in node.keys]
values = [self.visit(value) for value in node.values]
return dict(zip(keys, values))
def visit_Tuple(self, node):
return tuple(self.visit(element) for element in node.elts)
def visit_List(self, node):
return [self.visit(element) for element in node.elts]
def visit_keyword(self, node):
return node.arg, self.visit(node.value)
def visit_NameConstant(self, node):
return node.value
def visit_Constant(self, node):
return node.value
def visit_Num(self, node):
return node.n
def visit_Str(self, node):
return node.s
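# --- Editorial sketch, not part of the original module ---------------------
# Round-trips a literal through typed_ast and MetaUnparser to recover a plain
# Python object; the visitor only needs to handle literal containers and
# constants. The literal below is a made-up example, not real TVM metadata.
def _demo_meta_unparser():
    tree = ast.parse('{"shape": (1, 2), "flags": [True, None]}', mode="eval")
    return MetaUnparser().visit(tree.body)
    # -> {'shape': (1, 2), 'flags': [True, None]}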
| sxjscience/tvm | python/tvm/script/meta_unparser.py | Python | apache-2.0 | 1,702 | [
"VisIt"
] | 08408e0b70db8508b8211d13db7a8cf0724ae9640449005f6484b8300ef06a6f |
# Authors: Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
from os import path as op
import glob
import tempfile
from shutil import rmtree
import atexit
def create_chunks(sequence, size):
"""Generate chunks from a sequence
Note. copied from MNE-Python
Parameters
----------
sequence : iterable
Any iterable object
size : int
The chunksize to be returned
"""
return (sequence[p:p + size] for p in range(0, len(sequence), size))
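# --- Editorial sketch, not part of the original module ---------------------
# Chunking a 7-element list into pieces of 3 yields two full chunks plus one
# remainder chunk.
def _demo_create_chunks():
    return list(create_chunks(list(range(7)), 3))
    # -> [[0, 1, 2], [3, 4, 5], [6]]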
def fwhm_kernel_2d(size, fwhm, center=None):
""" Make a square gaussian kernel.
Note: adapted from https://gist.github.com/andrewgiessel/4635563
Parameters
----------
size : int
The length of the square matrix to create.
fmhw : int
The full wdith at hald maximum value.
"""
x = np.arange(0, size, 1, np.float64)
y = x[:, np.newaxis]
# center
x0 = y0 = size // 2
return np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / fwhm ** 2)
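# --- Editorial sketch, not part of the original module ---------------------
# A 5x5 kernel with fwhm=2 peaks at 1.0 in the centre pixel and drops to 0.5
# one pixel away along an axis (i.e. half maximum at half the FWHM).
def _demo_fwhm_kernel_2d():
    k = fwhm_kernel_2d(5, 2)
    return k[2, 2], k[2, 3]
    # -> (1.0, 0.5)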
def pupil_kernel(fs, dur=4.0, t_max=0.930, n=10.1, s=1.):
"""Generate pupil response kernel modeled as an Erlang gamma function.
Parameters
----------
fs : int
Sampling frequency (samples/second) to use in generating the kernel.
dur : float
Length (in seconds) of the generated kernel.
t_max : float
Time (in seconds) where the response maximum is stipulated to occur.
n : float
        Number of negative-exponential layers in the cascade defining the
        gamma function.
s : float | None
Desired value for the area under the kernel. If `None`, no scaling is
performed.
"""
n_samp = int(np.round(fs * dur))
t = np.arange(n_samp, dtype=float) / fs
h = (t ** n) * np.exp(- n * t / t_max)
scal = 1. if s is None else float(s) / (np.sum(h) * (t[1] - t[0]))
h = scal * h
return h
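# --- Editorial sketch, not part of the original module ---------------------
# With the default scaling s=1 the kernel integrates to ~1 regardless of the
# sampling rate; the 250 Hz used here is an arbitrary choice.
def _demo_pupil_kernel(fs=250.0):
    h = pupil_kernel(fs, dur=4.0, t_max=0.930, n=10.1, s=1.)
    return len(h), np.sum(h) / fs
    # -> (1000, ~1.0)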
def _get_test_fnames():
"""Get usable test files (omit EDF if no edf2asc)"""
path = op.join(op.dirname(__file__), 'tests', 'data')
fnames = glob.glob(op.join(path, '*.edf'))
return fnames
class _TempDir(str):
"""Class for creating and auto-destroying temp dir
This is designed to be used with testing modules.
We cannot simply use __del__() method for cleanup here because the rmtree
function may be cleaned up before this object, so we use the atexit module
instead.
"""
def __new__(self):
new = str.__new__(self, tempfile.mkdtemp())
return new
def __init__(self):
self._path = self.__str__()
atexit.register(self.cleanup)
def cleanup(self):
rmtree(self._path, ignore_errors=True)
def _has_joblib():
"""Helper to determine if joblib is installed"""
try:
import joblib # noqa
except Exception:
return False
else:
return True
def _has_h5py():
"""Helper to determine if joblib is installed"""
try:
import h5py # noqa
except Exception:
return False
else:
return True
def _has_edfapi():
"""Helper to determine if a user has edfapi installed"""
from .edf._raw import has_edfapi
return has_edfapi
_requires_h5py = np.testing.dec.skipif(not _has_h5py(),
'Requires h5py')
_requires_edfapi = np.testing.dec.skipif(not _has_edfapi(), 'Requires edfapi')
| drammock/pyeparse | pyeparse/utils.py | Python | bsd-3-clause | 3,374 | [
"Gaussian"
] | 13f11f447cd46d66028a561cdc423ca6106b367feacba7a6e8016ecf140c9fe9 |
#
# Copyright (C) 2004 Rational Discovery LLC
# All Rights Reserved
#
from rdkit import rdBase
try:
from rdkit.SimDivFilters import rdSimDivPickers
from rdkit.SimDivFilters.rdSimDivPickers import *
except ImportError:
import traceback
rdSimDivPickers=None
| adalke/rdkit | rdkit/SimDivFilters/__init__.py | Python | bsd-3-clause | 269 | [
"RDKit"
] | 5194bd3a72268363594f53fc20cc26b807c981cc4ee036a530cedc696f8386e6 |
"""
Tests for Normalizing Flows.
"""
import os
import sys
import pytest
import deepchem
import numpy as np
import unittest
from deepchem.data import NumpyDataset
try:
import tensorflow as tf
import tensorflow_probability as tfp
from deepchem.models.normalizing_flows import NormalizingFlow, NormalizingFlowModel
tfd = tfp.distributions
tfb = tfp.bijectors
  has_tensorflow_probability = True
except:
  has_tensorflow_probability = False
@unittest.skipIf(not has_tensorflow_probability,
'tensorflow_probability not installed')
@pytest.mark.tensorflow
def test_normalizing_flow():
flow_layers = [
tfb.RealNVP(
num_masked=1,
shift_and_log_scale_fn=tfb.real_nvp_default_template(
hidden_layers=[8, 8]))
]
  # 2D Multivariate Gaussian base distribution
nf = NormalizingFlow(
base_distribution=tfd.MultivariateNormalDiag(loc=[0., 0.]),
flow_layers=flow_layers)
nfm = NormalizingFlowModel(nf)
# Must be float32 for RealNVP
target_distribution = tfd.MultivariateNormalDiag(loc=[1., 0.])
dataset = NumpyDataset(X=target_distribution.sample(96))
# Tests a simple flow of one RealNVP layer.
X = nfm.flow.sample()
x1 = tf.zeros([2])
x2 = dataset.X[0]
# log likelihoods should be negative
assert nfm.flow.log_prob(X).numpy() < 0
assert nfm.flow.log_prob(x1).numpy() < 0
assert nfm.flow.log_prob(x2).numpy() < 0
# # Fit model
final = nfm.fit(dataset, nb_epoch=5)
print(final)
assert final > 0
| deepchem/deepchem | deepchem/models/tests/test_normalizing_flows.py | Python | mit | 1,505 | [
"Gaussian"
] | dc09816dd0c92baac195faa9a336fb649aa03f08a14a761f4cedb860ad7e43f9 |
import ast
import pydot
class GraphNodeVisitor(ast.NodeVisitor):
def __init__(self):
self.graph = pydot.Dot(graph_type='graph', **self._dot_graph_kwargs())
def visit(self, node):
if len(node.parents) <= 1:
self.graph.add_node(self._dot_node(node))
if len(node.parents) == 1:
self.graph.add_edge(self._dot_edge(node))
super(GraphNodeVisitor, self).visit(node)
def _dot_graph_kwargs(self):
return {}
def _dot_node(self, node):
return pydot.Node(str(node), label=self._dot_node_label(node), **self._dot_node_kwargs(node))
def _dot_node_label(self, node):
fields_labels = []
for field, value in ast.iter_fields(node):
if not isinstance(value, list):
value_label = None
if not isinstance(value, ast.AST):
value_label = repr(value)
elif len(value.parents) > 1:
value_label = self._dot_node_label(value)
if value_label:
fields_labels.append('{0}={1}'.format(field, value_label))
return 'ast.{0}({1})'.format(node.__class__.__name__, ', '.join(fields_labels))
def _dot_node_kwargs(self, node):
return {
'shape': 'box',
            'fontname': 'Courier'
}
def _dot_edge(self, node):
return pydot.Edge(str(node.parent), str(node), label=self._dot_edge_label(node), **self._dot_edge_kwargs(node))
def _dot_edge_label(self, node):
label = node.parent_field
if not node.parent_field_index is None:
label += '[{0}]'.format(node.parent_field_index)
return label
def _dot_edge_kwargs(self, node):
return {
            'fontname': 'Courier'
}
"""
Source generator node visitor from Python AST was originally written by Armin Ronacher (2008), license BSD.
"""
BOOLOP_SYMBOLS = {
ast.And: 'and',
ast.Or: 'or'
}
BINOP_SYMBOLS = {
ast.Add: '+',
ast.Sub: '-',
ast.Mult: '*',
ast.Div: '/',
ast.FloorDiv: '//',
ast.Mod: '%',
ast.LShift: '<<',
ast.RShift: '>>',
ast.BitOr: '|',
ast.BitAnd: '&',
ast.BitXor: '^',
ast.Pow: '**'
}
CMPOP_SYMBOLS = {
ast.Eq: '==',
ast.Gt: '>',
ast.GtE: '>=',
ast.In: 'in',
ast.Is: 'is',
ast.IsNot: 'is not',
ast.Lt: '<',
ast.LtE: '<=',
ast.NotEq: '!=',
ast.NotIn: 'not in'
}
UNARYOP_SYMBOLS = {
ast.Invert: '~',
ast.Not: 'not',
ast.UAdd: '+',
ast.USub: '-'
}
ALL_SYMBOLS = {}
ALL_SYMBOLS.update(BOOLOP_SYMBOLS)
ALL_SYMBOLS.update(BINOP_SYMBOLS)
ALL_SYMBOLS.update(CMPOP_SYMBOLS)
ALL_SYMBOLS.update(UNARYOP_SYMBOLS)
def to_source(node, indent_with=' ' * 4):
"""This function can convert a node tree back into python sourcecode.
This is useful for debugging purposes, especially if you're dealing with
custom asts not generated by python itself.
It could be that the sourcecode is evaluable when the AST itself is not
compilable / evaluable. The reason for this is that the AST contains some
more data than regular sourcecode does, which is dropped during
conversion.
Each level of indentation is replaced with `indent_with`. Per default this
parameter is equal to four spaces as suggested by PEP 8, but it might be
adjusted to match the application's styleguide.
"""
generator = SourceGeneratorNodeVisitor(indent_with)
generator.visit(node)
return ''.join(generator.result)
class SourceGeneratorNodeVisitor(ast.NodeVisitor):
"""This visitor is able to transform a well formed syntax tree into python
sourcecode. For more details have a look at the docstring of the
`node_to_source` function.
"""
def __init__(self, indent_with):
self.result = []
self.indent_with = indent_with
self.indentation = 0
self.new_line = False
def write(self, x, node=None):
self.correct_line_number(node)
self.result.append(x)
def correct_line_number(self, node):
if self.new_line:
if self.result:
self.result.append('\n')
self.result.append(self.indent_with * self.indentation)
self.new_line = False
if node and hasattr(node, 'lineno'):
lines = len("".join(self.result).split('\n')) if self.result else 0
line_diff = node.lineno - lines
if line_diff:
self.result.append(('\n' + (self.indent_with * self.indentation)) * line_diff)
def newline(self, node=None):
self.new_line = True
self.correct_line_number(node)
def body(self, statements):
self.new_line = True
self.indentation += 1
for stmt in statements:
self.visit(stmt)
self.indentation -= 1
def body_or_else(self, node):
self.body(node.body)
if node.orelse:
self.newline()
self.write('else:')
self.body(node.orelse)
def signature(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(', ')
else:
want_comma.append(True)
padding = [None] * (len(node.args) - len(node.defaults))
for arg, default in zip(node.args, padding + node.defaults):
write_comma()
self.visit(arg)
if default is not None:
self.write('=')
self.visit(default)
if node.vararg is not None:
write_comma()
self.write('*' + node.vararg)
if node.kwarg is not None:
write_comma()
self.write('**' + node.kwarg)
def decorators(self, node):
for decorator in node.decorator_list:
self.newline(decorator)
self.write('@')
self.visit(decorator)
def visit_Assign(self, node):
self.newline(node)
for idx, target in enumerate(node.targets):
if idx:
self.write(', ')
self.visit(target)
self.write(' = ')
self.visit(node.value)
def visit_AugAssign(self, node):
self.newline(node)
self.visit(node.target)
self.write(' ' + BINOP_SYMBOLS[type(node.op)] + '= ')
self.visit(node.value)
def visit_ImportFrom(self, node):
self.newline(node)
imports = []
for alias in node.names:
name = alias.name
if alias.asname:
name += ' as ' + alias.asname
imports.append(name)
self.write('from {0} import {1}'.format(node.module, ', '.join(imports)))
def visit_Import(self, node):
for item in node.names:
self.newline(node)
self.write('import ')
self.visit(item)
def visit_Expr(self, node):
self.newline(node)
self.generic_visit(node)
def visit_FunctionDef(self, node):
self.decorators(node)
self.newline(node)
self.write('def %s(' % node.name, node)
self.signature(node.args)
self.write('):')
self.body(node.body)
def visit_ClassDef(self, node):
have_args = []
def paren_or_comma():
if have_args:
self.write(', ')
else:
have_args.append(True)
self.write('(')
self.decorators(node)
self.newline(node)
self.write('class %s' % node.name, node)
for base in node.bases:
paren_or_comma()
self.visit(base)
if hasattr(node, 'keywords'):
for keyword in node.keywords:
paren_or_comma()
self.write(keyword.arg + '=')
self.visit(keyword.value)
if node.starargs is not None:
paren_or_comma()
self.write('*')
self.visit(node.starargs)
if node.kwargs is not None:
paren_or_comma()
self.write('**')
self.visit(node.kwargs)
self.write(have_args and '):' or ':')
self.body(node.body)
def visit_If(self, node):
self.newline(node)
self.write('if ')
self.visit(node.test)
self.write(':')
self.body(node.body)
while node.orelse:
else_ = node.orelse
if len(else_) == 1 and isinstance(else_[0], ast.If):
node = else_[0]
self.newline()
self.write('elif ')
self.visit(node.test)
self.write(':')
self.body(node.body)
else:
self.newline()
self.write('else:')
self.body(else_)
break
def visit_For(self, node):
self.newline(node)
self.write('for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
self.write(':')
self.body_or_else(node)
def visit_While(self, node):
self.newline(node)
self.write('while ')
self.visit(node.test)
self.write(':')
self.body_or_else(node)
def visit_Pass(self, node):
self.newline(node)
self.write('pass', node)
def visit_Delete(self, node):
self.newline(node)
self.write('del ')
for target in node.targets:
self.visit(target)
if target is not node.targets[-1]:
self.write(', ')
def visit_Global(self, node):
self.newline(node)
self.write('global ' + ', '.join(node.names))
def visit_Nonlocal(self, node):
self.newline(node)
self.write('nonlocal ' + ', '.join(node.names))
def visit_Return(self, node):
self.newline(node)
self.write('return')
if node.value:
self.write(' ')
self.visit(node.value)
def visit_Break(self, node):
self.newline(node)
self.write('break')
def visit_Continue(self, node):
self.newline(node)
self.write('continue')
def visit_Attribute(self, node):
self.visit(node.value)
self.write('.' + node.attr)
def visit_Call(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(', ')
else:
want_comma.append(True)
self.visit(node.func)
self.write('(')
for arg in node.args:
write_comma()
self.visit(arg)
for keyword in node.keywords:
write_comma()
self.write(keyword.arg + '=')
self.visit(keyword.value)
if node.starargs is not None:
write_comma()
self.write('*')
self.visit(node.starargs)
if node.kwargs is not None:
write_comma()
self.write('**')
self.visit(node.kwargs)
self.write(')')
def visit_Name(self, node):
self.write(node.id)
def visit_Str(self, node):
self.write(repr(node.s))
def visit_Bytes(self, node):
self.write(repr(node.s))
def visit_Num(self, node):
self.write(repr(node.n))
def visit_Tuple(self, node):
self.write('(')
idx = -1
for idx, item in enumerate(node.elts):
if idx:
self.write(', ')
self.visit(item)
self.write(idx and ')' or ',)')
def sequence_visit(left, right):
def visit(self, node):
self.write(left)
for idx, item in enumerate(node.elts):
if idx:
self.write(', ')
self.visit(item)
self.write(right)
return visit
visit_List = sequence_visit('[', ']')
visit_Set = sequence_visit('{', '}')
del sequence_visit
def visit_Dict(self, node):
self.write('{')
for idx, (key, value) in enumerate(zip(node.keys, node.values)):
if idx:
self.write(', ')
self.visit(key)
self.write(': ')
self.visit(value)
self.write('}')
def visit_BinOp(self, node):
self.visit(node.left)
self.write(' %s ' % BINOP_SYMBOLS[type(node.op)])
self.visit(node.right)
def visit_BoolOp(self, node):
self.write('(')
for idx, value in enumerate(node.values):
if idx:
self.write(' %s ' % BOOLOP_SYMBOLS[type(node.op)])
self.visit(value)
self.write(')')
def visit_Compare(self, node):
self.visit(node.left)
for op, right in zip(node.ops, node.comparators):
self.write(' %s ' % CMPOP_SYMBOLS[type(op)])
self.visit(right)
def visit_UnaryOp(self, node):
self.write('(')
op = UNARYOP_SYMBOLS[type(node.op)]
self.write(op)
if op == 'not':
self.write(' ')
self.visit(node.operand)
self.write(')')
def visit_Subscript(self, node):
self.visit(node.value)
self.write('[')
self.visit(node.slice)
self.write(']')
def visit_Slice(self, node):
if node.lower is not None:
self.visit(node.lower)
self.write(':')
if node.upper is not None:
self.visit(node.upper)
if node.step is not None:
self.write(':')
if not (isinstance(node.step, ast.Name) and node.step.id == 'None'):
self.visit(node.step)
def visit_ExtSlice(self, node):
for idx, item in enumerate(node.dims):
if idx:
self.write(', ')
self.visit(item)
def visit_Yield(self, node):
self.write('yield')
if node.value:
self.write(' ')
self.visit(node.value)
def visit_YieldFrom(self, node):
self.write('yield from ')
self.visit(node.value)
def visit_Lambda(self, node):
self.write('lambda ')
self.signature(node.args)
self.write(': ')
self.visit(node.body)
def visit_Ellipsis(self, node):
self.write('...')
def generator_visit(left, right):
def visit(self, node):
self.write(left)
self.visit(node.elt)
for comprehension in node.generators:
self.visit(comprehension)
self.write(right)
return visit
visit_ListComp = generator_visit('[', ']')
visit_GeneratorExp = generator_visit('(', ')')
visit_SetComp = generator_visit('{', '}')
del generator_visit
def visit_DictComp(self, node):
self.write('{')
self.visit(node.key)
self.write(': ')
self.visit(node.value)
for comprehension in node.generators:
self.visit(comprehension)
self.write('}')
def visit_IfExp(self, node):
self.visit(node.body)
self.write(' if ')
self.visit(node.test)
self.write(' else ')
self.visit(node.orelse)
def visit_Starred(self, node):
self.write('*')
self.visit(node.value)
def visit_alias(self, node):
self.write(node.name)
if node.asname is not None:
self.write(' as ' + node.asname)
def visit_comprehension(self, node):
self.write(' for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
if node.ifs:
for if_ in node.ifs:
self.write(' if ')
self.visit(if_)
def visit_arg(self, node):
self.write(node.arg)
def visit_Print(self, node):
self.newline(node)
self.write('print ')
want_comma = False
if node.dest is not None:
self.write('>> ')
self.visit(node.dest)
want_comma = True
for value in node.values:
if want_comma:
self.write(', ')
self.visit(value)
want_comma = True
if not node.nl:
self.write(',')
def visit_TryExcept(self, node):
self.newline(node)
self.write('try:')
self.body(node.body)
for handler in node.handlers:
self.visit(handler)
def visit_TryFinally(self, node):
self.newline(node)
self.write('try:')
self.body(node.body)
self.newline(node)
self.write('finally:')
self.body(node.finalbody)
def visit_Try(self, node):
self.newline(node)
self.write('try:')
self.body(node.body)
for handler in node.handlers:
self.visit(handler)
if node.finalbody:
self.newline(node)
self.write('finally:')
self.body(node.finalbody)
def visit_ExceptHandler(self, node):
self.newline(node)
self.write('except')
if node.type is not None:
self.write(' ')
self.visit(node.type)
if node.name is not None:
self.write(' as ')
if isinstance(node.name, ast.AST):
self.visit(node.name)
else:
self.write(node.name)
self.write(':')
self.body(node.body)
def visit_Raise(self, node):
self.newline(node)
self.write('raise')
if hasattr(node, 'exc') and node.exc is not None:
self.write(' ')
self.visit(node.exc)
if node.cause is not None:
self.write(' from ')
self.visit(node.cause)
elif hasattr(node, 'type') and node.type is not None:
self.write(' ')
self.visit(node.type)
if node.inst is not None:
self.write(', ')
self.visit(node.inst)
if node.tback is not None:
self.write(', ')
self.visit(node.tback)
def visit_With(self, node):
self.newline(node)
self.write('with ')
if hasattr(node, 'items'):
for with_item in node.items:
self.visit(with_item.context_expr)
if with_item.optional_vars is not None:
self.write(' as ')
self.visit(with_item.optional_vars)
if with_item != node.items[-1]:
self.write(', ')
elif hasattr(node, 'context_expr'):
self.visit(node.context_expr)
if node.optional_vars is not None:
self.write(' as ')
self.visit(node.optional_vars)
self.write(':')
self.body(node.body)
def visit_Repr(self, node):
self.write('`')
self.visit(node.value)
self.write('`')
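# --- Editorial sketch, not part of the original module ---------------------
# to_source() can round-trip a freshly parsed snippet back into equivalent
# source text (node line numbers are honoured, so padding newlines may be
# emitted for nodes further down a real file).
def _demo_to_source():
    tree = ast.parse("def add(a, b):\n    return a + b\n")
    return to_source(tree)
    # -> 'def add(a, b):\n    return a + b'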
| librallu/RICM4Projet | parser/astmonkey/astmonkey-0.1.1/build/lib/astmonkey/visitors.py | Python | gpl-3.0 | 19,017 | [
"VisIt"
] | 9c35a0b1541c03c79281dfe50c90c94631a9b2e9816179f57047eb47755b8388 |
from i3pystatus.core.util import internet, require
from i3pystatus.weather import WeatherBackend
from datetime import datetime
from urllib.request import urlopen
GEOLOOKUP_URL = 'http://api.wunderground.com/api/%s/geolookup%s/q/%s.json'
STATION_QUERY_URL = 'http://api.wunderground.com/api/%s/%s/q/%s.json'
class Wunderground(WeatherBackend):
'''
This module retrieves weather data using the Weather Underground API.
.. note::
A Weather Underground API key is required to use this module, you can
sign up for a developer API key free at
https://www.wunderground.com/weather/api/
Valid values for ``location_code`` include:
* **State/City_Name** - CA/San_Francisco
* **Country/City** - France/Paris
* **Geolocation by IP** - autoip
* **Zip or Postal Code** - 60616
* **ICAO Airport Code** - icao:LAX
* **Latitude/Longitude** - 41.8301943,-87.6342619
* **Personal Weather Station (PWS)** - pws:KILCHICA30
When not using a ``pws`` or ``icao`` station ID, the location will be
queried (this uses an API query), and the closest station will be used.
For a list of PWS station IDs, visit the following URL:
http://www.wunderground.com/weatherstation/ListStations.asp
.. rubric:: API usage
An API key is allowed 500 queries per day, and no more than 10 in a
given minute. Therefore, it is recommended to be conservative when
setting the update interval (the default is 1800 seconds, or 30
minutes), and one should be careful how often one restarts i3pystatus
and how often a refresh is forced by left-clicking the module.
As noted above, when not using a ``pws`` or ``icao`` station ID, an API
query will be used to determine the station ID to use. This will be
done once when i3pystatus is started, and not repeated until the next
time i3pystatus is started.
When updating weather data, one API query will be used to obtain the
current conditions. The high/low temperature forecast requires an
    additional API query, and is optional (disabled by default). To enable
forecast checking, set ``forecast=True``.
.. _weather-usage-wunderground:
.. rubric:: Usage example
.. code-block:: python
from i3pystatus import Status
from i3pystatus.weather import wunderground
status = Status(logfile='/home/username/var/i3pystatus.log')
status.register(
'weather',
format='{condition} {current_temp}{temp_unit}[ {icon}][ Hi: {high_temp}][ Lo: {low_temp}][ {update_error}]',
colorize=True,
hints={'markup': 'pango'},
backend=wunderground.Wunderground(
api_key='dbafe887d56ba4ad',
location_code='pws:MAT645',
units='imperial',
forecast=True,
update_error='<span color="#ff0000">!</span>',
),
)
status.run()
See :ref:`here <weather-formatters>` for a list of formatters which can be
used.
'''
settings = (
('api_key', 'Weather Underground API key'),
('location_code', 'Location code from wunderground.com'),
('units', '\'metric\' or \'imperial\''),
('use_pws', 'Set to False to use only airport stations'),
('forecast', 'Set to ``True`` to check forecast (generates one '
'additional API request per weather update). If set to '
'``False``, then the ``low_temp`` and ``high_temp`` '
'formatters will be set to empty strings.'),
('update_error', 'Value for the ``{update_error}`` formatter when an '
'error is encountered while checking weather data'),
)
required = ('api_key', 'location_code')
api_key = None
location_code = None
units = 'metric'
use_pws = True
forecast = False
update_error = '!'
# These will be set once weather data has been checked
station_id = None
forecast_url = None
@require(internet)
def init(self):
'''
Use the location_code to perform a geolookup and find the closest
station. If the location is a pws or icao station ID, no lookup will be
        performed.
'''
try:
for no_lookup in ('pws', 'icao'):
sid = self.location_code.partition(no_lookup + ':')[-1]
if sid:
self.station_id = self.location_code
return
except AttributeError:
# Numeric or some other type, either way we'll just stringify
# it below and perform a lookup.
pass
extra_opts = '/pws:0' if not self.use_pws else ''
api_url = GEOLOOKUP_URL % (self.api_key,
extra_opts,
self.location_code)
response = self.api_request(api_url)
station_type = 'pws' if self.use_pws else 'airport'
try:
stations = response['location']['nearby_weather_stations']
nearest = stations[station_type]['station'][0]
except (KeyError, IndexError):
raise Exception(
'No locations matched location_code %s' % self.location_code)
if self.use_pws:
nearest_pws = nearest.get('id', '')
if not nearest_pws:
raise Exception('No id entry for nearest PWS')
self.station_id = 'pws:%s' % nearest_pws
else:
nearest_airport = nearest.get('icao', '')
if not nearest_airport:
raise Exception('No icao entry for nearest airport')
self.station_id = 'icao:%s' % nearest_airport
@require(internet)
def get_forecast(self):
'''
If configured to do so, make an API request to retrieve the forecast
data for the configured/queried weather station, and return the low and
high temperatures. Otherwise, return two empty strings.
'''
no_data = ('', '')
if self.forecast:
query_url = STATION_QUERY_URL % (self.api_key,
'forecast',
self.station_id)
try:
response = self.api_request(query_url)['forecast']
response = response['simpleforecast']['forecastday'][0]
except (KeyError, IndexError, TypeError):
self.logger.error(
'No forecast data found for %s', self.station_id)
self.data['update_error'] = self.update_error
return no_data
unit = 'celsius' if self.units == 'metric' else 'fahrenheit'
low_temp = response.get('low', {}).get(unit, '')
high_temp = response.get('high', {}).get(unit, '')
return low_temp, high_temp
else:
return no_data
def check_response(self, response):
try:
return response['response']['error']['description']
except KeyError:
# No error in response
return False
@require(internet)
def check_weather(self):
'''
Query the configured/queried station and return the weather data
'''
self.data['update_error'] = ''
try:
query_url = STATION_QUERY_URL % (self.api_key,
'conditions',
self.station_id)
try:
response = self.api_request(query_url)['current_observation']
self.forecast_url = response.pop('ob_url', None)
except KeyError:
self.logger.error('No weather data found for %s', self.station_id)
self.data['update_error'] = self.update_error
return
if self.forecast:
query_url = STATION_QUERY_URL % (self.api_key,
'forecast',
self.station_id)
try:
forecast = self.api_request(query_url)['forecast']
forecast = forecast['simpleforecast']['forecastday'][0]
except (KeyError, IndexError, TypeError):
self.logger.error(
'No forecast data found for %s', self.station_id)
# This is a non-fatal error, so don't return but do set the
# error flag.
self.data['update_error'] = self.update_error
unit = 'celsius' if self.units == 'metric' else 'fahrenheit'
low_temp = forecast.get('low', {}).get(unit, '')
high_temp = forecast.get('high', {}).get(unit, '')
else:
low_temp = high_temp = ''
if self.units == 'metric':
temp_unit = 'c'
speed_unit = 'kph'
distance_unit = 'km'
pressure_unit = 'mb'
else:
temp_unit = 'f'
speed_unit = 'mph'
distance_unit = 'mi'
pressure_unit = 'in'
def _find(key, data=None, default=''):
if data is None:
data = response
return str(data.get(key, default))
try:
observation_epoch = _find('observation_epoch') or _find('local_epoch')
observation_time = datetime.fromtimestamp(int(observation_epoch))
except (TypeError, ValueError):
                self.logger.debug(
'Observation time \'%s\' is not a UNIX timestamp',
observation_epoch
)
observation_time = datetime.fromtimestamp(0)
self.data['city'] = _find('city', response['observation_location'])
self.data['condition'] = _find('weather')
self.data['observation_time'] = observation_time
self.data['current_temp'] = _find('temp_' + temp_unit).split('.')[0]
self.data['low_temp'] = low_temp
self.data['high_temp'] = high_temp
self.data['temp_unit'] = '°' + temp_unit.upper()
self.data['feelslike'] = _find('feelslike_' + temp_unit)
self.data['dewpoint'] = _find('dewpoint_' + temp_unit)
self.data['wind_speed'] = _find('wind_' + speed_unit)
self.data['wind_unit'] = speed_unit
self.data['wind_direction'] = _find('wind_dir')
self.data['wind_gust'] = _find('wind_gust_' + speed_unit)
self.data['pressure'] = _find('pressure_' + pressure_unit)
self.data['pressure_unit'] = pressure_unit
self.data['pressure_trend'] = _find('pressure_trend')
self.data['visibility'] = _find('visibility_' + distance_unit)
self.data['visibility_unit'] = distance_unit
self.data['humidity'] = _find('relative_humidity').rstrip('%')
self.data['uv_index'] = _find('UV')
except Exception:
# Don't let an uncaught exception kill the update thread
self.logger.error(
'Uncaught error occurred while checking weather. '
'Exception follows:', exc_info=True
)
self.data['update_error'] = self.update_error
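# --- Editorial sketch, not part of the original module ---------------------
# Shows how the two module-level URL templates are filled in. The API key and
# station ID below are the placeholder values from the class docstring above,
# not real credentials.
if __name__ == '__main__':
    _demo_key = 'dbafe887d56ba4ad'
    print(GEOLOOKUP_URL % (_demo_key, '', 'France/Paris'))
    print(STATION_QUERY_URL % (_demo_key, 'conditions', 'pws:MAT645'))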
| richese/i3pystatus | i3pystatus/weather/wunderground.py | Python | mit | 11,545 | [
"VisIt"
] | e3bfa284aa4e0276c627c07dd5305218787eeb588ed8cd5d0e7d7827775394b9 |
import numpy as np
from NeuronNetworkLayer import NeuronNetworkLayer
def add_dirac_pulse(V, firings, dirac_pulse=30):
"""
Given a record of when neurons fired, will add Dirac pulses of the given magnitude to the
membrane potentials in V.
"""
if firings.size > 0:
V[firings[:, 0], firings[:, 1]] = dirac_pulse
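# --- Editorial sketch, not part of the original module ---------------------
# add_dirac_pulse() relies on fancy indexing: each (row, column) pair in
# `firings` marks one entry of V that is bumped to the pulse height. The 3x4
# membrane-potential array below is made up.
def _demo_add_dirac_pulse():
    V = np.zeros((3, 4))
    firings = np.array([[0, 1], [2, 3]])
    add_dirac_pulse(V, firings, dirac_pulse=30)
    return V  # V[0, 1] == V[2, 3] == 30; every other entry stays 0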
class NeuronNetwork(object):
"""
Generalised model of a neuron network. Controls interaction between various neuron layers.
"""
@classmethod
def create_feed_forward(cls, neuron_layer_class, no_of_neurons_in_layers, scaling_factor):
layers = [neuron_layer_class(n) for n in no_of_neurons_in_layers]
net = cls(layers)
for layer_index in range(1, len(layers)):
from_layer, to_layer = layers[layer_index - 1:layer_index + 1]
layer_scaling_factor = scaling_factor / np.sqrt(from_layer.N)
net.connect_layers([from_layer, to_layer], scaling_factor=layer_scaling_factor)
return net
def __init__(self, neuron_layers, d_max=25):
"""
Initialises the neuron network with given layers.
neuron_layers -- collection of neuron layer objects (Quadratic, HodgkinHuxley, Izkevich)
"""
self.layers = dict(zip(range(len(neuron_layers)), neuron_layers))
self.dt = 0.2
self.d_max = d_max
def tick(self, t):
"""
Advances the simulation by one step on all layers
"""
for layer_index in xrange(len(self.layers)):
self.tick_layer(layer_index, t)
def tick_layer(self, layer_index, t):
"""
Advances a specific layer by one tick in the simulation.
layer_index -- the index of the layer to tick
t -- the current sim time, for marking fired neurons
"""
layer = self.layers[layer_index]
for input_layer_index, S in layer.S.iteritems():
input_layer = self.layers[input_layer_index]
delay = layer.delay[input_layer_index]
scaling_factor = layer.factor[input_layer_index]
for (firing_time, neuron_index) in input_layer.firings_after(t - self.d_max):
idx = np.where(delay[:, neuron_index] == (t - firing_time))[0]
layer.I[idx] += scaling_factor * S[idx, neuron_index]
layer.tick(self.dt, t)
def connect_layers(self, layers, scaling_factor=None, delay=None, S=None):
"""
Connects the configured layers with the following parameters...
layers -- [target_idx, input_idx]
scaling_factor -- scaling for input voltage
delay -- conduction delay, ms after a neuron fires from layer and is picked up in to layer
S -- synaptic connection strength matrix
"""
if len(layers) != 2:
raise StandardError("Expected layers to be an array of [target_index,input_index]")
target_idx, input_idx = layers
target_layer = self.layers[target_idx]
target_layer.S[input_idx] = S
target_layer.factor[input_idx] = scaling_factor
target_layer.delay[input_idx] = delay
return self
def _identify_layer(self, layer_item):
"""
        Receives either a layer index or a layer instance, and returns (layer_index, layer)
"""
if isinstance(layer_item, NeuronNetworkLayer):
return [(index, layer_item) for index, _ in self.layers.items() if _ == layer_item][0]
else:
return (layer_item, self.layers[layer_item])
| lawrencejones/neuro | Exercise_4/neuro/NeuronNetwork.py | Python | gpl-3.0 | 3,528 | [
"DIRAC",
"NEURON"
] | 26a82caa2cd6d293721561e3649663f38249fd4918837eac91f6dcd310777f77 |
"""Tests for composite filter """
from __future__ import (print_function, absolute_import, division, unicode_literals)
import os
import pytest
import numpy as np
import nebulio
import pysynphot
from nebulio.tests.utils import this_func_name
from matplotlib import pyplot as plt
def plot_filterset(fs):
title = this_func_name(2)
fig, ax = plt.subplots()
# Plot each bandpass
for bp in fs.bandpasses:
ax.plot(bp.wave, bp.T, '-', label=bp.fname)
for emline in fs.emlines:
if emline.fwhm_kms is not None:
for wav0, strength, fwhm in zip(emline.wave, emline.intensity,
emline.fwhm_angstrom):
gauss = pysynphot.GaussianSource(1.0, wav0, fwhm)
ax.plot(gauss.wave,
strength*gauss.flux*fs.bandpasses[0].T.max()/(emline.intensity[0]*gauss.flux.max()),
label='{:.2f} A'.format(wav0))
ax.set_title(title)
ax.set_xlim(min([bp.wav0 - bp.Wj for bp in fs.bandpasses[:2]]),
max([bp.wav0 + bp.Wj for bp in fs.bandpasses[:2]]))
ax.legend()
plotfile = os.path.join("plots", '{}.pdf'.format(this_func_name(2)))
fig.savefig(plotfile)
def plot_composite_bandpass(cbp, emline=None):
title = this_func_name(2)
fig, ax = plt.subplots()
# Plot the composite bandpass
ax.plot(cbp.wave, cbp.T, '-', label='composite')
# Plot the individual constituent bandpasses
for bp in cbp.bandpasses:
ax.plot(bp.wave, bp.T, '-', label=bp.fname)
# Plot Gaussian profiles of all components of emission line multiplet
if emline is not None:
if emline.fwhm_kms is not None:
title += ' {}, V = {:.1f} km/s, W = {:.1f} km/s'.format(
emline.lineid, emline.velocity, emline.fwhm_kms)
for wav0, strength, fwhm in zip(emline.wave, emline.intensity,
emline.fwhm_angstrom):
gauss = pysynphot.GaussianSource(1.0, wav0, fwhm)
ax.plot(gauss.wave, gauss.flux*cbp.T.max()/gauss.flux.max(),
label='{:.2f} A'.format(wav0))
ax.set_xlim(cbp.wav0 - cbp.Wj, cbp.wav0 + cbp.Wj)
ax.set_title(title)
ax.legend()
plotfile = os.path.join("plots", '{}.pdf'.format(this_func_name(2)))
fig.savefig(plotfile)
def test_twin_sii_filter():
fnames = ['wfc3,uvis1,FQ672N', 'wfc3,uvis1,FQ674N']
cbp = nebulio.CompositeBandpass(fnames)
sii_doublet = nebulio.EmissionLine("[S II] 6724", velocity=25.0, fwhm_kms=40.0)
plot_composite_bandpass(cbp, sii_doublet)
assert cbp.Wj*cbp.Tm == np.sum([bp.Wj*bp.Tm for bp in cbp.bandpasses])
def test_filterset_with_composite_sii():
fs = nebulio.Filterset(
bpnames=[['wfc3,uvis1,FQ672N', 'wfc3,uvis1,FQ674N'],
"wfc3,uvis1,F673N", "wfc3,uvis1,F547M"],
lineids=['[S II] 6724', '[S II] 6724'],
velocity=25.0, fwhm_kms=20.0
)
print(fs.__dict__)
plot_filterset(fs)
assert True # what to test?
def test_filterset_with_nii_ha():
fs = nebulio.Filterset(
bpnames=["wfc3,uvis1,F658N", "wfc3,uvis1,F656N", "wfc3,uvis1,F547M"],
lineids=['[N II] 6583', 'H I 6563'],
velocity=25.0, fwhm_kms=20.0
)
print(fs.__dict__)
plot_filterset(fs)
assert True # what to test?
| deprecated/nebulio | nebulio/tests/test_composite.py | Python | mit | 3,402 | [
"Gaussian"
] | c61d29e6dfe9e4aa3d348d154768da83d5ca18e5f24a35f56fe40b60cccd201b |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Prescription'
db.create_table(u'patient_prescription', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('patient', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['patient.PatientInformation'])),
('date', self.gf('django.db.models.fields.DateField')()),
('name_of_prescription', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'patient', ['Prescription'])
# Changing field 'Routinecheckup.date'
db.alter_column(u'patient_routinecheckup', 'date', self.gf('django.db.models.fields.DateField')())
# Changing field 'LaboratoryTest.date'
db.alter_column(u'patient_laboratorytest', 'date', self.gf('django.db.models.fields.DateField')())
# Changing field 'UltrasoundScanning.date'
db.alter_column(u'patient_ultrasoundscanning', 'date', self.gf('django.db.models.fields.DateField')())
def backwards(self, orm):
# Deleting model 'Prescription'
db.delete_table(u'patient_prescription')
# Changing field 'Routinecheckup.date'
db.alter_column(u'patient_routinecheckup', 'date', self.gf('django.db.models.fields.DateTimeField')())
# Changing field 'LaboratoryTest.date'
db.alter_column(u'patient_laboratorytest', 'date', self.gf('django.db.models.fields.DateTimeField')())
# Changing field 'UltrasoundScanning.date'
db.alter_column(u'patient_ultrasoundscanning', 'date', self.gf('django.db.models.fields.DateTimeField')())
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'patient.additionalpatientinformation': {
'Meta': {'object_name': 'AdditionalPatientInformation'},
'alcohol': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'cigarettes': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'cooking_facilities': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'educational_level': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'literate': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'other_harmful_substances': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'psychological_stress': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'toilet_facilities': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'patient.familymedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'FamilyMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others': ('django.db.models.fields.TextField', [], {}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.guardian': {
'Meta': {'object_name': 'Guardian'},
'contact_number': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'educational_level': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'home_address': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'relation': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.gynaecologicalhistory': {
'Meta': {'object_name': 'GynaecologicalHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_of_last_pap_smear': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'method_of_birth_control': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'previous_surgery': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PreviousSurgery']"}),
'result_pap_smear': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.immunizationhistory': {
'Meta': {'object_name': 'ImmunizationHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'tetanus_toxoid1': ('django.db.models.fields.DateTimeField', [], {}),
'tetanus_toxoid2': ('django.db.models.fields.DateTimeField', [], {}),
'tetanus_toxoid3': ('django.db.models.fields.DateTimeField', [], {}),
'vaccination': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.laboratorytest': {
'Meta': {'object_name': 'LaboratoryTest'},
'blood_group': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'hemoglobin': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'serological_test_for_syphilis': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'urinalysis': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.medicalhistory': {
'Meta': {'object_name': 'MedicalHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'family_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.FamilyMedicalHistory']"}),
'gynaecological_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.GynaecologicalHistory']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'immunization_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.ImmunizationHistory']"}),
'menstrual_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.MenstrualHistory']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'obstetric_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.ObstetricHistory']"}),
'past_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PastMedicalHistory']"}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'present_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PresentMedicalHistory']"})
},
u'patient.menstrualhistory': {
'Meta': {'object_name': 'MenstrualHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'day_of_visit': ('django.db.models.fields.DateField', [], {}),
'expected_date_of_delivery': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_menstrual_periods': ('django.db.models.fields.DateField', [], {}),
'menstrual_cycle': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'poa_by_lmp': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'patient.obstetrichistory': {
'Meta': {'object_name': 'ObstetricHistory'},
'check_if_you_have_been_miscarriages': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'check_if_you_have_been_pregnant': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list_previous_obstetric_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PreviousObstetricHistory']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"})
},
u'patient.pastmedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'PastMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others': ('django.db.models.fields.TextField', [], {}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.patientinformation': {
'Meta': {'object_name': 'PatientInformation'},
'address': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'marital_status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'operator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'telephone_number': ('django.db.models.fields.CharField', [], {'max_length': '15'})
},
u'patient.prescription': {
'Meta': {'object_name': 'Prescription'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_of_prescription': ('django.db.models.fields.TextField', [], {}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"})
},
u'patient.presentmedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'PresentMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others': ('django.db.models.fields.TextField', [], {}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.previousobstetrichistory': {
'Meta': {'object_name': 'PreviousObstetricHistory'},
'age_of_baby': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'birth_weight': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length_of_pregnancy': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_of_baby': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'obstetrical_operation': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'periods_of_exclusive_feeding': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'problems': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'types_of_delivery': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.DateField', [], {})
},
u'patient.previoussurgery': {
'Meta': {'object_name': 'PreviousSurgery'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'endometriosis': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'fibrocystic_breasts': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others_please_state': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'ovarian_cysts': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'uterine_fibroids': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'patient.report': {
'Meta': {'object_name': 'Report'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'diabetis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hiv': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pregnancy': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.routinecheckup': {
'Meta': {'object_name': 'Routinecheckup'},
'abdominal_changes': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'blood_pressure': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'chest_and_heart_auscultation': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'fetal_movement': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'height': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_of_examiner': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'symptom_events': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'uterine_height': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'vaginal_examination': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'visit': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'weight': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'patient.signanaemia': {
'Meta': {'object_name': 'Signanaemia'},
'conjunctiva': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fingernails': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'oral_mucosa': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'others_please_state': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'pale_complexion': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'shortness_of_breath': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tip_of_tongue': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'patient.ultrasoundscanning': {
'AC': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'BPD': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'CRL': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'FL': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'HC': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'Meta': {'object_name': 'UltrasoundScanning'},
'amount_of_amniotic_fluid': ('django.db.models.fields.IntegerField', [], {'max_length': '10'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'gestation_age': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_examiner': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'position_of_the_baby': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'position_of_the_placenta': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'saved_ultrasound_image': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['patient'] | aazhbd/medical_info01 | patient/migrations/0009_auto__add_prescription__chg_field_routinecheckup_date__chg_field_labor.py | Python | bsd-3-clause | 30,927 | [
"VisIt"
] | b67ce273139217b4901b4190d5aa240d68a7ad2d6acfead8618e66940661b098 |
"""Prototype pattern
"""
import copy
from collections import OrderedDict
class Book:
def __init__(self, name, authors, price, **kwargs):
"""Examples of kwargs: publisher, length, tags, publication date"""
self.name = name
self.authors = authors
self.price = price
self.__dict__.update(kwargs)
def __str__(self):
mylist = []
ordered = OrderedDict(sorted(self.__dict__.items()))
for i in ordered.keys():
mylist.append("{}: {}".format(i, ordered[i]))
if i == "price":
mylist.append("$")
mylist.append("\n")
return "".join(mylist)
class Prototype:
def __init__(self):
self.objects = dict()
def register(self, identifier, obj):
self.objects[identifier] = obj
def unregister(self, identifier):
del self.objects[identifier]
def clone(self, identifier, **attr):
found = self.objects.get(identifier)
if not found:
raise ValueError("Incorrect object identifier: {}".format(identifier))
obj = copy.deepcopy(found)
obj.__dict__.update(attr)
return obj
def main():
b1 = Book(
name="The C Programming Language",
authors=("Brian W. Kernighan", "Dennis M.Ritchie"),
price=118,
publisher="Prentice Hall",
length=228,
publication_date="1978-02-22",
tags=("C", "programming", "algorithms", "data structures"),
)
prototype = Prototype()
cid = "k&r-first"
prototype.register(cid, b1)
b2 = prototype.clone(
cid,
name="The C Programming Language (ANSI)",
price=48.99,
length=274,
publication_date="1988-04-01",
edition=2,
)
for i in (b1, b2):
print(i)
print("ID b1 : {} != ID b2 : {}".format(id(b1), id(b2)))
if __name__ == "__main__":
main()
| jackaljack/design-patterns | prototype.py | Python | mit | 1,912 | [
"Brian"
] | be4613f83a2acc0ba38bd6a86ab374482d1053627ac4584fab15eaac74f59283 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2020, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Quantum ESPRESSO
#####################
"""
from .cp.dynamics import parse_symbols_from_input, parse_evp, parse_xyz
| exa-analytics/atomic | exatomic/qe/__init__.py | Python | apache-2.0 | 257 | [
"Quantum ESPRESSO"
] | 63405e2f2f0c941f62163de8d88cf8d846c60fb6b4862939d614edc063ae5c19 |
"""
Tabular datatype
"""
import pkg_resources
pkg_resources.require( "bx-python" )
import logging
import data
from galaxy import util
from cgi import escape
from galaxy.datatypes import metadata
from galaxy.datatypes.metadata import MetadataElement
from sniff import *
log = logging.getLogger(__name__)
class Tabular( data.Text ):
"""Tab delimited data"""
"""Add metadata elements"""
MetadataElement( name="comment_lines", default=0, desc="Number of comment lines", readonly=False, optional=True, no_value=0 )
MetadataElement( name="columns", default=0, desc="Number of columns", readonly=True, visible=False, no_value=0 )
MetadataElement( name="column_types", default=[], desc="Column types", param=metadata.ColumnTypesParameter, readonly=True, visible=False, no_value=[] )
def init_meta( self, dataset, copy_from=None ):
data.Text.init_meta( self, dataset, copy_from=copy_from )
def set_meta( self, dataset, overwrite = True, skip = None, **kwd ):
"""
Tries to determine the number of columns as well as those columns
that contain numerical values in the dataset. A skip parameter is
used because various tabular data types reuse this function, and
        their data type classes are responsible for determining how many invalid
comment lines should be skipped. Using None for skip will cause skip
to be zero, but the first line will be processed as a header.
Items of interest:
1. We treat 'overwrite' as always True (we always want to set tabular metadata when called).
2. If a tabular file has no data, it will have one column of type 'str'.
3. We used to check only the first 100 lines when setting metadata and this class's
set_peek() method read the entire file to determine the number of lines in the file.
Since metadata can now be processed on cluster nodes, we've merged the line count portion
of the set_peek() processing here, and we now check the entire contents of the file.
"""
# Store original skip value to check with later
requested_skip = skip
if skip is None:
skip = 0
column_type_set_order = [ 'int', 'float', 'list', 'str' ] #Order to set column types in
default_column_type = column_type_set_order[-1] # Default column type is lowest in list
column_type_compare_order = list( column_type_set_order ) #Order to compare column types
column_type_compare_order.reverse()
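        # After the reverse, the compare order is ['str', 'list', 'float', 'int']:
        # an earlier (more general) type overrules a later one, so a column's
        # detected type can only widen as more rows are scanned.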
def type_overrules_type( column_type1, column_type2 ):
if column_type1 is None or column_type1 == column_type2:
return False
if column_type2 is None:
return True
for column_type in column_type_compare_order:
if column_type1 == column_type:
return True
if column_type2 == column_type:
return False
            # neither column type was found in our ordered list; this cannot happen
            raise ValueError( "Tried to compare unknown column types" )
def is_int( column_text ):
try:
int( column_text )
return True
except:
return False
def is_float( column_text ):
try:
float( column_text )
return True
except:
if column_text.strip().lower() == 'na':
return True #na is special cased to be a float
return False
def is_list( column_text ):
return "," in column_text
def is_str( column_text ):
#anything, except an empty string, is True
if column_text == "":
return False
return True
        is_column_type = {} #Dict mapping each column type name to its checking function
for column_type in column_type_set_order:
is_column_type[column_type] = locals()[ "is_%s" % ( column_type ) ]
def guess_column_type( column_text ):
for column_type in column_type_set_order:
if is_column_type[column_type]( column_text ):
return column_type
return None
data_lines = 0
comment_lines = 0
column_types = []
first_line_column_types = [default_column_type] # default value is one column of type str
if dataset.has_data():
#NOTE: if skip > num_check_lines, we won't detect any metadata, and will use default
for i, line in enumerate( file ( dataset.file_name ) ):
line = line.rstrip( '\r\n' )
if i < skip or not line or line.startswith( '#' ):
# We'll call blank lines comments
comment_lines += 1
else:
data_lines += 1
fields = line.split( '\t' )
for field_count, field in enumerate( fields ):
if field_count >= len( column_types ): #found a previously unknown column, we append None
column_types.append( None )
column_type = guess_column_type( field )
if type_overrules_type( column_type, column_types[field_count] ):
column_types[field_count] = column_type
if i == 0 and requested_skip is None:
# This is our first line, people seem to like to upload files that have a header line, but do not
# start with '#' (i.e. all column types would then most likely be detected as str). We will assume
# that the first line is always a header (this was previous behavior - it was always skipped). When
# the requested skip is None, we only use the data from the first line if we have no other data for
# a column. This is far from perfect, as
# 1,2,3 1.1 2.2 qwerty
# 0 0 1,2,3
# will be detected as
# "column_types": ["int", "int", "float", "list"]
# instead of
# "column_types": ["list", "float", "float", "str"] *** would seem to be the 'Truth' by manual
# observation that the first line should be included as data. The old method would have detected as
# "column_types": ["int", "int", "str", "list"]
first_line_column_types = column_types
column_types = [ None for col in first_line_column_types ]
#we error on the larger number of columns
#first we pad our column_types by using data from first line
if len( first_line_column_types ) > len( column_types ):
for column_type in first_line_column_types[len( column_types ):]:
column_types.append( column_type )
#Now we fill any unknown (None) column_types with data from first line
for i in range( len( column_types ) ):
if column_types[i] is None:
if len( first_line_column_types ) <= i or first_line_column_types[i] is None:
column_types[i] = default_column_type
else:
column_types[i] = first_line_column_types[i]
# Set the discovered metadata values for the dataset
dataset.metadata.data_lines = data_lines
dataset.metadata.comment_lines = comment_lines
dataset.metadata.column_types = column_types
dataset.metadata.columns = len( column_types )
def make_html_table( self, dataset, skipchars=[] ):
"""Create HTML table, used for displaying peek"""
out = ['<table cellspacing="0" cellpadding="3">']
try:
out.append( '<tr>' )
# Generate column header
for i in range( 1, dataset.metadata.columns+1 ):
out.append( '<th>%s</th>' % str( i ) )
out.append( '</tr>' )
out.append( self.make_html_peek_rows( dataset, skipchars=skipchars ) )
out.append( '</table>' )
out = "".join( out )
except Exception, exc:
out = "Can't create peek %s" % str( exc )
return out
def make_html_peek_rows( self, dataset, skipchars=[] ):
out = [""]
comments = []
if not dataset.peek:
dataset.set_peek()
data = dataset.peek
lines = data.splitlines()
for line in lines:
line = line.rstrip( '\r\n' )
if not line:
continue
comment = False
for skipchar in skipchars:
if line.startswith( skipchar ):
comments.append( line )
comment = True
break
if comment:
continue
elems = line.split( '\t' )
if len( elems ) != dataset.metadata.columns:
# We may have an invalid comment line or invalid data
comments.append( line )
comment = True
continue
while len( comments ) > 0: # Keep comments
try:
out.append( '<tr><td colspan="100%">' )
except:
out.append( '<tr><td>' )
out.append( '%s</td></tr>' % escape( comments.pop(0) ) )
out.append( '<tr>' )
for elem in elems: # valid data
elem = escape( elem )
out.append( '<td>%s</td>' % elem )
out.append( '</tr>' )
# Peek may consist only of comments
while len( comments ) > 0:
try:
out.append( '<tr><td colspan="100%">' )
except:
out.append( '<tr><td>' )
out.append( '%s</td></tr>' % escape( comments.pop(0) ) )
return "".join( out )
def set_peek( self, dataset, line_count=None, is_multi_byte=False ):
data.Text.set_peek( self, dataset, line_count=line_count, is_multi_byte=is_multi_byte )
if dataset.metadata.comment_lines:
dataset.blurb = "%s, %s comments" % ( dataset.blurb, util.commaify( str( dataset.metadata.comment_lines ) ) )
def display_peek( self, dataset ):
"""Returns formatted html of peek"""
return self.make_html_table( dataset )
def as_gbrowse_display_file( self, dataset, **kwd ):
return open( dataset.file_name )
def as_ucsc_display_file( self, dataset, **kwd ):
return open( dataset.file_name )
class Taxonomy( Tabular ):
def __init__(self, **kwd):
"""Initialize taxonomy datatype"""
Tabular.__init__( self, **kwd )
self.column_names = ['Name', 'TaxId', 'Root', 'Superkingdom', 'Kingdom', 'Subkingdom',
'Superphylum', 'Phylum', 'Subphylum', 'Superclass', 'Class', 'Subclass',
'Superorder', 'Order', 'Suborder', 'Superfamily', 'Family', 'Subfamily',
'Tribe', 'Subtribe', 'Genus', 'Subgenus', 'Species', 'Subspecies'
]
def make_html_table( self, dataset, skipchars=[] ):
"""Create HTML table, used for displaying peek"""
out = ['<table cellspacing="0" cellpadding="3">']
comments = []
try:
# Generate column header
out.append( '<tr>' )
for i, name in enumerate( self.column_names ):
out.append( '<th>%s.%s</th>' % ( str( i+1 ), name ) )
# This data type requires at least 24 columns in the data
if dataset.metadata.columns - len( self.column_names ) > 0:
for i in range( len( self.column_names ), dataset.metadata.columns ):
out.append( '<th>%s</th>' % str( i+1 ) )
out.append( '</tr>' )
out.append( self.make_html_peek_rows( dataset, skipchars=skipchars ) )
out.append( '</table>' )
out = "".join( out )
except Exception, exc:
out = "Can't create peek %s" % exc
return out
class Sam( Tabular ):
file_ext = 'sam'
def __init__(self, **kwd):
"""Initialize taxonomy datatype"""
Tabular.__init__( self, **kwd )
self.column_names = ['QNAME', 'FLAG', 'RNAME', 'POS', 'MAPQ', 'CIGAR',
'MRNM', 'MPOS', 'ISIZE', 'SEQ', 'QUAL', 'OPT'
]
def make_html_table( self, dataset, skipchars=[] ):
"""Create HTML table, used for displaying peek"""
out = ['<table cellspacing="0" cellpadding="3">']
try:
# Generate column header
out.append( '<tr>' )
for i, name in enumerate( self.column_names ):
out.append( '<th>%s.%s</th>' % ( str( i+1 ), name ) )
# This data type requires at least 11 columns in the data
if dataset.metadata.columns - len( self.column_names ) > 0:
for i in range( len( self.column_names ), dataset.metadata.columns ):
out.append( '<th>%s</th>' % str( i+1 ) )
out.append( '</tr>' )
out.append( self.make_html_peek_rows( dataset, skipchars=skipchars ) )
out.append( '</table>' )
out = "".join( out )
except Exception, exc:
out = "Can't create peek %s" % exc
return out
def sniff( self, filename ):
"""
Determines whether the file is in SAM format
A file in SAM format consists of lines of tab-separated data.
The following header line may be the first line:
@QNAME FLAG RNAME POS MAPQ CIGAR MRNM MPOS ISIZE SEQ QUAL
or
@QNAME FLAG RNAME POS MAPQ CIGAR MRNM MPOS ISIZE SEQ QUAL OPT
Data in the OPT column is optional and can consist of tab-separated data
For complete details see http://samtools.sourceforge.net/SAM1.pdf
Rules for sniffing as True:
There must be 11 or more columns of data on each line
Columns 2 (FLAG), 4(POS), 5 (MAPQ), 8 (MPOS), and 9 (ISIZE) must be numbers (9 can be negative)
We will only check that up to the first 5 alignments are correctly formatted.
>>> fname = get_test_fname( 'sequence.maf' )
>>> Sam().sniff( fname )
False
>>> fname = get_test_fname( '1.sam' )
>>> Sam().sniff( fname )
True
"""
try:
fh = open( filename )
count = 0
while True:
line = fh.readline()
line = line.strip()
if not line:
break #EOF
if line:
if line[0] != '@':
linePieces = line.split('\t')
if len(linePieces) < 11:
return False
try:
check = int(linePieces[1])
check = int(linePieces[3])
check = int(linePieces[4])
check = int(linePieces[7])
check = int(linePieces[8])
except ValueError:
return False
count += 1
if count == 5:
return True
fh.close()
if count < 5 and count > 0:
return True
except:
pass
return False
| volpino/Yeps-EURAC | lib/galaxy/datatypes/tabular.py | Python | mit | 15,751 | [
"Galaxy"
] | cdf3c23cc82c98eaec01fa64fffe7a0fc770a59261a80a7afb41393579c32167 |
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import pyqtSignal
from vistrails.core import debug
from vistrails.gui.mashups.mashups_widgets import QAliasNumericStepperWidget, \
QAliasSliderWidget, QDropDownWidget
from vistrails.gui.utils import show_warning, TestVisTrailsGUI
from vistrails.packages.spreadsheet.spreadsheet_controller import \
spreadsheetController
class QMashupAppMainWindow(QtGui.QMainWindow):
#signals
appWasClosed = pyqtSignal(QtGui.QMainWindow)
def __init__(self, parent=None, vistrail_view=None, dumpcells=False,
controller=None, version=-1):
""" QMashupAppMainWindow()
Initialize an app window from a mashup.
"""
# Constructing the main widget
QtGui.QMainWindow.__init__(self, parent)
self.vtkCells = []
self.setStatusBar(QtGui.QStatusBar(self))
# Central widget
centralWidget = QtGui.QWidget()
self.mainLayout = QtGui.QVBoxLayout()
self.mainLayout.setMargin(0)
self.mainLayout.setSpacing(5)
centralWidget.setLayout(self.mainLayout)
self.setCentralWidget(centralWidget)
self.numberOfCells = 0
self.is_executing = False
self.sequenceOption = False
self.steps = []
self.isLooping = False
#self.resize(100,100)
self.dumpcells = dumpcells
self.view = vistrail_view
if controller:
self.controller = controller
self.mshptrail = controller.mshptrail
if version == -1:
self.currentMashup = self.controller.currentMashup
else:
self.currentMashup = self.mshptrail.getMashup(version)
self.setWindowTitle('%s Mashup'%self.controller.getMashupName(version))
else:
self.setWindowTitle('Mashup')
# Assign "hidden" shortcut
self.editingModeAct = QtGui.QAction("Chang&e Layout",
self, shortcut="Ctrl+E",
statusTip="Change the layout of the widgets",
triggered=self.toggleEditingMode)
#self.editingModeShortcut = QtGui.QShortcut(QtGui.QKeySequence('Ctrl+E'), self)
#self.connect(self.editingModeShortcut, QtCore.SIGNAL('activated()'),
# self.toggleEditingMode)
self.editing = False
# Constructing alias controls
self.controlDocks = []
# Show here to make sure XDisplay info is correct (for VTKCell)
self.show()
spreadsheetController.setEchoMode(True)
#will run to get Spreadsheet Cell events
(cellEvents, errors) = self.runAndGetCellEvents(useDefaultValues=True)
if cellEvents:
self.numberOfCells = len(cellEvents)
self.initCells(cellEvents)
if len(errors) > 0:
show_warning("VisTrails::Mashup Preview",
"There was a problem executing the pipeline: %s." %
errors)
# Construct the controllers for aliases
self.controlDocks = {}
self.cellControls = {}
self.aliasWidgets = {}
self.initControls()
if self.currentMashup.layout is not None:
self.restoreState(QtCore.QByteArray.fromPercentEncoding(
QtCore.QByteArray(self.currentMashup.layout)))
if self.currentMashup.geometry is not None:
self.restoreGeometry(QtCore.QByteArray.fromPercentEncoding(
QtCore.QByteArray(self.currentMashup.geometry)))
else:
self.resize(self.sizeHint())
# Constructing buttons
buttonDock = QCustomDockWidget('Control Buttons', self)
buttonWidget = QtGui.QWidget(buttonDock)
buttonWidget.setSizePolicy(QtGui.QSizePolicy.Preferred,
QtGui.QSizePolicy.Preferred)
buttonLayout = QtGui.QGridLayout()
buttonWidget.setLayout(buttonLayout)
buttonLayout.setMargin(5)
self.cb_auto_update = QtGui.QCheckBox("Turn on auto-update",
self.centralWidget())
self.cb_auto_update.setChecked(False)
self.cb_loop_sequence = QtGui.QCheckBox("Render all steps in '%s'" % \
(self.sequenceOption.alias.name if self.sequenceOption else 'None'),
self.centralWidget())
self.cb_loop_sequence.setChecked(False)
self.cb_loop_sequence.setVisible(self.sequenceOption is not False)
self.cb_loop_sequence.setToolTip(
"Render each step of this stepper for fast switching")
self.cb_loop_int = QtGui.QCheckBox("Interactive Steps",
self.centralWidget())
self.cb_loop_int.setChecked(False)
self.cb_loop_int.setVisible(False)
self.cb_loop_int.setToolTip(
"Show complete result of each step instead of static images")
self.cb_loop_sequence.clicked.connect(self.cb_loop_int.setVisible)
self.cb_keep_camera = QtGui.QCheckBox("Keep camera position",
self.centralWidget())
self.cb_keep_camera.setChecked(True)
self.connect(self.cb_auto_update,
QtCore.SIGNAL("stateChanged(int)"),
self.auto_update_changed)
self.connect(self.cb_loop_int,
QtCore.SIGNAL("stateChanged(int)"),
self.loop_int_changed)
self.loopButton = QtGui.QPushButton("&Loop", self.centralWidget())
self.loopButton.setToolTip("Loop automatically through steps")
self.loopButton.setCheckable(True)
self.loopButton.setVisible(self.sequenceOption is not False)
self.updateButton = QtGui.QPushButton("&Update", self.centralWidget())
if self.dumpcells:
self.quitButton = QtGui.QPushButton("&Save", self.centralWidget())
self.connect(self.quitButton,
QtCore.SIGNAL('clicked(bool)'),
self.saveAndExport)
else:
self.quitButton = QtGui.QPushButton("&Quit", self.centralWidget())
self.connect(self.quitButton,
QtCore.SIGNAL('clicked(bool)'),
self.close)
buttonLayout.setColumnStretch(0, 1)
if self.sequenceOption:
sequenceLayout = QtGui.QHBoxLayout()
sequenceLayout.setMargin(5)
sequenceLayout.addWidget(self.cb_loop_int)
sequenceLayout.addWidget(self.cb_loop_sequence)
buttonLayout.addLayout(sequenceLayout, 0, 0, QtCore.Qt.AlignRight)
buttonLayout.addWidget(self.cb_auto_update, 0, 1, QtCore.Qt.AlignLeft)
buttonLayout.addWidget(self.cb_keep_camera, 0, 2, 1, 2, QtCore.Qt.AlignLeft)
if self.sequenceOption:
buttonLayout.addWidget(self.loopButton, 1, 1, QtCore.Qt.AlignRight)
self.loopButton.setEnabled(False)
buttonLayout.addWidget(self.updateButton, 1, 2, QtCore.Qt.AlignRight)
buttonLayout.addWidget(self.quitButton, 1, 3, QtCore.Qt.AlignRight)
self.connect(self.updateButton,
QtCore.SIGNAL('clicked(bool)'),
self.updateButtonClick)
if self.sequenceOption:
self.connect(self.loopButton,
QtCore.SIGNAL('clicked(bool)'),
self.loopButtonClick)
buttonDock.setWidget(buttonWidget)
self.addDockWidget(QtCore.Qt.BottomDockWidgetArea, buttonDock)
self.controlDocks["__buttons__"] = buttonDock
self.saveAllAct = QtGui.QAction("S&ave Combined", self,
shortcut=QtGui.QKeySequence.SelectAll,
statusTip="Save combined images to disk",
triggered=self.saveAllEvent)
self.saveAct = QtGui.QAction("&Save Each", self,
shortcut=QtGui.QKeySequence.Save,
statusTip="Save separate images to disk",
triggered=self.saveEventAction)
self.showBuilderAct = QtGui.QAction("VisTrails Main Window", self,
statusTip="Show VisTrails Main Window",
triggered=self.showBuilderWindow)
self.createMenus()
self.lastExportPath = ''
def createMenus(self):
self.fileMenu = self.menuBar().addMenu("&File")
self.fileMenu.addAction(self.saveAct)
self.fileMenu.addAction(self.saveAllAct)
self.viewMenu = self.menuBar().addMenu("&View")
self.viewMenu.addAction(self.editingModeAct)
self.windowMenu = self.menuBar().addMenu("&Window")
self.windowMenu.addAction(self.showBuilderAct)
def runAndGetCellEvents(self, useDefaultValues=False):
spreadsheetController.setEchoMode(True)
#will run to get Spreadsheet Cell events
cellEvents = []
errors = []
try:
(res, errors) = self.run(useDefaultValues)
if res:
cellEvents = spreadsheetController.getEchoCellEvents()
except Exception, e:
debug.unexpected_exception(e)
print "Executing pipeline failed:", debug.format_exc()
finally:
spreadsheetController.setEchoMode(False)
return (cellEvents, errors)
def updateCells(self, info=None):
# check if we should create a sequence
if self.cb_loop_sequence.isChecked():
return self.updateCellsLoop(info)
self.is_executing = True
(cellEvents, errors) = self.runAndGetCellEvents()
self.is_executing = False
if len(cellEvents) != self.numberOfCells:
raise RuntimeError(
"The number of cells has changed (unexpectedly) "
"(%d vs. %d)!\n"
"Pipeline results: %s" % (len(cellEvents),
self.numberOfCells,
errors))
#self.SaveCamera()
for i in xrange(self.numberOfCells):
camera = []
if (hasattr(self.cellWidgets[i],"getRendererList") and
self.cb_keep_camera.isChecked()):
for ren in self.cellWidgets[i].getRendererList():
camera.append(ren.GetActiveCamera())
self.cellWidgets[i].updateContents(cellEvents[i].inputPorts, camera)
#self.cellWidgets[i].updateContents(cellEvents[i].inputPorts)
else:
self.cellWidgets[i].updateContents(cellEvents[i].inputPorts)
def updateCellsLoop(self, info=None):
""" Run workflow for each step in the loop sequence and collect results.
"""
interactive = self.cb_loop_int.isChecked()
slider = self.sequenceOption.value
if info and info[1][0] == slider:
# User is moving the slider, so we use the existing result
if interactive:
if slider.value() < len(self.steps):
self.updateRenderedCells(slider.value())
else:
for i in xrange(self.numberOfCells):
self.cellWidgets[i].setPlayerFrame(slider.value())
return
if not interactive:
for i in xrange(self.numberOfCells):
self.cellWidgets[i].clearHistory()
self.is_executing = True
self.steps = []
old_value = slider.value()
value = slider.minimum()
slider.setValue(value)
while True:
(cellEvents, errors) = self.runAndGetCellEvents()
if len(cellEvents) != self.numberOfCells:
raise RuntimeError(
"The number of cells has changed (unexpectedly) "
"(%d vs. %d)!\n"
"Pipeline results: %s" % (len(cellEvents),
self.numberOfCells,
errors))
if interactive:
self.steps.append([])
else:
self.steps = [[]]
for i in xrange(self.numberOfCells):
self.steps[-1].append(cellEvents[i].inputPorts)
# show the result
self.updateRenderedCells(value if interactive else 0)
self.is_executing = True
if value >= slider.maximum():
break
value += slider.singleStep()
slider.setValue(value)
self.is_executing = False
slider.setValue(old_value)
self.loopButton.setEnabled(True)
def updateRenderedCells(self, value):
""" Show the cell specified by slider info
"""
self.is_executing = True
for i in xrange(self.numberOfCells):
camera = []
if (hasattr(self.cellWidgets[i],"getRendererList") and
self.cb_keep_camera.isChecked()):
for ren in self.cellWidgets[i].getRendererList():
camera.append(ren.GetActiveCamera())
self.cellWidgets[i].updateContents(self.steps[value][i], camera)
else:
self.cellWidgets[i].updateContents(self.steps[value][i])
self.is_executing = False
def updateButtonClick(self):
self.updateButton.setEnabled(False)
try:
self.updateCells()
finally:
self.updateButton.setEnabled(True)
def loopButtonClick(self, toggled):
self.updateButton.setEnabled(not toggled)
self.cb_loop_int.setEnabled(not toggled)
self.cb_loop_sequence.setEnabled(not toggled)
self.cb_auto_update.setEnabled(not toggled)
if self.cb_loop_int.isChecked():
if toggled:
if self.isLooping:
self.killTimer(self.isLooping)
self.isLooping = self.startTimer(200)
elif self.isLooping:
self.killTimer(self.isLooping)
self.isLooping = None
else:
for cell in self.cellWidgets:
if toggled:
cell.startPlayer()
else:
cell.stopPlayer()
def timerEvent(self, event):
if self.steps:
stepper = self.sequenceOption.value
if stepper.value() == stepper.maximum():
stepper.setValue(stepper.minimum())
else:
stepper.setValue(stepper.value() + stepper.singleStep())
if stepper.value() >= len(self.steps):
self.loopButton.setChecked(False)
return
for i in xrange(self.numberOfCells):
self.cellWidgets[i].setAnimationEnabled(False)
self.updateRenderedCells(stepper.value())
for i in xrange(self.numberOfCells):
self.cellWidgets[i].setAnimationEnabled(True)
def toggleEditingMode(self):
if len(self.controlDocks) > 0:
for dock in self.controlDocks.itervalues():
dock.toggleTitleBar()
self.editing = not self.editing
if not self.editing:
self.saveSettings()
def saveSettings(self):
layout = self.saveState().toPercentEncoding()
geom = self.saveGeometry().toPercentEncoding()
self.currentMashup.layout = layout
self.currentMashup.geometry = geom
self.controller.setChanged(True)
#self.controller.writeMashuptrail()
def closeEvent(self, event):
self.saveSettings()
self.appWasClosed.emit(self)
event.accept()
def auto_update_changed(self, state):
if state == QtCore.Qt.Unchecked:
self.updateButton.setEnabled(True)
self.cb_loop_int.setEnabled(True)
self.cb_loop_sequence.setEnabled(True)
self.loopButton.setEnabled(True)
if self.cb_loop_sequence.isChecked() and not self.cb_loop_int.isChecked():
for i in xrange(self.numberOfCells):
cell = self.cellWidgets[i]
cell._player.hide()
cell.show()
elif state == QtCore.Qt.Checked:
self.updateButton.setEnabled(False)
self.cb_loop_int.setEnabled(False)
self.cb_loop_sequence.setEnabled(False)
self.loopButton.setEnabled(False)
if self.cb_loop_sequence.isChecked() and not self.cb_loop_int.isChecked():
for i in xrange(self.numberOfCells):
cell = self.cellWidgets[i]
cell._player.setParent(cell.parent())
cell._player.setGeometry(cell.geometry())
self.cellWidgets[i].setPlayerFrame(self.sequenceOption.value.value())
cell._player.raise_()
cell._player.show()
cell.hide()
def loop_int_changed(self, state):
self.loopButton.setEnabled(False)
def saveAll(self):
for w in self.widgets:
w.saveAll(self.dumpcells)
def saveEach(self):
for w in self.widgets:
w.saveEach(self.dumpcells, self.frameNo)
def saveEventAction(self, checked):
self.saveEvent()
def saveEvent(self, folder=None):
if folder is None:
folder = QtGui.QFileDialog.getExistingDirectory(self,
"Save images to...",
self.lastExportPath,
QtGui.QFileDialog.ShowDirsOnly)
if folder:
self.dumpcells = str(folder)
self.saveEach()
self.lastExportPath = str(folder)
def saveAllEvent(self, folder=None):
if folder is None:
folder = QtGui.QFileDialog.getExistingDirectory(self,
"Save images to...",
self.lastExportPath,
QtGui.QFileDialog.ShowDirsOnly)
if folder:
self.dumpcells = str(folder)
self.saveAll()
def saveAndExport(self, clicked=True):
self.saveAll()
def initCells(self, cellEvents):
cellLayout = QtGui.QHBoxLayout()
self.mainLayout.addLayout(cellLayout, self.numberOfCells * 2)
self.cellWidgets = []
vtkCells = []
for event in cellEvents:
cellWidget = event.cellType(self.centralWidget())
if event.cellType.__name__ == 'QVTKWidget':
vtkCells.append(cellWidget)
cellWidget.show()
cellWidget.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
cellWidget.setMinimumSize(300, 100)
cellLayout.addWidget(cellWidget)
self.cellWidgets.append(cellWidget)
cellWidget.updateContents(event.inputPorts)
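        # Give every collected VTK cell widget access to the full list of VTK
        # cells through a closure, so a widget can look up its sibling cells.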
def getSelectedCellWidgets():
return vtkCells
        for cellWidget in vtkCells:
cellWidget.getSelectedCellWidgets = getSelectedCellWidgets
def initControls(self):
if len(self.currentMashup.alias_list) == 0:
return
#Constructing alias controls
self.controlDocks = {}
self.cellControls = {}
self.toolbuttons = {}
row = 0
for alias in self.currentMashup.alias_list:
dock = QCustomDockWidget(alias.name, #"Control for '%s'" % aliasName,
self)
vtparam = self.controller.getVistrailParam(alias)
if alias.component.widget == 'slider':
aliasWidget = QAliasSliderWidget(alias, vtparam, dock)
                # a slider marked as a sequence enables looping over its steps
if alias.component.seq:
self.sequenceOption = aliasWidget
elif alias.component.widget == 'numericstepper':
aliasWidget = QAliasNumericStepperWidget(alias, vtparam, dock)
else:
aliasWidget = QDropDownWidget(alias, vtparam, dock)
aliasWidget.setSizePolicy(QtGui.QSizePolicy.Preferred,
QtGui.QSizePolicy.Maximum)
self.connect(aliasWidget,
QtCore.SIGNAL("contentsChanged"),
self.widget_changed)
dock.setWidget(aliasWidget)
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, dock)
self.controlDocks[alias.name] = dock
self.cellControls[alias.name] = aliasWidget.value
row += 1
self.aliasWidgets[alias.name] = aliasWidget
# Added a stretch space
stretchDock = QCustomDockWidget('Stretch Space', self)
stretch = QtGui.QWidget()
stretch.setLayout(QtGui.QVBoxLayout())
stretch.layout().addStretch()
stretchDock.setWidget(stretch)
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, stretchDock)
self.controlDocks["_stretch_"] = stretchDock
def widget_changed(self, info):
if self.cb_auto_update.isChecked() and not self.is_executing:
self.updateCells(info)
def run(self, useDefaultValues=False):
# Building the list of parameter values
params = []
if useDefaultValues:
for alias in self.currentMashup.alias_list:
params.append((alias.component.vttype, alias.component.vtid,
alias.component.val))
else:
for (aliasName, edit) in self.cellControls.iteritems():
alias = self.currentMashup.getAliasByName(aliasName)
if hasattr(edit, 'contents'):
val = str(edit.contents())
else:
                    val = str(edit.text())
params.append((alias.component.vttype, alias.component.vtid,
val))
results = self.controller.execute(params)[0]
result = results[0]
(objs, errors, executed) = (result.objects, result.errors,
result.executed)
if len(errors) > 0:
print '=== ERROR EXECUTING PIPELINE ==='
print errors
return (False, errors)
return (True, [])
def showBuilderWindow(self):
from vistrails.gui.vistrails_window import _app
_app.show()
class QCustomDockWidget(QtGui.QDockWidget):
def __init__(self, title, parent=None):
QtGui.QDockWidget.__init__(self, title, parent)
self.setObjectName(title)
self.setFeatures(QtGui.QDockWidget.DockWidgetClosable |
QtGui.QDockWidget.DockWidgetMovable)
self.emptyTitleBar = QtGui.QWidget()
self.titleBarVisible = True
self.hideTitleBar()
def showTitleBar(self):
self.titleBarVisible = True
self.setFeatures(QtGui.QDockWidget.DockWidgetClosable |
QtGui.QDockWidget.DockWidgetMovable)
self.setMaximumHeight(524287)
self.setTitleBarWidget(None)
def hideTitleBar(self):
self.titleBarVisible = False
self.setFeatures(QtGui.QDockWidget.NoDockWidgetFeatures)
self.setTitleBarWidget(self.emptyTitleBar)
def toggleTitleBar(self):
if self.titleBarVisible:
self.hideTitleBar()
else:
self.showTitleBar()
################################################################################
# Testing
class TestMashupApp(TestVisTrailsGUI):
def setUp(self):
super(TestMashupApp, self).setUp()
try:
import vtk
except ImportError:
self.skipTest("VTK is not available")
from vistrails.core.packagemanager import get_package_manager
from vistrails.core.modules.module_registry import MissingPackage
pm = get_package_manager()
identifier = 'org.vistrails.vistrails.vtk'
try:
pkg = pm.get_package(identifier)
except MissingPackage:
pkg = pm.identifier_is_available(identifier)
if pkg:
pm.late_enable_package(pkg.codepath)
pkg = pm.get_package(identifier)
def test_load_mashup(self):
import vistrails.api
import vistrails.core.system
filename = (vistrails.core.system.vistrails_root_directory() +
'/tests/resources/spx_loop.vt')
view = vistrails.api.open_vistrail_from_file(filename)
view.controller.flush_delayed_actions()
id = "d5026457-de6c-11e2-b074-3c07543dba07"
mashup = view.get_mashup_from_mashuptrail_id(id, "loop")
self.assert_(mashup)
view.open_mashup(mashup)
mashup = view.get_mashup_from_mashuptrail_id(id, "no loop")
self.assert_(mashup)
view.open_mashup(mashup)
| Nikea/VisTrails | vistrails/gui/mashups/mashup_app.py | Python | bsd-3-clause | 27,120 | [
"VTK"
] | 985103b33e7f1ea4754bf1512597d4dfc107108f6af12e049a4f6ea1617adb2d |
#
# This file is a part of the normalize python library
#
# normalize is free software: you can redistribute it and/or modify
# it under the terms of the MIT License.
#
# normalize is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
#
# You should have received a copy of the MIT license along with
# normalize. If not, refer to the upstream repository at
# http://github.com/hearsaycorp/normalize
#
"""tests that look at the wholistic behavior of records"""
from __future__ import absolute_import
from past.builtins import basestring
from datetime import datetime
import unittest2
import warnings
from normalize import ListProperty
from normalize import Property
from normalize import Record
from normalize import V1Property
import normalize.exc as exc
from normalize.visitor import VisitorPattern
class TestRecords(unittest2.TestCase):
"""Test that the new data descriptor classes work"""
def test_false_emptiness(self):
"""Test that Properties with falsy empty values don't throw
exceptions"""
class SophiesRecord(Record):
placeholder = Property()
aux_placeholder = Property(default='')
age = Property(default=0)
name = V1Property(isa=basestring)
sophie = SophiesRecord()
with self.assertRaises(AttributeError):
sophie.placeholder
self.assertFalse(sophie.placeholder0)
self.assertEqual(sophie.aux_placeholder, '')
self.assertFalse(sophie.aux_placeholder0)
self.assertEqual(sophie.age, 0)
self.assertFalse(sophie.age0)
self.assertEqual(sophie.name, None)
with self.assertRaises(AttributeError):
sophie.name0
sophie.name = "Sophie"
self.assertEqual(sophie.name, "Sophie")
sophie.name = None
self.assertEqual(sophie.name, None)
# the properties aren't really set, but VisitorPattern sees them.
expected = {"age": 0, "aux_placeholder": ""}
self.assertEqual(VisitorPattern.visit(sophie), expected)
sophie.age = 1
expected['age'] = 1
self.assertEqual(VisitorPattern.visit(sophie), expected)
sophie.age = 0
expected['age'] = 0
self.assertEqual(VisitorPattern.visit(sophie), expected)
del sophie.age
self.assertEqual(VisitorPattern.visit(sophie), expected)
def test_functional_emptiness(self):
"""Test that functional empty values are transient"""
class BlahRecord(Record):
blah = Property()
class LambdaRecord(Record):
epoch = Property(isa=datetime)
objective = Property(isa=BlahRecord)
lambda_ = LambdaRecord()
self.assertFalse(
lambda_.epoch0.isoformat()[:4].bob.foo,
"empty values work",
)
self.assertFalse(lambda_.objective0.blah0,
"empty values don't persist")
with self.assertRaisesRegexp(AttributeError, r'BlahRecord.*blha0'):
lambda_.objective0.blha0
with self.assertRaisesRegexp(
exc.EmptyAttributeError, r"Can't assign.*BlahRecord"
):
lambda_.objective0.blah = 42
with self.assertRaisesRegexp(
exc.EmptyAttributeError, r"Can't assign.*BlahRecord"
):
lambda_.objective0[0] = 42
def test_bad_constructor(self):
"""Test that 'empty' definition errors are no longer possible"""
with warnings.catch_warnings(record=True) as w:
class OhNoRecord(Record):
lets_go = Property(isa=datetime)
self.assertEqual(len(w), 0)
def test_empty_type_inference(self):
class OneRecord(Record):
foo = Property(isa=type(2))
class TwoRecord(Record):
bar = Property(isa=type(None))
def __call__(self):
return "hi"
class NumRecord(Record):
which = Property(isa=(OneRecord, TwoRecord))
class NumsRecord(Record):
nums = ListProperty(of=NumRecord)
nr = NumsRecord()
with self.assertRaisesRegexp(
exc.EmptyAttributeError, r"NumRecordList.*attribute 'which'",
):
nr.nums0.which
self.assertFalse(nr.nums0[1].which)
with self.assertRaisesRegexp(
exc.EmptyAttributeError, r"NumRecord\b.*attribute 'blah'",
):
nr.nums0[0].blah
self.assertFalse(nr.nums0[2].which.foo)
self.assertFalse(nr.nums0[2].which.bar)
        # the 0-suffixed forms work as well
self.assertFalse(nr.nums0[3].which0.bar0)
self.assertFalse(nr.nums0[4].which0.foo0)
# array slicing
self.assertFalse(nr.nums0[3:-1][0].which0.foo0)
with self.assertRaisesRegexp(
exc.NotSubscriptable, r"OneRecord,TwoRecord"
):
nr.nums0[1].which[1]
# test invoking
with self.assertRaisesRegexp(exc.NotCallable, r"NumRecord"):
nr.nums0[1]()
self.assertFalse(nr.nums0[4].which())
class MagicRecord(Record):
def __getattr__(self, whatever):
return 1
class MagicList(Record):
def __getitem__(self, whatever):
return 1
class LooseRecord(Record):
this = Property(isa=(OneRecord, TwoRecord, datetime))
that = Property(isa=MagicRecord)
other = Property(isa=MagicList)
lr = LooseRecord()
self.assertFalse(lr.this0.date)
self.assertFalse(lr.this0.foo.real)
with self.assertRaisesRegexp(
exc.NoSuchAttribute, r"TwoRecord,datetime",
):
lr.this0.dote
with self.assertRaisesRegexp(exc.NoSuchAttribute, r"None"):
self.assertFalse(lr.this0.bar.real)
self.assertFalse(lr.that0.date)
with self.assertRaisesRegexp(
exc.NotSubscriptable, r"MagicRecord",
):
lr.that0[7]
self.assertFalse(lr.other0[0].foo.bar())
with self.assertRaisesRegexp(exc.NoSuchAttribute, r"MagicList"):
lr.other0.anything
def test_v1_none(self):
class SafeRecord(Record):
maybe_int = V1Property(isa=int)
maybe_str = V1Property(isa=basestring, json_name="maybeStr")
self.assertEqual(type(SafeRecord.maybe_int).__name__, "V1Property")
# FIXME: the name combination code should know that 'Safe' is
# not needed in this combination
self.assertEqual(
type(SafeRecord.maybe_str).__name__, "V1SafeJsonProperty",
)
sr = SafeRecord(maybe_int=4, maybe_str="hey")
del sr.maybe_int
self.assertEqual(sr.maybe_int, None)
del sr.maybe_str
self.assertEqual(sr.maybe_str, None)
sr = SafeRecord(maybe_int=4, maybe_str="hey")
sr.maybe_int = None
self.assertEqual(sr.maybe_int, None)
sr.maybe_str = None
self.assertEqual(sr.maybe_str, None)
| hearsaycorp/normalize | tests/test_record.py | Python | mit | 7,177 | [
"VisIt"
] | 1e19a75b986364b8cd097356f879783acc43b470aa42f646b0ec13b96f5b9c01 |
""" Test_RSS_Command_GOCDBStatusCommand
"""
__RCSID__ = '$Id$'
from datetime import datetime, timedelta
import mock
from DIRAC import gLogger, S_OK
from DIRAC.ResourceStatusSystem.Command.DowntimeCommand import DowntimeCommand
"""
Setup
"""
gLogger.setLevel('DEBUG')
# Mock external libraries / modules not interesting for the unit test
seMock = mock.MagicMock()
seMock.options = {'SEType': 'T0D1'}
mock_GOCDBClient = mock.MagicMock()
mock_RMClient = mock.MagicMock()
mock_RMClient.addOrModifyDowntimeCache.return_value = S_OK()
args = {'name': 'aName', 'element': 'Resource', 'elementType': 'StorageElement'}
def test_instantiate():
""" tests that we can instantiate one object of the tested class
"""
command = DowntimeCommand()
assert command.__class__.__name__ == 'DowntimeCommand'
def test_init():
""" tests that the init method does what it should do
"""
command = DowntimeCommand()
assert command.args == {'onlyCache': False}
assert command.apis == {}
command = DowntimeCommand(clients={'GOCDBClient': mock_GOCDBClient})
assert command.args == {'onlyCache': False}
assert command.apis == {'GOCDBClient': mock_GOCDBClient}
command = DowntimeCommand(args)
_args = dict(args)
_args.update({'onlyCache': False})
assert command.args == _args
assert command.apis == {}
def test_doCache(mocker):
""" tests the doCache method
"""
mocker.patch("DIRAC.ResourceStatusSystem.Command.DowntimeCommand.StorageElement", return_value=seMock)
mocker.patch("DIRAC.ResourceStatusSystem.Command.DowntimeCommand.getSEHosts",
return_value=S_OK(['someHost', 'aSecondHost']))
command = DowntimeCommand(args, {'ResourceManagementClient': mock_RMClient})
res = command.doCache()
assert res['OK'] is True
# CASE01: get ongoing DT from 2 DTs where one ongoing the other in the future
now = datetime.utcnow()
resFromDB = {'OK': True,
'Value': ((now - timedelta(hours=2),
'1 aRealName',
'https://blah',
now + timedelta(hours=3),
'aRealName',
now - timedelta(hours=2),
'maintenance',
'OUTAGE',
now,
'Resource'),
(now + timedelta(hours=12),
'2 aRealName',
'https://blah',
now + timedelta(hours=14),
'aRealName',
now + timedelta(hours=12),
'maintenance',
'OUTAGE',
now,
'Resource')
),
'Columns': ['StartDate', 'DowntimeID', 'Link', 'EndDate', 'Name',
'DateEffective', 'Description', 'Severity', 'LastCheckTime', 'Element']}
mock_RMClient.selectDowntimeCache.return_value = resFromDB
command = DowntimeCommand(args, {'ResourceManagementClient': mock_RMClient})
res = command.doCache()
assert res['OK'] is True
assert res['Value']['DowntimeID'] == '1 aRealName'
mock_RMClient.selectDowntimeCache.return_value = resFromDB
args.update({'hours': 2})
command = DowntimeCommand(args, {'ResourceManagementClient': mock_RMClient})
res = command.doCache()
assert res['OK'] is True
assert res['Value']['DowntimeID'] == '1 aRealName'
# CASE02: get future DT from 2 DTs where one ongoing the other in the future
resFromDB = {'OK': True,
'Value': ((now - timedelta(hours=12),
'1 aRealName',
'https://blah',
now - timedelta(hours=2),
'aRealName',
now - timedelta(hours=12),
'maintenance',
'OUTAGE',
now,
'Resource'),
(now + timedelta(hours=2),
'2 aRealName',
'https://blah',
now + timedelta(hours=14),
'aRealName',
now + timedelta(hours=2),
'maintenance',
'OUTAGE',
now,
'Resource')
),
'Columns': ['StartDate', 'DowntimeID', 'Link', 'EndDate', 'Name',
'DateEffective', 'Description', 'Severity', 'LastCheckTime', 'Element']}
mock_RMClient.selectDowntimeCache.return_value = resFromDB
args.update({'hours': 3})
command = DowntimeCommand(args, {'ResourceManagementClient': mock_RMClient})
res = command.doCache()
assert res['OK'] is True
assert res['Value']['DowntimeID'] == '2 aRealName'
# CASE03: get DT from 2 overlapping OUTAGE DTs, one ongoing the other starting in the future
resFromDB = {'OK': True,
'Value': ((now - timedelta(hours=12),
'1 aRealName',
'https://blah',
now + timedelta(hours=2),
'aRealName',
now - timedelta(hours=12),
'maintenance',
'OUTAGE',
now,
'Resource'),
(now + timedelta(hours=2),
'2 aRealName',
'https://blah',
now + timedelta(hours=14),
'aRealName',
now + timedelta(hours=2),
'maintenance',
'OUTAGE',
now,
'Resource')
),
'Columns': ['StartDate', 'DowntimeID', 'Link', 'EndDate', 'Name',
'DateEffective', 'Description', 'Severity', 'LastCheckTime', 'Element']}
mock_RMClient.selectDowntimeCache.return_value = resFromDB
args.update({'hours': 0})
command = DowntimeCommand(
args, {'ResourceManagementClient': mock_RMClient})
res = command.doCache()
assert res['OK'] is True
assert res['Value']['DowntimeID'] == '1 aRealName'
# CASE04: get DT from 2 ongoing DTs, first OUTAGE the other WARNING
resFromDB = {'OK': True,
'Value': ((now - timedelta(hours=10),
'1 aRealName',
'https://blah',
now + timedelta(hours=2),
'aRealName',
now - timedelta(hours=12),
'maintenance',
'OUTAGE',
now,
'Resource'),
(now - timedelta(hours=12),
'2 aRealName',
'https://blah',
now + timedelta(hours=4),
'aRealName',
now + timedelta(hours=2),
'maintenance',
'WARNING',
now,
'Resource')
),
'Columns': ['StartDate', 'DowntimeID', 'Link', 'EndDate', 'Name',
'DateEffective', 'Description', 'Severity', 'LastCheckTime', 'Element']
}
mock_RMClient.selectDowntimeCache.return_value = resFromDB
args.update({'hours': 0})
command = DowntimeCommand(
args, {'ResourceManagementClient': mock_RMClient})
res = command.doCache()
assert res['OK'] is True
assert res['Value']['DowntimeID'] == '1 aRealName'
# CASE05: get the DT from 2 overlapping future DTs, the first WARNING and the other OUTAGE
resFromDB = {'OK': True,
'Value': ((now + timedelta(hours=8),
'1 aRealName',
'https://blah',
now + timedelta(hours=12),
'aRealName',
now + timedelta(hours=8),
'maintenance',
'WARNING',
now,
'Resource'),
(now + timedelta(hours=9),
'2 aRealName',
'https://blah',
now + timedelta(hours=11),
'aRealName',
now + timedelta(hours=9),
'maintenance',
'OUTAGE',
now,
'Resource')
),
'Columns': ['StartDate', 'DowntimeID', 'Link', 'EndDate', 'Name',
'DateEffective', 'Description', 'Severity', 'LastCheckTime', 'Element']
}
mock_RMClient.selectDowntimeCache.return_value = resFromDB
args.update({'hours': 10})
command = DowntimeCommand(args, {'ResourceManagementClient': mock_RMClient})
res = command.doCache()
assert res['OK'] is True
assert res['Value']['DowntimeID'] == '2 aRealName'
def test_doNew():
""" tests the doNew method
"""
args = {'element': 'X'}
command = DowntimeCommand(args, {'GOCDBClient': mock_GOCDBClient})
res = command.doNew()
assert res['OK'] is False
getGOCSiteNameMock = mock.MagicMock()
getGOCSiteNameMock.return_value = {'OK': True, 'Value': 'aSite'}
mock_GOCDBClient.getStatus.return_value = S_OK()
command = DowntimeCommand({'element': 'Site', "name": 'aSite', 'elementType': 'Z'},
{'GOCDBClient': mock_GOCDBClient})
res = command.doNew()
assert res['OK'] is True
assert res['Value'] is None
mock_GOCDBClient.getStatus.return_value = {'OK': True,
'Value': {'669 devel.edu.mk': {
'HOSTED_BY': 'MK-01-UKIM_II',
'DESCRIPTION': 'Problem with SE server',
'SEVERITY': 'OUTAGE',
'HOSTNAME': 'devel.edu.mk',
'GOCDB_PORTAL_URL': 'myURL',
'FORMATED_END_DATE': '2011-07-20 00:00',
'FORMATED_START_DATE': '2011-07-16 00:00'
}}}
command = DowntimeCommand({'element': 'Resource', 'name': '669 devel.edu.mk', 'elementType': 'Z'},
{'GOCDBClient': mock_GOCDBClient,
'ResourceManagementClient': mock_RMClient})
res = command.doNew()
assert res['OK'] is True
def test_doMaster(mocker):
""" tests the doMaster method
"""
mocker.patch("DIRAC.ResourceStatusSystem.Command.DowntimeCommand.getGOCSites", return_value=S_OK())
mocker.patch("DIRAC.ResourceStatusSystem.Command.DowntimeCommand.getStorageElementsHosts", return_value=S_OK())
mocker.patch("DIRAC.ResourceStatusSystem.Command.DowntimeCommand.getFTS3Servers", return_value=S_OK())
mocker.patch("DIRAC.ResourceStatusSystem.Command.DowntimeCommand.getGOCSites", return_value=S_OK())
mocker.patch("DIRAC.ResourceStatusSystem.Command.DowntimeCommand.getComputingElements", return_value=S_OK())
command = DowntimeCommand({'element': 'Resource', 'name': '669 devel.edu.mk', 'elementType': 'Z'},
{'GOCDBClient': mock_GOCDBClient,
'ResourceManagementClient': mock_RMClient})
res = command.doMaster()
assert res['OK'] is True
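# Editor's note (sketch, assuming pytest with the pytest-mock plugin that provides the
# 'mocker' fixture used above): this module is normally run directly with pytest, e.g.
#   pytest Test_RSS_Command_GOCDBStatusCommand.py -v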
| andresailer/DIRAC | ResourceStatusSystem/Command/test/Test_RSS_Command_GOCDBStatusCommand.py | Python | gpl-3.0 | 11,902 | [
"DIRAC"
] | 71c000fc1e923c285f18de88ee123faeddf805785c8ed6596c7886cc5908f968 |
# Copyright (C) 2010-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# This case corresponds to: /visu/SWIG_scripts/A8 case
# Create table
# Do not use pv as a short name.
# It is the name of a function from numpy and may be redefined implicitly by a 'from numpy import *' call.
# import paraview.simple as pv
import paraview.simple as pvs
import paraview.servermanager as sm
# Define script for table creation
script = """
import math
# Get table output
table = self.GetTableOutput()
nb_rows = 10
nb_cols = 200
# Create first column
col1 = vtk.vtkDoubleArray()
col1.SetName('Frequency')
for i in xrange(0, nb_rows):
col1.InsertNextValue(i * 10 + 1)
table.AddColumn(col1)
# Create the remaining columns
for i in xrange(1, nb_cols + 1):
col = vtk.vtkDoubleArray()
col.SetName('Power ' + str(i))
# Fill the next column
for j in xrange(0, nb_rows):
if j % 2 == 1:
col.InsertNextValue(math.log10(j * 30 * math.pi / 180) * 20 + i * 15 + j * 5)
else:
col.InsertNextValue(math.sin(j * 30 * math.pi / 180) * 20 + i * 15 + j * 5)
table.AddColumn(col)
"""
# Creating programmable source (table)
ps = pvs.ProgrammableSource()
ps.OutputDataSetType = 'vtkTable'
ps.Script = script
pvs.RenameSource("Very useful data", ps)
ps.UpdatePipeline()
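# Optional follow-up (editor's sketch, not part of the original A8 case): the generated
# table could be inspected in a spreadsheet view with the standard paraview.simple calls
# below; they are left commented out so the script's behaviour is unchanged.
# view = pvs.CreateView('SpreadSheetView')
# pvs.Show(ps, view)
# pvs.Render(view)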
| FedoraScientific/salome-paravis | test/VisuPrs/SWIG_scripts/A8.py | Python | lgpl-2.1 | 2,064 | [
"ParaView",
"VTK"
] | e69a7fd4b85cc163df3cb606e0e1a9af6145314dd4030578f250c2095dd758b5 |
"""
The :mod:`sklearn.lda` module implements Linear Discriminant Analysis (LDA).
"""
from __future__ import print_function
# Authors: Matthieu Perrot
# Mathieu Blondel
import warnings
import numpy as np
from scipy import linalg
from .base import BaseEstimator, ClassifierMixin, TransformerMixin
from .utils.extmath import logsumexp
from .utils import check_array, check_X_y
__all__ = ['LDA']
class LDA(BaseEstimator, ClassifierMixin, TransformerMixin):
"""
Linear Discriminant Analysis (LDA)
A classifier with a linear decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that
all classes share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality
of the input, by projecting it to the most discriminative
directions.
Parameters
----------
n_components : int
Number of components (< n_classes - 1) for dimensionality reduction
priors : array, optional, shape = [n_classes]
Priors on classes
Attributes
----------
coef_ : array-like, shape = [rank, n_classes - 1]
Coefficients of the features in the linear decision
function. rank is min(rank_features, n_classes) where
rank_features is the dimensionality of the spaces spanned
by the features (i.e. n_features excluding redundant features).
covariance_ : array-like, shape = [n_features, n_features]
Covariance matrix (shared by all classes).
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
scalings_ : array-like, shape = [rank, n_classes - 1]
Scaling of the features in the space spanned by the class
centroids.
xbar_ : float, shape = [n_features]
Overall mean.
Examples
--------
>>> import numpy as np
>>> from sklearn.lda import LDA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LDA()
>>> clf.fit(X, y)
LDA(n_components=None, priors=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.qda.QDA: Quadratic discriminant analysis
"""
def __init__(self, n_components=None, priors=None):
self.n_components = n_components
self.priors = np.asarray(priors) if priors is not None else None
if self.priors is not None:
if (self.priors < 0).any():
raise ValueError('priors must be non-negative')
if self.priors.sum() != 1:
print('warning: the priors do not sum to 1. Renormalizing')
self.priors = self.priors / self.priors.sum()
def fit(self, X, y, store_covariance=False, tol=1.0e-4):
"""
Fit the LDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
store_covariance : boolean
If True the covariance matrix (shared by all classes) is computed
and stored in `self.covariance_` attribute.
"""
X, y = check_X_y(X, y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = np.bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
# Group means n_classes*n_features matrix
means = []
Xc = []
cov = None
if store_covariance:
cov = np.zeros((n_features, n_features))
for ind in range(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
# centered group data
Xgc = Xg - meang
Xc.append(Xgc)
if store_covariance:
cov += np.dot(Xgc.T, Xgc)
if store_covariance:
cov /= (n_samples - n_classes)
self.covariance_ = cov
self.means_ = np.asarray(means)
Xc = np.concatenate(Xc, axis=0)
# ----------------------------
# 1) within-class (univariate) scaling by the within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# ----------------------------
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
## ----------------------------
## 3) Between variance scaling
# Overall mean
xbar = np.dot(self.priors_, self.means_)
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(means - xbar).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use svd to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol * S[0])
# compose the scalings
self.scalings_ = np.dot(scalings, V.T[:, :rank])
self.xbar_ = xbar
# weight vectors / centroids
self.coef_ = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(self.coef_ ** 2, axis=1) +
np.log(self.priors_))
return self
def _decision_function(self, X):
X = check_array(X)
# center and scale data
X = np.dot(X - self.xbar_, self.scalings_)
return np.dot(X, self.coef_.T) + self.intercept_
def decision_function(self, X):
"""
This function returns the decision function values related to each
class on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def transform(self, X):
"""
Project the data so as to maximize class separation (large separation
between projected class means and small variance within each class).
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
"""
X = check_array(X)
# center and scale data
X = np.dot(X - self.xbar_, self.scalings_)
n_comp = X.shape[1] if self.n_components is None else self.n_components
return np.dot(X, self.coef_[:n_comp].T)
def predict(self, X):
"""
This function does classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples, n_classes]
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
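# (subtracting the per-row maximum before exponentiating and then normalising is
#  the usual numerically stable softmax over the per-class scores)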
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""
This function returns posterior log-probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples, n_classes]
"""
values = self._decision_function(X)
loglikelihood = (values - values.max(axis=1)[:, np.newaxis])
normalization = logsumexp(loglikelihood, axis=1)
return loglikelihood - normalization[:, np.newaxis]
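# Minimal usage sketch (editor's addition, illustrative only; X and y as in the class
# docstring example above):
#   lda = LDA(n_components=1).fit(X, y)
#   X_projected = lda.transform(X)     # shape (n_samples, 1)
#   posteriors = lda.predict_proba(X)  # per-class posterior probabilities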
| soulmachine/scikit-learn | sklearn/lda.py | Python | bsd-3-clause | 9,517 | [
"Gaussian"
] | 180a85fd1243c1579826db0b5403aca94890d228b75f69b377166ed1ee8be12d |
"""
This is the XROOTD StorageClass
"""
__RCSID__ = "$Id$"
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Resources.Storage.Utilities import checkArgumentFormat
from DIRAC.Resources.Storage.StorageBase import StorageBase
from DIRAC.Core.Utilities.Pfn import pfnparse, pfnunparse
from DIRAC.Core.Utilities.File import getSize
import os
from types import StringType, ListType, DictType
from XRootD import client
from XRootD.client.flags import DirListFlags, OpenFlags, MkDirFlags, QueryCode, StatInfoFlags
class XROOTStorage( StorageBase ):
""" .. class:: XROOTStorage
Xroot interface to StorageElement using pyxrootd
"""
def __init__( self, storageName, parameters ):
""" c'tor
:param self: self reference
:param str storageName: SE name
:param str protocol: protocol to use
:param str rootdir: base path for vo files
:param str host: SE host
:param int port: port to use to communicate with :host:
:param str spaceToken: space token
:param str wspath: location of SRM on :host:
"""
# # init base class
StorageBase.__init__( self, storageName, parameters )
self.log = gLogger.getSubLogger( "XROOTStorage", True )
# self.log.setLevel( "DEBUG" )
self.pluginName = 'XROOT'
self.protocol = self.protocolParameters['Protocol']
self.host = self.protocolParameters['Host']
# Awful hack to cope for the moment with the inability of RSS to deal with anything other than SRM
#self.port = ""
#self.wspath = ""
#self.spaceToken = ""
self.protocolParameters['Port'] = 0
self.protocolParameters['WSUrl'] = 0
self.protocolParameters['SpaceToken'] = 0
# The API instance to be used
self.xrootClient = client.FileSystem( self.host )
def exists( self, path ):
"""Check if the given path exists. The 'path' variable can be a string or a list of strings.
:param self: self reference
:param path: path (or list of path) on storage (it's a pfn root://blablabla)
:returns Failed dictionary: {pfn : errorMsg}
Successful dictionary: {pfn : bool}
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "XROOTStorage.exists: Checking the existence of %s path(s)" % len( urls ) )
successful = {}
failed = {}
for url in urls:
res = self.__singleExists( url )
# Check if there was a fatal error
if not res['OK']:
return res
# No fatal error, let's check whether we could verify the existence
res = res['Value']
if res['OK']:
successful[url] = res['Value']
else: # something went wrong with the query
failed[url] = res['Message']
resDict = {'Failed':failed, 'Successful':successful}
return S_OK( resDict )
def __singleExists( self, path ):
"""Check if the given path exists. The 'path' variable can be a string or a list of strings.
:param self: self reference
:param path: path (only 1) on storage (it's a pfn root://blablabla)
:returns: A 2 level nested S_ structure :
S_ERROR if there is a fatal error
S_OK (S_OK (boolean exists)) a boolean whether it exists or not
S_OK (S_ERROR (errorMsg)) if there was a problem getting the information
"""
self.log.debug( "XROOTStorage.__singleExists: Determining whether %s exists." % path )
res = pfnparse( path )
if not res['OK']:
return res
pfnDict = res['Value']
xFilePath = '/'.join( [pfnDict['Path'], pfnDict['FileName']] )
status, statInfo = self.xrootClient.stat( xFilePath )
if status.ok:
self.log.debug( "XROOTStorage.__singleExists: Path exists." )
return S_OK( S_OK( True ) )
else:
# I don't know when the fatal flag is set, or if it is even ever set
if status.fatal:
errStr = "XROOTStorage.__singleExists: Completely failed to determine the existence of file."
self.log.fatal( errStr, "%s %s" % ( self.name, status.message ) )
return S_ERROR( errStr )
elif status.error:
# errno 3011 corresponds to the file not existing
if status.errno == 3011:
errStr = "XROOTStorage.__singleExists: Path does not exists"
self.log.debug( errStr, path )
return S_OK( S_OK( False ) )
else:
errStr = "XROOTStorage.__singleExists: Failed to determine the existence of file"
self.log.debug( errStr, status.message )
return S_OK( S_ERROR( errStr ) )
errStr = "XROOTStorage.__singleExists : reached end of method, should not happen"
self.log.error( errStr )
return S_ERROR ( errStr )
#############################################################
#
# These are the methods for file manipulation
#
def isFile( self, path ):
"""Check if the given path exists and it is a file
:param self: self reference
:param path: path (or list of path) on storage
:returns: Successful dict {path : boolean}
Failed dict {path : error message }
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "XROOTStorage.isFile: Determining whether %s paths are files." % len( urls ) )
successful = {}
failed = {}
for url in urls:
res = self.__isSingleFile( url )
if not res['OK']:
return res
# No fatal error, nested structure
res = res['Value']
if res['OK']:
successful[url] = res['Value']
else: # something went wrong with the query
failed[url] = res['Message']
resDict = {'Failed':failed, 'Successful':successful}
return S_OK( resDict )
def __isSingleFile( self, path ):
"""Check if the given path exists and it is a file
:param self: self reference
:param path: single path on storage (pfn : root://...)
:returns: A 2 level nested S_ structure :
S_ERROR if there is a fatal error
S_OK (S_OK (boolean)) if it is a file or not
S_OK (S_ERROR (errorMsg)) if there was a problem getting the info
"""
self.log.debug( "XROOTStorage.__isSingleFile: Determining whether %s is a file." % path )
return self.__getSingleMetadata( path, 'File' )
def getFile( self, path, localPath = False ):
""" make a local copy of a storage :path:
:param self: self reference
:param str path: path (pfn root://) on storage
:param mixed localPath: if not specified, self.cwd
:returns Successful dict {path : size}
Failed dict {path : error message }
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "XROOTStorage.getFile: Trying to download %s files." % len( urls ) )
failed = {}
successful = {}
for src_url in urls:
fileName = os.path.basename( src_url )
if localPath:
dest_file = "%s/%s" % ( localPath, fileName )
else:
# other plugins use os.getcwd instead of self.cwd
# -> self.cwd would be wrong here since it refers to the remote side, so os.getcwd is the right one
dest_file = "%s/%s" % ( os.getcwd(), fileName )
res = self.__getSingleFile( src_url, dest_file )
if res['OK']:
successful[src_url] = res['Value']
else:
failed[src_url] = res['Message']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __getSingleFile( self, src_url, dest_file ):
""" do a real copy of storage file :src_url: to local fs under :dest_file:
:param self: self reference
:param str src_url: SE url to cp (root://...)
:param str dest_file: local fs path
:returns: S_ERROR(errorMsg) in case of any problem
S_OK(size of file) if all goes well
"""
self.log.info( "XROOTStorage.__getSingleFile: Trying to download %s to %s" % ( src_url, dest_file ) )
if not os.path.exists( os.path.dirname( dest_file ) ):
self.log.debug( "XROOTStorage.__getSingleFile: Local directory does not yet exist. Creating...", os.path.dirname( dest_file ) )
try:
os.makedirs( os.path.dirname( dest_file ) )
except OSError, error:
errStr = "XROOTStorage.__getSingleFile: Exception creation the destination directory"
self.log.exception( errStr, error )
return S_ERROR( errStr )
# Fetch the remote file size
# Logically the local path should be created first,
# but fetching the remote size here gives a more coherent error when the source is a directory
# ("not a file" rather than "cannot delete local directory")
res = self.__getSingleFileSize( src_url )
if not res['OK']:
return res
# No fatal error, nested structure
res = res['Value']
# Error getting the size
if not res['OK']:
errStr = "XROOTStorage.__getSingleFile: Error getting the file size."
self.log.exception( errStr, res['Message'] )
return S_ERROR( errStr )
remoteSize = res['Value']
# I could also just use the Force option of the copy method of the API...
if os.path.exists( dest_file ):
self.log.debug( "XROOTStorage.__getSingleFile: Local file already exists. Removing...", dest_file )
try:
os.remove( dest_file )
except OSError, error:
errStr = "XROOTStorage.__getSingleFile: Exception removing the file."
self.log.exception( errStr, "%s" % error )
return S_ERROR( errStr )
status = self.xrootClient.copy( src_url, dest_file )
# For some reason, the copy method returns a tuple (status,None)
status = status[0]
if status.ok:
self.log.debug( 'XROOTStorage.__getSingleFile: Got a file from storage.' )
localSize = getSize( dest_file )
if localSize == remoteSize:
self.log.debug( "XROOTStorage.__getSingleFile: Post transfer check successful." )
return S_OK( localSize )
errorMessage = "XROOTStorage.__getSingleFile: Source and destination file sizes do not match (%s vs %s)." % ( remoteSize, localSize )
self.log.error( errorMessage, src_url )
else:
errorMessage = "XROOTStorage.__getSingleFile: Failed to get file from storage."
errStr = "%s %s" % ( status.message, status.errno )
self.log.error( errorMessage, errStr )
if os.path.exists( dest_file ):
self.log.debug( "XROOTStorage.__getSingleFile: Removing local file %s." % dest_file )
try:
os.remove( dest_file )
except OSError, error:
errorMessage = "XROOTStorage.__getSingleFile: Exception removing local file "
self.log.exception( errorMessage, error )
return S_ERROR( errorMessage )
def putFile( self, path, sourceSize = 0 ):
"""Put a copy of the local file to the current directory on the
physical storage
:param path: dictionary {pfn (root://...) : localFile}
:param sourceSize : size in B (NOT USED)
:returns Successful dict {path : size}
Failed dict {path : error message }
S_ERROR(errMsg) in case of arguments problems
"""
if type( path ) is StringType:
return S_ERROR ( "XROOTStorage.putFile: path argument must be a dictionary (or a list of dictionary) { url : local path}" )
elif type( path ) is ListType:
if not len( path ):
return S_OK( { 'Failed' : {}, 'Successful' : {} } )
else:
urls = dict( [( url, False ) for url in path] )
elif type( path ) is DictType:
if len( path ) != 1:
return S_ERROR ( "XROOTStorage.putFile: path argument must be a dictionary (or a list of dictionary) { url : local path}" )
urls = path
failed = {}
successful = {}
for dest_url, src_file in urls.items():
res = self.__putSingleFile( src_file, dest_url, sourceSize )
if res['OK']:
successful[dest_url] = res['Value']
else:
failed[dest_url] = res['Message']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __putSingleFile( self, src_file, dest_url, sourceSize = 0 ):
"""Put a copy of the local file to the current directory on the
physical storage
:param str dest_url: pfn (root://...)
:param str src_file: local file to copy
:param int sourceSize: size in B (NOT USED)
:returns: S_OK( file size ) if everything went fine, S_ERROR otherwise
"""
self.log.debug( "XROOTStorage.__putSingleFile: trying to upload %s to %s" % ( src_file, dest_url ) )
# We create the folder first
res = pfnparse( dest_url )
if not res['OK']:
return res
pfnDict = res['Value']
# There is a bug in xrootd-python-0.1.2-1 (fixed in master branch) which
# forbids the MAKEPATH flag to work.
status = self.xrootClient.mkdir( pfnDict['Path'], MkDirFlags.MAKEPATH )
# the API returns (status,None...)
status = status[0]
if status.fatal:
errStr = "XROOTStorage.__putSingleFile: Completely failed to create the destination folder."
gLogger.error( errStr, status.message )
return S_ERROR( errStr )
# if it is only an error (e.g. the folder already exists), we try to keep going
if status.error:
errStr = "XROOTStorage.__putSingleFile: failed to create the destination folder."
gLogger.debug( errStr, status.message )
# Now we check if there is already a remote file. If yes, we remove it
res = self.__singleExists( dest_url )
if not res['OK']:
return res
# No fatal error, nested structure
res = res['Value']
if not res['OK']:
errStr = "XROOTStorage.__putSingleFile: failed to determine pre-existance of remote file."
gLogger.debug( errStr, res['Message'] )
# This is true only if the file exists. Then we remove it
if res['Value']:
self.log.debug( "XROOTStorage.__putSingleFile: Remote file exists and needs to be removed" )
res = self.__removeSingleFile( dest_url )
# Fatal error during removal
if not res['OK']:
return res
else:
res = res['Value']
if not res['OK']:
self.log.debug( "XROOTStorage.__putSingleFile: Failed to remove remote file", res['Message'] )
else:
self.log.debug( "XROOTStorage.__putSingleFile: Successfully removed remote file" )
# get the absolute path needed by the xroot api
src_file = os.path.abspath( src_file )
if not os.path.exists( src_file ):
errStr = "XROOTStorage.__putSingleFile: The local source file does not exist."
gLogger.error( errStr, src_file )
return S_ERROR( errStr )
sourceSize = getSize( src_file )
if sourceSize == -1:
errStr = "XROOTStorage.__putSingleFile: Failed to get file size."
gLogger.error( errStr, src_file )
return S_ERROR( errStr )
# Perform the copy with the API
status = self.xrootClient.copy( src_file, dest_url )
# For some reason, the copy method returns a tuple (status,None)
status = status[0]
if status.ok:
self.log.debug( 'XROOTStorage.__putSingleFile: Put file on storage.' )
res = self.__getSingleFileSize( dest_url )
# There was a fatal error
if not res['OK']:
return res
# No fatal error, let see if we could get the size
res = res['Value']
if res['OK']: # we could get the size for that url
remoteSize = res['Value']
else:
errMsg = "XROOTStorage.__putSingleFile: Could not get remote file size"
self.log.error( errMsg, res['Value'] )
return S_ERROR( "Could not get remote file size" )
if sourceSize == remoteSize:
self.log.debug( "XROOTStorage.__putSingleFile: Post transfer check successful." )
return S_OK( sourceSize )
errorMessage = "XROOTStorage.__putSingleFile: Source and destination file sizes do not match (%s vs %s)." % ( sourceSize, remoteSize )
self.log.error( errorMessage, src_file )
else:
errorMessage = "XROOTStorage.__putSingleFile: Failed to put file on storage."
errStr = "%s %s" % ( status.message, status.errno )
self.log.error( errorMessage, errStr )
res = self.__singleExists( dest_url )
if not res['OK']:
return res
# The nested value is True only if the file exists. Then we remove it
if res['Value']['OK'] and res['Value']['Value']:
self.log.debug( "XROOTStorage.__putSingleFile: Removing remote residual file.", dest_url )
res = self.__removeSingleFile( dest_url )
# Fatal error during removal
if not res['OK']:
return res
else:
res = res['Value']
if not res['OK']:
self.log.debug( "XROOTStorage.__putSingleFile: Failed to remove remote file.", dest_url )
else:
self.log.debug( "XROOTStorage.__putSingleFile: Successfully removed remote file.", dest_url )
return S_ERROR( errorMessage )
def removeFile( self, path ):
"""Remove physically the file specified by its path
A non existing file will be considered as successfully removed.
:param path: path (or list of path) on storage (pfn : root://...)
:returns Successful dict {path : True}
Failed dict {path : error message }
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
gLogger.debug( "RFIOStorage.removeFile: Attempting to remove %s files." % len( urls ) )
failed = {}
successful = {}
for url in urls:
res = self.__removeSingleFile( url )
# The removal did not have a big problem
if res['OK']:
res = res['Value']
# We could perform the removal
if res['OK']:
successful[url] = res['Value']
else:
failed [url] = res['Message']
else:
return res
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __removeSingleFile( self, path ):
"""Remove physically the file specified by its path
:param path: path on storage (pfn : root://...)
:returns: A 2 level nested S_ structure :
S_ERROR if there is a fatal error
S_OK (S_OK (True)) if the file is not present anymore (deleted or did not exist)
S_OK (S_ERROR (errorMsg)) if there was a problem removing the file
"""
res = pfnparse( path )
if not res['OK']:
return res
pfnDict = res['Value']
xFilePath = '/'.join( [pfnDict['Path'], pfnDict['FileName']] )
status = self.xrootClient.rm( xFilePath )
# For some reason, the rm method returns a tuple (status,None)
status = status[0]
if status.ok:
self.log.debug( "XROOTStorage.__removeSingleFile: Successfully removed file: %s" % path )
return S_OK( S_OK( True ) )
else:
# I don't know when the fatal flag is set, or if it is even ever set
if status.fatal:
errStr = "XROOTStorage.__removeSingleFile: Completely failed to remove the file."
self.log.fatal( errStr, "%s %s" % ( self.name, status.message ) )
return S_ERROR( errStr )
elif status.error:
# errno 3011 corresponds to the file not existing
if status.errno == 3011:
self.log.debug( "XROOTStorage.__removeSingleFile: File does not exist" )
return S_OK( S_OK( True ) )
else:
errStr = "XROOTStorage.__removeSingleFile: Failed to remove the file"
self.log.debug( errStr, status.message )
return S_OK( S_ERROR( errStr ) )
return S_ERROR ( "XROOTStorage.__removeSingleFile: reached the end of the method, should not happen" )
def getFileMetadata( self, path ):
""" Get metadata associated to the file(s)
:param self: self reference
:param path: path (or list of path) on storage (pfn : root://...)
:returns Successful dict {path : metadata}
Failed dict {path : error message }
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
failed = {}
successful = {}
for url in urls:
res = self.__getSingleFileMetadata( url )
if not res['OK']:
errStr = "XROOTStorage.getPathMetadata: Completely failed to get path metadata."
gLogger.error( errStr, res['Message'] )
return S_ERROR( errStr )
# There were no fatal errors, so now we see if there were any other errors
res = res['Value']
if not res['OK']:
failed[url] = res['Message']
else:
successful[url] = res['Value']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __getSingleMetadata( self, path, expectedType = None ):
""" Fetches the metadata of a single file or directory
If expectedType is None (default), then we fetch the metadata and return them.
If it is set, then we return a S_OK(boolean) depending on whether the type matches or not
:param self: self reference
:param path: path (only 1) on storage (pfn : root://...)
:param: expectedType : type that we expect the path to be ('File' or 'Directory')
:returns: A 2 level nested S_ structure :
S_ERROR if there is a fatal error
S_OK (S_OK (MetadataDict)) if we could get the metadata
S_OK (S_OK (Bool)) if we could get the metadata and is of type expectedType
S_OK (S_ERROR (errorMsg)) if there was a problem getting the metadata
"""
if expectedType and expectedType not in ['File', 'Directory']:
return S_ERROR( "XROOTStorage.__getSingleMetadata : the 'expectedType' argument must be either 'File' or 'Directory'" )
res = pfnparse( path )
if not res['OK']:
return res
pfnDict = res['Value']
xFilePath = '/'.join( [pfnDict['Path'], pfnDict['FileName']] )
status, statInfo = self.xrootClient.stat( xFilePath )
if status.ok:
# Transform the api output into a dictionary
metadataDict = self.__parseStatInfoFromApiOutput( statInfo )
# Add metadata expected in some places but not provided by itself
metadataDict['Lost'] = 0
metadataDict['Cached'] = 1
metadataDict['Unavailable'] = 0
# If we expect a given type, we return a boolean
if expectedType:
isExpectedType = metadataDict[expectedType]
return S_OK( S_OK( isExpectedType ) )
# otherwise we return the metadata dictionary
return S_OK( S_OK( metadataDict ) )
else:
# I don't know when the fatal flag is set, or if it is even ever set
if status.fatal:
errStr = "XROOTStorage.__getSingleMetadata: Completely failed to get path metadata."
self.log.fatal( errStr, "%s %s" % ( self.name, status.message ) )
return S_ERROR( errStr )
elif status.error:
# errno 3011 corresponds to the file not existing
if status.errno == 3011:
errStr = "XROOTStorage.__getSingleMetadata: Path does not exist"
else:
errStr = "XROOTStorage.__getSingleMetadata: Error in querying: %s" % status.message
self.log.debug( errStr )
return S_OK( S_ERROR( errStr ) )
return S_ERROR( "XROOTStorage.__getSingeFileMetadata : reached end of method. Should not happen" )
def __getSingleFileMetadata( self, path ):
""" Fetch the metadata associated to the file
:param self: self reference
:param path: path (only 1) on storage (pfn : root://...)
:returns: A 2 level nested S_ structure :
S_ERROR if there is a fatal error
S_OK (S_OK (MetadataDict)) if we could get the metadata
S_OK (S_ERROR (errorMsg)) if there was a problem getting the metadata or if it is not a file
"""
res = self.__getSingleMetadata( path )
if not res['OK']:
return res
# No fatal error, nested structure
res = res['Value']
if not res['OK']:
return S_OK( res )
metadataDic = res['Value']
# If it is not a file
if not metadataDic['File']:
errStr = "XROOTStorage.__getSingleFileMetadata: Supplied path is not a file."
self.log.error( errStr, path )
return S_OK( S_ERROR( errStr ) )
return S_OK( S_OK( metadataDic ) )
def __parseStatInfoFromApiOutput( self, statInfo ):
""" Split the content of the statInfo object into a dictionary
:param self: self reference
:param statInfo: XRootD.client.responses.StatInfo returned by the API
:returns: a dictionary. List of keys :
ModTime (str)
ModTimeStr (str)
Id (int)
Size (int)
Executable (bool)
Directory (bool)
Other (bool)
File (bool)
Offline (bool)
PoscPending (bool)
Readable (bool)
Writable (bool)
"""
metadataDict = {'File' : False, 'Directory' : False}
metadataDict['ModTime'] = statInfo.modtime
metadataDict['ModTimeStr'] = statInfo.modtimestr
metadataDict['Id'] = statInfo.id
metadataDict['Size'] = statInfo.size
statFlags = statInfo.flags
metadataDict['Executable'] = bool( statFlags & StatInfoFlags.X_BIT_SET )
metadataDict['Directory'] = bool( statFlags & StatInfoFlags.IS_DIR )
metadataDict['Other'] = bool( statFlags & StatInfoFlags.OTHER )
metadataDict['File'] = ( not metadataDict['Other'] and not metadataDict['Directory'] )
metadataDict['Offline'] = bool( statFlags & StatInfoFlags.OFFLINE )
metadataDict['PoscPending'] = bool( statFlags & StatInfoFlags.POSC_PENDING )
metadataDict['Readable'] = bool( statFlags & StatInfoFlags.IS_READABLE )
metadataDict['Writable'] = bool( statFlags & StatInfoFlags.IS_WRITABLE )
return metadataDict
def getFileSize( self, path ):
"""Get the physical size of the given file
:param self: self reference
:param path: path (or list of path) on storage (pfn : root://...)
:returns Successful dict {path : size}
Failed dict {path : error message }
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
failed = {}
successful = {}
for url in urls:
res = self.__getSingleFileSize( url )
# if there is a fatal error getting the size
if not res['OK']:
errStr = "XROOTStorage.getFileSize: Completely failed to get file size."
gLogger.error( errStr, res['Message'] )
return S_ERROR( errStr )
# There was no fatal error, so we see if we could get the size
res = res['Value']
if not res['OK']:
failed[url] = res['Message']
else:
successful[url] = res['Value']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __getSingleFileSize( self, path ):
"""Get the physical size of the given file
:param self: self reference
:param path: single path on storage (pfn : root://...)
:returns: A 2 level nested S_ structure :
S_ERROR if there is a fatal error
S_OK (S_OK (size)) if we could get the size
S_OK (S_ERROR (errorMsg)) if there was a problem getting the size
"""
# We fetch all the metadata
res = self.__getSingleFileMetadata( path )
# If there was a fatal error
if not res['OK']:
errStr = "XROOTStorage.__getSingleFileSize: Completely failed to get file size."
gLogger.error( errStr, res['Message'] )
return S_ERROR( errStr )
# No fatal error, so we check whether the API call succeeded
res = res['Value']
# We could not get the metadata
if not res['OK']:
return S_OK( S_ERROR( res['Message'] ) )
else:
return S_OK( S_OK( res['Value']['Size'] ) )
def getTransportURL( self, path, protocols = False ):
""" obtain the tURLs for the supplied path and protocols
:param self: self reference
:param str path: path on storage (pfn : root://...)
:param mixed protocols: protocols to use (must be or include 'root')
:returns Successful dict {path : path}
Failed dict {path : error message }
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
if protocols:
if type( protocols ) is StringType:
if protocols != self.protocol:
return S_ERROR( "getTransportURL: Must supply desired protocols to this plug-in (%s)." % self.protocol )
elif type( protocols ) is ListType:
if self.protocol not in protocols:
return S_ERROR( "getTransportURL: Must supply desired protocols to this plug-in (%s)." % self.protocol )
# For the time being, I assume I should not check whether the file exists or not
# So I just return the list of urls keys
successful = dict( [rootUrl, rootUrl] for rootUrl in urls )
failed = {}
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
##################################################################
#
# DO NOT REALLY MAKE SENSE FOR XROOT
#
##################################################################
def prestageFile( self, *parms, **kws ):
""" Issue prestage request for file
"""
return S_ERROR( "XROOTStorage.prestageFile: not implemented!" )
def prestageFileStatus( self, *parms, **kws ):
""" Obtain the status of the prestage request
"""
return S_ERROR( "XROOTStorage.prestageFile: not implemented!" )
def pinFile( self, *parms, **kws ):
""" Pin the file on the destination storage element
"""
return S_ERROR( "XROOTStorage.prestageFile: not implemented!" )
def releaseFile( self, *parms, **kws ):
""" Release the file on the destination storage element
"""
return S_ERROR( "XROOTStorage.prestageFile: not implemented!" )
#
##################################################################
#############################################################
#
# These are the methods for directory manipulation
#
def isDirectory( self, path ):
"""Check if the given path exists and it is a directory
:param self: self reference
:param path: path (or list of path) on storage (pfn : root://...)
:returns: Successful dict {path : boolean}
Failed dict {path : error message }
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "XROOTStorage.isDirectory: Determining whether %s paths are directories." % len( urls ) )
successful = {}
failed = {}
for url in urls:
res = self.__isSingleDirectory( url )
if not res['OK']:
return res
# No fatal error, nested structure
res = res['Value']
if res['OK']:
successful[url] = res['Value']
else: # something went wrong with the query
failed[url] = res['Message']
resDict = {'Failed':failed, 'Successful':successful}
return S_OK( resDict )
def __isSingleDirectory( self, path ):
"""Check if the given path exists and it is a file
:param self: self reference
:param path: single path on storage (pfn : root://...)
:returns: A 2 level nested S_ structure :
S_ERROR if there is a fatal error
S_OK (S_OK (boolean)) if it is a directory or not
S_OK (S_ERROR (errorMsg)) if there was a problem getting the info
We could have called 'not __isSingleFile', but since the API
offers Directory, File and Other, we don't take the risk
"""
return self.__getSingleMetadata( path, 'Directory' )
def getDirectory( self, path, localPath = False ):
"""Get locally a directory from the physical storage together with all its
files and subdirectories.
:param: path: path (or list of path) on storage (pfn : root://...)
:param: localPath: local path where to store what is downloaded
:return: successful and failed dictionaries. The keys are the paths,
the values are dictionary {'Files': amount of files downloaded, 'Size': amount of data downloaded}
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "XROOTStorage.getDirectory: Attempting to get local copies of %s directories." % len( urls ) )
failed = {}
successful = {}
for src_dir in urls:
dirName = os.path.basename( src_dir )
if localPath:
dest_dir = "%s/%s" % ( localPath, dirName )
else:
# The other storage objects use os.getcwd(), I think it is a bug
# -> no, self.cwd is remote
dest_dir = "%s/%s" % ( os.getcwd(), dirName )
res = self.__getSingleDirectory( src_dir, dest_dir )
if res['OK']:
if res['Value']['AllGot']:
self.log.debug( "XROOTStorage.getDirectory: Successfully got local copy of %s" % src_dir )
successful[src_dir] = {'Files':res['Value']['Files'], 'Size':res['Value']['Size']}
else:
self.log.error( "XROOTStorage.getDirectory: Failed to get entire directory.", src_dir )
failed[src_dir] = {'Files':res['Value']['Files'], 'Size':res['Value']['Size']}
else:
self.log.error( "XROOTStorage.getDirectory: Completely failed to get local copy of directory.", src_dir )
failed[src_dir] = {'Files':0, 'Size':0}
return S_OK( {'Failed' : failed, 'Successful' : successful } )
def __getSingleDirectory( self, src_dir, dest_dir ):
"""Download a single directory recursively
:param self: self reference
:param src_dir : remote directory to download (root://...)
:param dest_dir: local destination path
:returns: S_ERROR if there is a fatal error
S_OK (statistics dictionary ) if we could download something :
'AllGot': boolean of whether we could download everything
'Files': amount of files received
'Size': amount of data received
"""
self.log.debug( "XROOTStorage.__getSingleDirectory: Attempting to download directory %s at %s" % ( src_dir, dest_dir ) )
filesReceived = 0 # counter for the amount of files received
sizeReceived = 0 # counter for the data size received
# Check the remote directory exists
res = self.__isSingleDirectory( src_dir )
if not res['OK']:
errStr = "XROOTStorage.__getSingleDirectory: Completely failed (fatal error) to find the supplied source directory."
self.log.error( errStr, src_dir )
return S_ERROR( errStr )
# No fatal error, nested return
res = res['Value']
if not res['OK']:
errStr = "XROOTStorage.__getSingleDirectory: Failed to find the supplied source directory."
self.log.error( errStr, src_dir )
return S_ERROR( errStr )
# res['Value'] is True if it is a directory
if not res['Value']:
errStr = "XROOTStorage.__getSingleDirectory: The supplied source is not a directory."
self.log.error( errStr, src_dir )
return S_ERROR( errStr )
# Check the local directory exists and create it if not
if not os.path.exists( dest_dir ):
try:
os.makedirs( dest_dir )
except OSError, error:
errStr = "XROOTStorage.__getSingleDirectory: Exception creation the destination directory %s" % error
self.log.exception( errStr )
return S_ERROR( errStr )
# Get the remote directory contents
res = self.__listSingleDirectory( src_dir )
if not res['OK']:
errStr = "XROOTStorage.__getSingleDirectory: Failed to list the source directory."
self.log.error( errStr, src_dir )
return S_ERROR( errStr )
sFilesDict = res['Value']['Files']
subDirsDict = res['Value']['SubDirs']
# First get all the files in the directory
receivedAllFiles = True
self.log.debug( "XROOTStorage.__getSingleDirectory: Trying to first download the %s files." % len( sFilesDict ) )
for sFile in sFilesDict:
# Returns S_OK( file size ) if it worked
res = self.__getSingleFile( sFile, "/".join( [ dest_dir, os.path.basename( sFile ) ] ) )
if res['OK']:
filesReceived += 1
sizeReceived += res['Value']
else:
receivedAllFiles = False
# Then recursively get the sub directories
receivedAllDirs = True
self.log.debug( "XROOTStorage.__getSingleDirectory: Trying to recursively download the %s folder." % len( subDirsDict ) )
for subDir in subDirsDict:
subDirName = os.path.basename( subDir )
localPath = '%s/%s' % ( dest_dir, subDirName )
res = self.__getSingleDirectory( subDir, localPath )
if not res['OK']:
receivedAllDirs = False
if res['OK']:
if not res['Value']['AllGot']:
receivedAllDirs = False
filesReceived += res['Value']['Files']
sizeReceived += res['Value']['Size']
# Check whether all the operations were successful
if receivedAllDirs and receivedAllFiles:
allGot = True
else:
allGot = False
resDict = {'AllGot':allGot, 'Files':filesReceived, 'Size':sizeReceived}
return S_OK( resDict )
def putDirectory( self, path ):
""" puts a or several local directory to the physical storage together with all its files and subdirectories
:param self: self reference
:param str path: dictionary {pfn (root://...) : local dir}
:return: successful and failed dictionaries. The keys are the paths,
the values are dictionary {'Files': amount of files uploaded, 'Size': amount of data uploaded}
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
successful = {}
failed = {}
self.log.debug( "XROOTStorage.putDirectory: Attemping to put %s directories to remote storage." % len( urls ) )
for destDir, sourceDir in urls.items():
res = self.__putSingleDirectory( sourceDir, destDir )
if res['OK']:
if res['Value']['AllPut']:
self.log.debug( "XROOTStorage.putDirectory: Successfully put directory to remote storage: %s" % destDir )
successful[destDir] = { 'Files' : res['Value']['Files'], 'Size' : res['Value']['Size']}
else:
self.log.error( "XROOTStorage.putDirectory: Failed to put entire directory to remote storage.", destDir )
failed[destDir] = { 'Files' : res['Value']['Files'], 'Size' : res['Value']['Size']}
else:
self.log.error( "XROOTStorage.putDirectory: Completely failed to put directory to remote storage.", destDir )
failed[destDir] = { "Files" : 0, "Size" : 0 }
return S_OK( { "Failed" : failed, "Successful" : successful } )
def __putSingleDirectory( self, src_directory, dest_directory ):
""" puts one local directory to the physical storage together with all its files and subdirectories
:param self: self reference
:param src_directory : the local directory to copy
:param dest_directory: pfn (root://...) where to copy
:returns: S_ERROR if there is a fatal error
S_OK (statistics dictionary ) if we could upload something :
'AllPut': boolean of whether we could upload everything
'Files': amount of files uploaded
'Size': amount of data uploaded
"""
self.log.debug( "XROOTStorage.__putSingleDirectory: trying to upload %s to %s" % ( src_directory, dest_directory ) )
filesPut = 0
sizePut = 0
# Check the local directory exists
if not os.path.isdir( src_directory ):
errStr = "XROOTStorage.__putSingleDirectory: The supplied source directory does not exist or is not a directory."
self.log.error( errStr, src_directory )
return S_ERROR( errStr )
# Get the local directory contents
contents = os.listdir( src_directory )
allSuccessful = True
directoryFiles = {}
for fileName in contents:
self.log.debug( "FILENAME %s" % fileName )
localPath = '%s/%s' % ( src_directory, fileName )
remotePath = '%s/%s' % ( dest_directory, fileName )
if not os.path.isdir( localPath ):
directoryFiles[remotePath] = localPath
else:
res = self.__putSingleDirectory( localPath, remotePath )
if not res['OK']:
errStr = "XROOTStorage.__putSingleDirectory: Failed to put directory to storage."
self.log.error( errStr, res['Message'] )
else:
if not res['Value']['AllPut']:
allSuccessful = False
filesPut += res['Value']['Files']
sizePut += res['Value']['Size']
if directoryFiles:
res = self.putFile( directoryFiles )
if not res['OK']:
self.log.error( "XROOTStorage.__putSingleDirectory: Failed to put files to storage.", res['Message'] )
allSuccessful = False
else:
for fileSize in res['Value']['Successful'].itervalues():
filesPut += 1
sizePut += fileSize
if res['Value']['Failed']:
allSuccessful = False
return S_OK( { 'AllPut' : allSuccessful, 'Files' : filesPut, 'Size' : sizePut } )
def createDirectory( self, path ):
""" Make a/several new directory on the physical storage
This method creates all the intermediate directories
:param self: self reference
:param str path: path (or list of path) on storage (pfn : root://...)
:returns Successful dict {path : True}
Failed dict {path : error message }
"""
urls = checkArgumentFormat( path )
if not urls['OK']:
return urls
urls = urls['Value']
successful = {}
failed = {}
self.log.debug( "XROOTStorage.createDirectory: Attempting to create %s directories." % len( urls ) )
for url in urls:
res = self.__createSingleDirectory( url )
if res['OK']:
self.log.debug( "XROOTStorage.createDirectory: Successfully created directory on storage: %s" % url )
successful[url] = True
else:
self.log.error( "XROOTStorage.createDirectory: Failed to create directory on storage.",
"%s: %s" % ( url, res['Message'] ) )
failed[url] = res['Message']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __createSingleDirectory( self, path ):
""" Make a new directory on the physical storage
This method creates all the intermediate directories
:param self: self reference
:param str path: single path on storage (pfn : root://...)
:returns S_OK() if all went well
S_ERROR(errMsg) in case of any problem
"""
self.log.debug( "XROOTStorage.__createSingleDirectory: Attempting to create directory %s." % path )
res = pfnparse( path )
if not res['OK']:
return res
pfnDict = res['Value']
xFilePath = '/'.join( [pfnDict['Path'], pfnDict['FileName']] )
status = self.xrootClient.mkdir( xFilePath, MkDirFlags.MAKEPATH )
if status.ok:
return S_OK()
else:
if status.fatal:
errMsg = "XROOTStorage.__createSingleDir : Completely failed to create directory"
else:
errMsg = "XROOTStorage.__createSingleDir : failed to create directory"
self.log.error( errMsg, status.message )
return S_ERROR( errMsg )
def removeDirectory( self, path, recursive = False ):
"""Remove a directory on the physical storage together with all its files and
subdirectories.
:param path : single or list of path (root://..)
:param recursive : if True, we recursively delete the subdir
:return: successful and failed dictionaries. The keys are the paths,
the values are dictionary {'Files': amount of files deleted, 'Size': amount of data deleted}
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "XROOTStorage.removeDirectory: Attempting to remove %s directories." % len( urls ) )
successful = {}
failed = {}
for url in urls:
res = self.__removeSingleDirectory( url, recursive )
if res['OK']:
if res['Value']['AllRemoved']:
self.log.debug( "XROOTStorage.removeDirectory: Successfully removed %s" % url )
successful[url] = {'FilesRemoved':res['Value']['FilesRemoved'], 'SizeRemoved':res['Value']['SizeRemoved']}
else:
self.log.error( "XROOTStorage.removeDirectory: Failed to remove entire directory.", path )
failed[url] = {'FilesRemoved':res['Value']['FilesRemoved'], 'SizeRemoved':res['Value']['SizeRemoved']}
else:
self.log.error( "XROOTStorage.removeDirectory: Completely failed to remove directory.", url )
failed[url] = {'FilesRemoved':0, 'SizeRemoved':0}
return S_OK( {'Failed' : failed, 'Successful' : successful } )
def __removeSingleDirectory( self, path, recursive = False ):
"""Remove a directory on the physical storage together with all its files and
subdirectories.
:param path: pfn (root://...) of a directory to remove
:param recursive : if True, we recursively delete the subdir
:returns: S_ERROR if there is a fatal error
S_OK (statistics dictionary ) if we could remove something :
'AllRemoved': boolean of whether we could delete everything
'FilesRemoved': amount of files deleted
'SizeRemoved': amount of data deleted
"""
filesRemoved = 0
sizeRemoved = 0
# Check the remote directory exists
res = self.__isSingleDirectory( path )
if not res['OK']:
errStr = "XROOTStorage.__removeSingleDirectory: Completely failed (fatal error) to find the directory."
self.log.error( errStr, path )
return S_ERROR( errStr )
# No fatal error, nested return
res = res['Value']
if not res['OK']:
errStr = "XROOTStorage.__removeSingleDirectory: Failed to find the directory."
self.log.error( errStr, path )
return S_ERROR( errStr )
# res['Value'] is True if it is a directory
if not res['Value']:
errStr = "XROOTStorage.__removeSingleDirectory: The supplied path is not a directory."
self.log.error( errStr, path )
return S_ERROR( errStr )
# Get the remote directory contents
res = self.__listSingleDirectory( path )
if not res['OK']:
errStr = "XROOTStorage.__removeSingleDirectory: Failed to list the directory."
self.log.error( errStr, path )
return S_ERROR( errStr )
sFilesDict = res['Value']['Files']
subDirsDict = res['Value']['SubDirs']
removedAllFiles = True
removedAllDirs = True
allRemoved = True
# if recursive, we call ourselves on all the subdirs
if recursive:
# Recursively remove the sub directories
self.log.debug( "XROOTStorage.__removeSingleDirectory: Trying to recursively remove %s folder." % len( subDirsDict ) )
for subDir in subDirsDict:
subDirName = os.path.basename( subDir )
localPath = '%s/%s' % ( path, subDirName )
res = self.__removeSingleDirectory( localPath, recursive ) # recursive should be true..
if not res['OK']:
removedAllDirs = False
if res['OK']:
if not res['Value']['AllRemoved']:
removedAllDirs = False
filesRemoved += res['Value']['FilesRemoved']
sizeRemoved += res['Value']['SizeRemoved']
# Remove all the files in the directory
self.log.debug( "XROOTStorage.__removeSingleDirectory: Trying to remove %s files." % len( sFilesDict ) )
for sFile in sFilesDict:
# Returns S_OK( S_OK( True ) ) if the removal worked
res = self.__removeSingleFile( sFile )
if not res['OK']:
return res
# Nothing fatal, nested structure
res = res['Value']
if res['OK']:
filesRemoved += 1
sizeRemoved += sFilesDict[sFile]['Size']
else:
removedAllFiles = False
# Check whether all the operations were successful
if removedAllDirs and removedAllFiles:
allRemoved = True
else:
allRemoved = False
# Now I try to remove the directory itself
# We do it only if :
# - we go recursive, and everything was deleted
# - we don't go recursive, but we deleted all files and there are no subfolders
if ( recursive and allRemoved ) or ( not recursive and removedAllFiles and ( len( subDirsDict ) == 0 ) ):
res = pfnparse( path )
if not res['OK']:
return res
pfnDict = res['Value']
xFilePath = '/'.join( [pfnDict['Path'], pfnDict['FileName']] )
status = self.xrootClient.rmdir( xFilePath )
# For some reason, the rmdir method returns a tuple (status,None)
status = status[0]
if not status.ok:
if status.errno == 3011:
errStr = "XROOTStorage.__removeSingleDirectory: File does not exist"
self.log.debug( errStr )
else:
errStr = "XROOTStorage.__removeSingleDirectory: Error in querying: %s" % status.message
self.log.debug( errStr )
allRemoved = False
resDict = {'AllRemoved': allRemoved, 'FilesRemoved': filesRemoved, 'SizeRemoved': sizeRemoved}
return S_OK( resDict )
def listDirectory( self, path ):
""" List the supplied path
CAUTION : It is not recursive!
:param path : single or list of path (root://..)
:return: successful and failed dictionaries. The keys are the paths,
the values are dictionary 'SubDirs' and 'Files'. Each are dictionaries with
path as key and metadata as values (for Files only, SubDirs has just True as value)
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "XROOTStorage.listDirectory: Attempting to list %s directories." % len( urls ) )
res = self.isDirectory( urls )
if not res['OK']:
return res
successful = {}
failed = res['Value']['Failed']
directories = []
for url, isDirectory in res['Value']['Successful'].items():
if isDirectory:
directories.append( url )
else:
errStr = "XROOTStorage.listDirectory: path is not a directory."
gLogger.error( errStr, url )
failed[url] = errStr
for directory in directories:
res = self.__listSingleDirectory( directory )
if not res['OK']:
failed[directory] = res['Message']
continue
successful[directory] = res['Value']
resDict = {'Failed':failed, 'Successful':successful}
return S_OK( resDict )
def __listSingleDirectory( self, path ):
"""List the content of a single directory, NOT RECURSIVE
:param self: self reference
:param path: single path on storage (pfn : root://...)
:returns: A 2-level nested S_ structure :
S_ERROR if there is an error (fatal or not)
S_OK (dictionary). The dictionary has 2 keys : SubDirs and Files
The values of Files are dictionaries with the filename as key and the metadata as value
The values of SubDirs just have the directory name as key and True as value
"""
res = pfnparse( path )
if not res['OK']:
return res
self.log.debug( "XROOTStorage.__listSingleDirectory: Attempting to list directory %s." % path )
pfnDict = res['Value']
xFilePath = os.path.join( pfnDict['Path'], pfnDict['FileName'].strip( "/" ) )
status, listing = self.xrootClient.dirlist( xFilePath, DirListFlags.STAT )
if not status.ok:
errorMsg = "XROOTStorage.__listSingleDirectory : could not list the directory content"
self.log.error( errorMsg, status.message )
return S_ERROR ( errorMsg )
files = {}
subDirs = {}
for entry in listing:
fullPath = "root://%s/%s/%s" % ( self.host, xFilePath, entry.name )
metadataDict = self.__parseStatInfoFromApiOutput( entry.statinfo )
if metadataDict['Directory']:
subDirs[fullPath] = True
continue
elif metadataDict['File']:
files[fullPath] = metadataDict
else: # This "other", whatever that is
self.log.debug( "XROOTStorage.__listSingleDirectory : found an item which is not a file nor a directory", fullPath )
return S_OK( {'SubDirs' : subDirs, 'Files' : files } )
def __getSingleDirectoryMetadata( self, path ):
""" Fetch the metadata associated to the directory
:param self: self reference
:param path: path (only 1) on storage (pfn : root://...)
:returns: A 2 level nested S_ structure :
S_ERROR if there is a fatal error
S_OK (S_OK (MetadataDict)) if we could get the metadata
S_OK (S_ERROR (errorMsg)) if there was a problem getting the metadata or if it is not a directory
"""
self.log.debug( "XROOTStorage.__getSingleDirectoryMetadata: Fetching metadata of directory %s." % path )
res = self.__getSingleMetadata( path )
if not res['OK']:
return res
# No fatal error, nested structure
res = res['Value']
if not res['OK']:
return S_OK( res )
metadataDic = res['Value']
# If it is not a directory
if not metadataDic['Directory']:
errStr = "XROOTStorage.__getSingleDirectoryMetadata: Supplied path is not a directory."
self.log.error( errStr, path )
return S_OK( S_ERROR( errStr ) )
return S_OK( S_OK( metadataDic ) )
def getDirectoryMetadata( self, path ):
""" Get metadata associated to the directory(ies)
:param self: self reference
:param path: path (or list of path) on storage (pfn : root://...)
:returns: Successful dict {path : metadata}
Failed dict {path : error message }
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "XROOTStorage.getDirectoryMetadata: Attempting to fetch metadata of %s directories." % len( urls ) )
failed = {}
successful = {}
for url in urls:
res = self.__getSingleDirectoryMetadata( url )
if not res['OK']:
errStr = "XROOTStorage.getDirectoryMetadata: Completely failed to get path metadata."
gLogger.error( errStr, res['Message'] )
return S_ERROR( errStr )
# There were no fatal errors, so now we see if there were any other errors
res = res['Value']
if not res['OK']:
failed[url] = res['Message']
else:
successful[url] = res['Value']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __getSingleDirectorySize( self, path ):
""" Get the size of the directory on the storage
CAUTION : the size is not recursive, and does not go into subfolders
:param self: self reference
:param path: path (single) on storage (pfn : root://...)
:return: S_ERROR in case of problem
S_OK (Dictionary) Files : amount of files in the directory
Size : summed up size of files
subDirs : amount of sub directories
"""
self.log.debug( "XROOTStorage.__getSingleDirectorySize: Attempting to get the size of directory %s" % path )
res = self.__listSingleDirectory( path )
if not res['OK']:
return res
directorySize = 0
directoryFiles = 0
# itervalues returns an iterator over the values of the dictionary
for fileDict in res['Value']['Files'].itervalues():
directorySize += fileDict['Size']
directoryFiles += 1
self.log.debug( "XROOTStorage.__getSingleDirectorySize: Successfully obtained size of %s." % path )
subDirectories = len( res['Value']['SubDirs'] )
return S_OK( { 'Files' : directoryFiles, 'Size' : directorySize, 'SubDirs' : subDirectories } )
def getDirectorySize( self, path ):
""" Get the size of the directory on the storage
CAUTION : the size is not recursive, and does not go into subfolders
:param self: self reference
:param path: path (or list of path) on storage (pfn : root://...)
:returns: successful and failed dictionaries, both indexed by the path
In the failed one, the value is the error message
In the successful one, the values are dictionaries : Files : amount of files in the directory
Size : summed up size of files
subDirs : amount of sub directories
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "XROOTStorage.getDirectorySize: Attempting to get size of %s directories." % len( urls ) )
failed = {}
successful = {}
for url in urls:
res = self.__getSingleDirectorySize( url )
if not res['OK']:
failed[url] = res['Message']
else:
successful[url] = res['Value']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
| vmendez/DIRAC | Resources/Storage/XROOTStorage.py | Python | gpl-3.0 | 56,979 | [
"DIRAC"
] | 5f51d3715b17f591271c9f17821f0b4bebba56b20145127b98a01b3c64b8c804 |
# Makes print and division act like Python 3
from __future__ import print_function, division
# Import the usual libraries
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from copy import deepcopy
#from .nrc_utils import S, stellar_spectrum, jupiter_spec, cond_table, cond_filter
#from .nrc_utils import read_filter, bp_2mass, channel_select, coron_ap_locs
#from .nrc_utils import dist_image, pad_or_cut_to_size
from .nrc_utils import *
from .obs_nircam import obs_hci
#from .obs_nircam import plot_contrasts, plot_contrasts_mjup, planet_mags, plot_planet_patches
from tqdm.auto import tqdm, trange
import logging
_log = logging.getLogger('nb_funcs')
import pynrc
pynrc.setup_logging('WARN', verbose=False)
"""
Common functions for notebook simulations and plotting.
This is my attempt to standardize these routines over
all the various GTO programs.
"""
# Observation Definitions
# Functions to create and optimize a series of observation objects stored as a dictionary.
bp_k = bp_2mass('k')
def make_key(filter, pupil=None, mask=None):
"""Create identification key (string) based on filter, pupil, and mask"""
mask_key = 'none' if mask is None else mask
pupil_key = 'none' if pupil is None else pupil
key = '{}_{}_{}'.format(filter,mask_key,pupil_key)
return key
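# For example (values are arbitrary): make_key('F335M', pupil='CIRCLYOT', mask='MASK335R')
# returns 'F335M_MASK335R_CIRCLYOT', while make_key('F444W') returns 'F444W_none_none'.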
# Disk Models
def model_info(source, filt, dist, model_dir=''):
# base_dir = '/Volumes/NIRData/Andras_models_v2/'
# model_dir = base_dir + source + '/'
# Match filters with model
filt_switch = {'F182M':'F210M', 'F210M':'F210M', 'F250M':'F250M',
'F300M':'F300M', 'F335M':'F335M', 'F444W':'F444W'}
filt_model = filt_switch.get(filt, filt)
fname = source + '_' + filt_model +'sc.fits'
bp = read_filter(filt_model)
w0 = bp.avgwave() / 1e4
# Model pixels are 4x oversampled
detscale = (channel_select(bp))[0]
model_scale = detscale / 4.
# File name, arcsec/pix, dist (pc), wavelength (um), flux units, cen_star?
model_dict = {
'file' : model_dir+fname,
'pixscale' : model_scale,
'dist' : dist,
'wavelength' : w0,
'units' : 'Jy/pixel',
'cen_star' : True
}
# args_model = (model_dir+fname, model_scale, dist, w0, 'Jy/pixel', True)
return model_dict
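# Rough usage sketch (source name, distance, and directory are placeholders; assumes the model
# FITS files follow the '<source>_<filt>sc.fits' naming used above):
# info = model_info('SourceName', 'F444W', 17.4, model_dir='/path/to/models/')
# info['file'] -> '/path/to/models/SourceName_F444Wsc.fits'
# info['pixscale'] -> detector pixel scale / 4 (the models are 4x oversampled)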
def disk_rim_model(a_asec, b_asec, pa=0, sig_asec=0.1, flux_frac=0.5,
flux_tot=1.0, flux_units='mJy', wave_um=None, dist_pc=None,
pixsize=0.007, fov_pix=401):
"""
Simple geometric model of an inner disk rim: creates an
ellipsoidal ring with a brightness gradient along the major axis.
Parameters
----------
a_asec : float
Semi-major axis of ellipse
b_asec : float
Semi-minor axis of ellipse
Keyword Args
------------
pa : float
Position angle of major axis
sig_asec : float
Sigma width of ring model
flux_frac : float
A brightness gradient can be applied along the semi-major axis.
This parameter dictates the relative brightness of the minimum flux
(at the center of the axis) compared to the flux at the outer edge
of the geometric ring.
flux_tot : float
The total integrated flux of disk model.
flux_units : str
Units corresponding to `flux_tot`.
wave_um : float or None
Wavelength (in um) corresponding to `flux_tot`. Saved in output
FITS header unless the value is None.
dist_pc : float or None
Assumed distance of model (in pc). Saved in output FITS header
unless the value is None.
pixsize : float
Desired model pixel size in arcsec.
fov_pix : int
Number of pixels for x/y dimensions of output model data.
"""
from astropy.modeling.models import Ellipse2D
from astropy.convolution import Gaussian2DKernel, convolve_fft
from astropy.io import fits
# Get polar and cartesian pixel coordinate grid
sh = (fov_pix, fov_pix)
r_pix, th_ang = dist_image(np.ones(sh), return_theta=True)
x_pix, y_pix = rtheta_to_xy(r_pix, th_ang)
# In terms of arcsec
x_asec = pixsize * x_pix
y_asec = pixsize * y_pix
r_asec = pixsize * r_pix
# Semi major/minor axes (pix)
a_pix = a_asec / pixsize
b_pix = b_asec / pixsize
# Create ellipse functions
e1 = Ellipse2D(theta=0, a=a_pix+1, b=b_pix+1)
e2 = Ellipse2D(theta=0, a=a_pix-1, b=b_pix-1)
# Make the two ellipse images and subtract
e1_im = e1(x_pix,y_pix)
e2_im = e2(x_pix,y_pix)
e_im = e1_im - e2_im
# Produce a brightness gradient along major axis
grad_im = (1-flux_frac) * np.abs(x_pix) / a_pix + flux_frac
e_im = e_im * grad_im
# Convolve image with Gaussian to simulate scattering
sig_pix = sig_asec / pixsize
kernel = Gaussian2DKernel(sig_pix)
e_im = convolve_fft(e_im, kernel)
# Rotate
th_deg = pa - 90.
e_im = rotate_offset(e_im, angle=-th_deg, order=3, reshape=False)
e_im = flux_tot * e_im / np.sum(e_im)
hdu = fits.PrimaryHDU(e_im)
hdu.header['PIXELSCL'] = (pixsize, "Pixel Scale (asec/pix)")
hdu.header['UNITS'] = "{}/pixel".format(flux_units)
if wave_um is not None:
hdu.header['WAVE'] = (wave_um, "Wavelength (microns)")
if dist_pc is not None:
hdu.header['DISTANCE'] = (dist_pc, "Distance (pc)")
return fits.HDUList([hdu])
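# Minimal usage sketch (all numbers are arbitrary): a 0.5" x 0.3" ring at PA=30 deg with
# 10 mJy total flux at 4.4 um for a source at 100 pc.
# hdul = disk_rim_model(0.5, 0.3, pa=30, sig_asec=0.05, flux_tot=10.0, flux_units='mJy',
#                       wave_um=4.4, dist_pc=100.0, pixsize=0.007, fov_pix=401)
# hdul[0].header['PIXELSCL'] -> 0.007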
def obs_wfe(wfe_ref_drift, filt_list, sp_sci, dist, sp_ref=None, args_disk=None,
wind_mode='WINDOW', subsize=None, fov_pix=None, verbose=False, narrow=False,
model_dir=None, large_grid=False, **kwargs):
"""
For a given WFE drift and series of filters, create a list of
NIRCam observations.
"""
if sp_ref is None: sp_ref = sp_sci
obs_dict = {}
for filt, mask, pupil in filt_list:
# Create identification key
key = make_key(filt, mask=mask, pupil=pupil)
print(key)
# Disk Model
if args_disk is None:
args_disk_temp = None
elif 'auto' in args_disk:
# Convert to photons/sec in specified filter
args_disk_temp = model_info(sp_sci.name, filt, dist, model_dir=model_dir)
else:
args_disk_temp = args_disk
fov_pix_orig = fov_pix
# Define the subarray readout size
if 'FULL' in wind_mode: # Full frame
subuse = 2048
# Define PSF pixel size defaults
if mask is None:
fov_pix = 400 if fov_pix is None else fov_pix
elif ('210R' in mask) or ('SWB' in mask):
fov_pix = 640 if fov_pix is None else fov_pix
else:
fov_pix = 320 if fov_pix is None else fov_pix
elif subsize is None: # Window Mode defaults
if mask is None: # Direct Imaging
subuse = 400
elif ('210R' in mask) or ('SWB' in mask): # SW Coronagraphy
subuse = 640
else: # LW Coronagraphy
subuse = 320
else: # No effect if full frame
subuse = subsize
# Define PSF pixel size
fov_pix = subuse if fov_pix is None else fov_pix
# Make sure fov_pix is odd
# if (mask is None) and (np.mod(fov_pix,2)==0):
# fov_pix += 1
if np.mod(fov_pix,2)==0:
fov_pix += 1
# Other coronagraph vs direct imaging settings
module, oversample = ('B', 4) if mask is None else ('A', 2)
if narrow and ('SWB' in mask):
bar_offset=-8
elif narrow and ('LWB' in mask):
bar_offset=8
else:
bar_offset=None
# Initialize and store the observation
# A reference observation is stored inside each parent obs_hci class.
obs = obs_hci(sp_sci, dist, sp_ref=sp_ref, filter=filt, image_mask=mask, pupil_mask=pupil,
module=module, wind_mode=wind_mode, xpix=subuse, ypix=subuse,
wfe_ref_drift=wfe_ref_drift, fov_pix=fov_pix, oversample=oversample,
disk_params=args_disk_temp, verbose=verbose, bar_offset=bar_offset,
autogen_coeffs=False, **kwargs)
obs.gen_psf_coeff()
# Enable WFE drift
obs.gen_wfedrift_coeff()
# Enable mask position-dependent WFE coefficients
obs.gen_wfemask_coeff(large_grid=large_grid)
obs_dict[key] = obs
fov_pix = fov_pix_orig
# if there's a disk input, then we want to remove disk
# contributions from stellar flux and recompute to make
# sure the total flux counts match what we computed for
# sp_sci in the previous section (to match real photometry)
if args_disk is not None:
obs = obs_dict[key]
star_flux = obs.star_flux(sp=sp_sci) # Pass original input spectrum
disk_flux = obs.disk_hdulist[0].data.sum()
obs.sp_sci = sp_sci * (1 - disk_flux / star_flux)
obs.sp_sci.name = sp_sci.name
if sp_ref is sp_sci:
obs.sp_ref = obs.sp_sci
# Generate mask position-dependent PSFs
for key in tqdm(obs_dict.keys(), desc='Obs', leave=False):
obs_dict[key].gen_disk_psfs()
return obs_dict
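# Hedged example of the expected `filt_list` format, a list of (filter, image mask, pupil mask)
# tuples; the spectra, distance, WFE drift, and mask/pupil names below are placeholders:
# filt_list = [('F335M', 'MASK335R', 'CIRCLYOT'), ('F444W', 'MASK430R', 'CIRCLYOT')]
# obs_dict = obs_wfe(5, filt_list, sp_sci, 10.0, sp_ref=sp_ref, verbose=False)
# obs_dict['F335M_MASK335R_CIRCLYOT'] -> obs_hci instance for that configuration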
def obs_optimize(obs_dict, sp_opt=None, well_levels=None, tacq_max=1800, **kwargs):
"""
Perform ramp optimization on each science and reference observation
in a list of filter observations. Updates the detector MULTIACCUM
settings for each observation in the dictionary.
Example keyword values that may be passed through via **kwargs:
snr_goal = 5
snr_frac = 0.02
tacq_max = 1400
tacq_frac = 0.01
nint_min = 15
ng_max = 10
"""
# A very faint bg object on which to maximize S/N
# If sp_opt is not set, then default to a 20th magnitude flat source
if sp_opt is None:
sp_opt = stellar_spectrum('flat', 20, 'vegamag', bp_k)
# Some observations may saturate, so define a list of maximum well level
# values that we will incrementally check until a ramp setting is found
# that meets the constraints.
if well_levels is None:
well_levels = [0.8, 1.5, 3.0, 5.0, 10.0, 20.0, 100.0, 150.0, 300.0, 500.0]
filt_keys = list(obs_dict.keys())
filt_keys.sort()
print(['Pattern', 'NGRP', 'NINT', 't_int', 't_exp', 't_acq', 'SNR', 'Well', 'eff'])
for j, key in enumerate(filt_keys):
print('')
print(key)
obs = obs_dict[key]
obs_ref = obs.nrc_ref
sp_sci, sp_ref = (obs.sp_sci, obs.sp_ref)
# SW filter piggy-back on two LW filters, so 2 x tacq
is_SW = obs.bandpass.avgwave()/1e4 < 2.5
# Ramp optimization for both science and reference targets
for j, sp in enumerate([sp_sci, sp_ref]):
i = nrow = 0
while nrow==0:
well_max = well_levels[i]
tbl = obs.ramp_optimize(sp_opt, sp, well_frac_max=well_max, tacq_max=tacq_max, **kwargs)
nrow = len(tbl)
i+=1
# Grab the highest ranked MULTIACCUM settings and update the detector readout
v1, v2, v3 = tbl['Pattern', 'NGRP', 'NINT'][0]
vals = list(tbl[0])#.as_void()
strout = '{:10} {:4.0f} {:4.0f}'.format(vals[0], vals[1], vals[2])
for v in vals[3:]:
strout = strout + ', {:.4f}'.format(v)
print(strout)
# SW filter piggy-back on two LW filters, so 2 x tacq
# is_SW = obs.bandpass.avgwave()/1e4 < 2.5
# if is_SW:
# v3 *= 2
# Coronagraphic observations have two roll positions, so cut NINT by 2
if obs.image_mask is not None:
v3 = int(v3/2)
obs2 = obs if j==0 else obs_ref
obs2.update_detectors(read_mode=v1, ngroup=v2, nint=v3)
###########################################
# Functions to run a series of operations
###########################################
# Optimize observations
def do_opt(obs_dict, tacq_max=1800, **kwargs):
sp_opt = stellar_spectrum('flat', 20, 'vegamag', bp_k)
obs_optimize(obs_dict, sp_opt=sp_opt, tacq_max=tacq_max, **kwargs)
# For each filter setting, generate a series of contrast curves at different WFE values
def do_contrast(obs_dict, wfe_list, filt_keys, nsig=5, roll_angle=10, verbose=False, **kwargs):
"""
kwargs to pass to calc_contrast() and their defaults:
no_ref = False
func_std = robust.medabsdev
exclude_disk = True
exclude_planets = True
exclude_noise = False
opt_diff = True
fix_sat = False
ref_scale_all = False
"""
contrast_all = {}
for i in trange(len(filt_keys), desc='Observations'):
key = filt_keys[i]
obs = obs_dict[key]
if verbose:
print(key)
wfe_roll_temp = obs.wfe_roll_drift
wfe_ref_temp = obs.wfe_ref_drift
# Stores tuple of (Radial Distances, Contrast, and Sensitivity) for each WFE drift
curves = []
for wfe_drift in tqdm(wfe_list, leave=False, desc='WFE Drift'):
if kwargs.get('no_ref', False):
obs.wfe_roll_drift = wfe_drift
else:
obs.wfe_ref_drift = wfe_drift
result = obs.calc_contrast(roll_angle=roll_angle, nsig=nsig, **kwargs)
curves.append(result)
obs.wfe_roll_drift = wfe_roll_temp
obs.wfe_ref_drift = wfe_ref_temp
contrast_all[key] = curves
return contrast_all
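# Sketch of a typical call (WFE drift values in nm are illustrative):
# wfe_list = [0, 5, 10]
# contrast_all = do_contrast(obs_dict, wfe_list, list(obs_dict.keys()), nsig=5, roll_angle=10)
# Each contrast_all[key] is then a list of (radius_asec, contrast, sensitivity_mag) tuples,
# one entry per WFE drift value.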
def do_gen_hdus(obs_dict, filt_keys, wfe_ref_drift, wfe_roll_drift,
return_oversample=True, **kwargs):
"""
kwargs to pass to gen_roll_image() and their defaults:
PA1 = 0
PA2 = 10
zfact = None
return_oversample = True
exclude_disk = False
exclude_noise = False
no_ref = False
opt_diff = True
use_cmask = False
ref_scale_all = False
xyoff_roll1 = None
xyoff_roll2 = None
xyoff_ref = None
"""
hdulist_dict = {}
for key in tqdm(filt_keys):
# if verbose: print(key)
obs = obs_dict[key]
use_cmask = kwargs.pop('use_cmask', False)
hdulist = obs.gen_roll_image(return_oversample=return_oversample, use_cmask=use_cmask,
wfe_ref_drift=wfe_ref_drift, wfe_roll_drift=wfe_roll_drift, **kwargs)
hdulist_dict[key] = hdulist
return hdulist_dict
def do_sat_levels(obs, satval=0.95, ng_min=2, ng_max=None, verbose=True,
plot=True, xylim=2.5, return_fig_axes=False):
"""Only for obs.hci classes"""
ng_max = obs.det_info['ngroup'] if ng_max is None else ng_max
kw_gen_psf = {'return_oversample': False,'return_hdul': False}
# Well level of each pixel for science source
image = obs.calc_psf_from_coeff(sp=obs.sp_sci, **kw_gen_psf)
sci_levels1 = obs.saturation_levels(ngroup=ng_min, image=image)
sci_levels2 = obs.saturation_levels(ngroup=ng_max, image=image)
# Well level of each pixel for reference source
image = obs.calc_psf_from_coeff(sp=obs.sp_ref, **kw_gen_psf)
ref_levels1 = obs.saturation_levels(ngroup=ng_min, image=image, do_ref=True)
ref_levels2 = obs.saturation_levels(ngroup=ng_max, image=image, do_ref=True)
# Which pixels are saturated?
sci_mask1 = sci_levels1 > satval
sci_mask2 = sci_levels2 > satval
# Which pixels are saturated?
ref_mask1 = ref_levels1 > satval
ref_mask2 = ref_levels2 > satval
# How many saturated pixels?
nsat1_sci = len(sci_levels1[sci_mask1])
nsat2_sci = len(sci_levels2[sci_mask2])
# How many saturated pixels?
nsat1_ref = len(ref_levels1[ref_mask1])
nsat2_ref = len(ref_levels2[ref_mask2])
# Get saturation radius
if nsat1_sci == nsat1_ref == 0:
sat_rad = 0
else:
mask_temp = sci_mask1 if nsat1_sci>nsat1_ref else ref_mask1
rho_asec = dist_image(mask_temp, pixscale=obs.pix_scale)
sat_rad = rho_asec[mask_temp].max()
if verbose:
print('Sci: {}'.format(obs.sp_sci.name))
print(' {} saturated pixel at NGROUP={}; Max Well: {:.2f}'\
.format(nsat1_sci, ng_min, sci_levels1.max()))
print(' {} saturated pixel at NGROUP={}; Max Well: {:.2f}'\
.format(nsat2_sci, ng_max, sci_levels2.max()))
print(' Sat Dist NG={}: {:.2f} arcsec'.format(ng_min, sat_rad))
print('Ref: {}'.format(obs.sp_ref.name))
print(' {} saturated pixel at NGROUP={}; Max Well: {:.2f}'.\
format(nsat1_ref, ng_min, ref_levels1.max()))
print(' {} saturated pixel at NGROUP={}; Max Well: {:.2f}'.\
format(nsat2_ref, ng_max, ref_levels2.max()))
if (nsat2_sci==nsat2_ref==0) and (plot==True):
plot=False
print('Plotting turned off; no saturation detected.')
if plot:
fig, axes_all = plt.subplots(2,2, figsize=(8,8))
xlim = ylim = np.array([-1,1])*xylim
# Plot science source
nsat1, nsat2 = (nsat1_sci, nsat2_sci)
sat_mask1, sat_mask2 = (sci_mask1, sci_mask2)
sp = obs.sp_sci
xpix, ypix = (obs.det_info['xpix'], obs.det_info['ypix'])
bar_offpix = obs.bar_offset / obs.pixelscale
if ('FULL' in obs.det_info['wind_mode']) and (obs.image_mask is not None):
cdict = coron_ap_locs(obs.module, obs.channel, obs.image_mask, full=True)
xcen, ycen = cdict['cen_V23']
xcen += bar_offpix
else:
xcen, ycen = (xpix/2 + bar_offpix, ypix/2)
# rho = dist_image(sci_mask1, center=(xcen,ycen))
delx, dely = (xcen - xpix/2, ycen - ypix/2)
extent_pix = np.array([-xpix/2-delx,xpix/2-delx,-ypix/2-dely,ypix/2-dely])
extent = extent_pix * obs.pix_scale
axes = axes_all[0]
axes[0].imshow(sat_mask1, extent=extent)
axes[1].imshow(sat_mask2, extent=extent)
axes[0].set_title('{} Saturation (NGROUP={})'.format(sp.name, ng_min))
axes[1].set_title('{} Saturation (NGROUP={})'.format(sp.name,ng_max))
for ax in axes:
ax.set_xlabel('Arcsec')
ax.set_ylabel('Arcsec')
ax.tick_params(axis='both', color='white', which='both')
for k in ax.spines.keys():
ax.spines[k].set_color('white')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
# Plot ref source sat mask
nsat1, nsat2 = (nsat1_ref, nsat2_ref)
sat_mask1, sat_mask2 = (ref_mask1, ref_mask2)
sp = obs.sp_ref
axes = axes_all[1]
axes[0].imshow(sat_mask1, extent=extent)
axes[1].imshow(sat_mask2, extent=extent)
axes[0].set_title('{} Saturation (NGROUP={})'.format(sp.name, ng_min))
axes[1].set_title('{} Saturation (NGROUP={})'.format(sp.name,ng_max))
for ax in axes:
ax.set_xlabel('Arcsec')
ax.set_ylabel('Arcsec')
ax.tick_params(axis='both', color='white', which='both')
for k in ax.spines.keys():
ax.spines[k].set_color('white')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
fig.tight_layout()
if return_fig_axes and plot:
return (fig, axes), sat_rad
else:
return sat_rad
###########################################
# Simulated Data
###########################################
def average_slopes(hdulist):
"""
For a series of ramps, calculate the slope images then average together.
"""
ramps = hdulist[1].data
header = hdulist[0].header
slopes_fin = []
for i in range(len(ramps)):
data = ramps[i]
# Create time array
ng, ypix, xpix = data.shape
tvals = (np.arange(ng)+1) * header['TGROUP']
# Flatten image space to 1D
data = data.reshape([ng,-1])
# Make saturation mask
sat_val = 0.95*data.max()
sat_mask = data > sat_val
# Create slope images
# Cycle through groups using only unsaturated pixels
im_slope = np.zeros_like(data[0]) - 10
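# (added note) -10 acts as a sentinel marking pixels whose slope has not yet been computed;
# the loop below fills them in group-by-group using only unsaturated reads.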
for i in np.arange(1,ng)[::-1]:
ind = (im_slope==-10) & (~sat_mask[i])
if np.any(ind): # Check if any pixels are still True
im_slope[ind] = jl_poly_fit(tvals, data[:,ind])[1]
#print(im_slope[ind].shape)
# Special case of only first frame unsaturated
ind = (im_slope==-10) & (~sat_mask[0])
im_slope[ind] = data[0,ind] / tvals[0]
#print(im_slope[ind].shape)
# If saturated on first frame, set to NaN
ind = sat_mask[0]
im_slope[ind] = np.nan
#print(im_slope[ind].shape)
data = data.reshape([ng,ypix,xpix])
im_slope = im_slope.reshape([ypix,xpix])
slopes_fin.append(im_slope)
# Average slopes together
# use nanmean() to ignore pixels with NaNs
slopes_fin = np.array(slopes_fin)
slope_final = np.nanmean(slopes_fin, axis=0)
return slope_final
###########################################
# Plotting images and contrast curves
###########################################
def plot_contrasts_mjup(curves, nsig, wfe_list, obs=None, sat_rad=None, age=100,
ax=None, colors=None, xr=[0,10], yr=None, file=None, linder_models=True,
twin_ax=False, return_axes=False, **kwargs):
"""Plot mass contrast curves
Plot a series of mass contrast curves for corresponding WFE drifts.
Parameters
----------
curves : list
A list with length corresponding to `wfe_list`. Each list element
has three arrays in a tuple: the radius in arcsec, n-sigma contrast,
and n-sigma sensitivity limit (vega mag).
nsig : float
N-sigma limit corresponding to sensitivities/contrasts.
wfe_list : array-like
List of WFE drift values corresponding to each set of sensitivities
in `curves` argument.
Keyword Args
------------
obs : :class:`obs_hci`
Corresponding observation class that created the contrast curves.
Uses distances and stellar magnitude to plot contrast and AU
distances on opposing axes. Also necessary for mjup=True.
sat_rad : float
Saturation radius in arcsec. If >0, then that part of the contrast
curve is excluded from the plot
age : float
Required for plotting limiting planet masses.
file : string
Location and name of COND or Linder isochrone file.
ax : matplotlib.axes
Axes on which to plot curves.
colors : None, array-like
List of colors for contrast curves. Default is gradient of blues.
twin_ax : bool
Plot opposing axes in alternate units.
return_axes : bool
Return the matplotlib axes to continue plotting. If `obs` is set,
then this returns three sets of axes.
"""
if sat_rad is None:
sat_rad = 0
if ax is None:
fig, ax = plt.subplots()
if colors is None:
lin_vals = np.linspace(0.2,0.8,len(wfe_list))
colors = plt.cm.Blues_r(lin_vals)
filt = obs.filter
mod = obs.module
dist = obs.distance
if linder_models:
# Grab Linder model data
tbl = linder_table(file=file)
mass_data, mag_data = linder_filter(tbl, filt, age, dist=dist)
else:
# Grab COND model data
tbl = cond_table(age=age, file=file)
mass_data, mag_data = cond_filter(tbl, filt, module=mod, dist=dist)
# Plot the data
isort = np.argsort(mag_data)
for j, wfe_ref_drift in enumerate(wfe_list):
rr, contrast, mag_sens = curves[j]
label='$\Delta$' + "WFE = {} nm".format(wfe_list[j])
# Interpolate in log space
xv, yv = mag_data[isort], np.log10(mass_data[isort])
xint = mag_sens
yint = np.interp(xint, xv, yv)
# Choose the lowest mass value brighter than the given mag limits
yvals = np.array([np.min(yint[xint<=xval]) for xval in xint])
yvals = 10**yvals
xvals = rr[rr>sat_rad]
yvals = yvals[rr>sat_rad]
ax.plot(xvals, yvals, label=label, color=colors[j], zorder=1, lw=2)
if xr is not None: ax.set_xlim(xr)
if yr is not None: ax.set_ylim(yr)
ax.xaxis.get_major_locator().set_params(nbins=10, steps=[1, 2, 5, 10])
ax.yaxis.get_major_locator().set_params(nbins=10, steps=[1, 2, 5, 10])
ylabel = 'Mass Limits ($M_{\mathrm{Jup}}$)'
ax.set_ylabel(ylabel)
ax.set_xlabel('Separation (arcsec)')
if twin_ax:
# Plot opposing axes in alternate units
yr2 = np.array(ax.get_ylim()) * 318.0 # Convert to Earth masses
ax2 = ax.twinx()
ax2.set_ylim(yr2)
ax2.set_ylabel('Earth Masses')
ax3 = ax.twiny()
xr3 = np.array(ax.get_xlim()) * obs.distance
ax3.set_xlim(xr3)
ax3.set_xlabel('Separation (AU)')
ax3.xaxis.get_major_locator().set_params(nbins=9, steps=[1, 2, 5, 10])
if return_axes:
return (ax, ax2, ax3)
else:
if return_axes:
return ax
def plot_contrasts(curves, nsig, wfe_list, obs=None, sat_rad=None, ax=None,
colors=None, xr=[0,10], yr=[25,5], return_axes=False):
"""Plot contrast curves
Plot a series of contrast curves for corresponding WFE drifts.
Parameters
----------
curves : list
A list with length corresponding to `wfe_list`. Each list element
has three arrays in a tuple: the radius in arcsec, n-sigma contrast,
and n-sigma sensitivity limit (vega mag).
nsig : float
N-sigma limit corresponding to sensitivities/contrasts.
wfe_list : array-like
List of WFE drift values corresponding to each set of sensitivities
in `curves` argument.
Keyword Args
------------
obs : :class:`obs_hci`
Corresponding observation class that created the contrast curves.
Uses distances and stellar magnitude to plot contrast and AU
distances on opposing axes.
sat_rad : float
Saturation radius in arcsec. If >0, then that part of the contrast
curve is excluded from the plot
ax : matplotlib.axes
Axes on which to plot curves.
colors : None, array-like
List of colors for contrast curves. Default is gradient of blues.
return_axes : bool
Return the matplotlib axes to continue plotting. If `obs` is set,
then this returns three sets of axes.
"""
if sat_rad is None:
sat_rad = 0
if ax is None:
fig, ax = plt.subplots()
if colors is None:
lin_vals = np.linspace(0.3,0.8,len(wfe_list))
colors = plt.cm.Blues_r(lin_vals)
for j in range(len(wfe_list)): #for j, wfe_ref_drift in enumerate(wfe_list):
rr, contrast, mag_sens = curves[j]
xvals = rr[rr>sat_rad]
yvals = mag_sens[rr>sat_rad]
label='$\Delta$' + "WFE = {} nm".format(wfe_list[j])
ax.plot(xvals, yvals, label=label, color=colors[j], zorder=1, lw=2)
if xr is not None: ax.set_xlim(xr)
if yr is not None: ax.set_ylim(yr)
ax.xaxis.get_major_locator().set_params(nbins=10, steps=[1, 2, 5, 10])
ax.yaxis.get_major_locator().set_params(nbins=10, steps=[1, 2, 5, 10])
ax.set_ylabel('{:.0f}-$\sigma$ Sensitivities (mag)'.format(nsig))
ax.set_xlabel('Separation (arcsec)')
# Plot opposing axes in alternate units
if obs is not None:
yr1 = np.array(ax.get_ylim())
yr2 = 10**((obs.star_flux('vegamag') - yr1) / 2.5)
ax2 = ax.twinx()
ax2.set_yscale('log')
ax2.set_ylim(yr2)
ax2.set_ylabel('{:.0f}-$\sigma$ Contrast'.format(nsig))
ax3 = ax.twiny()
xr3 = np.array(ax.get_xlim()) * obs.distance
ax3.set_xlim(xr3)
ax3.set_xlabel('Separation (AU)')
ax3.xaxis.get_major_locator().set_params(nbins=9, steps=[1, 2, 5, 10])
if return_axes:
return (ax, ax2, ax3)
else:
if return_axes:
return ax
def planet_mags(obs, age=10, entropy=13, mass_list=[10,5,2,1], av_vals=[0,25], atmo='hy3s',
cond=False, linder=False, **kwargs):
"""Exoplanet Magnitudes
Determine a series of exoplanet magnitudes for given observation.
By default, use Spiegel & Burrows 2012 models, but has the option
to use the COND models from https://phoenix.ens-lyon.fr/Grids.
These are useful because SB12 model grids only ranges from 1-1000 Myr
with masses 1-15 MJup.
cond : bool
Instead of plotting sensitivities, use COND models to plot the
limiting planet masses.
linder : bool
Instead of plotting sensitivities, use Linder models to plot the
limiting planet masses.
file : string
Location and name of COND or Linder file.
"""
if av_vals is None:
av_vals = [0,0]
pmag = {}
for i,m in enumerate(mass_list):
flux_list = []
for j,av in enumerate(av_vals):
sp = obs.planet_spec(mass=m, age=age, Av=av, entropy=entropy, atmo=atmo, **kwargs)
sp_obs = S.Observation(sp, obs.bandpass, binset=obs.bandpass.wave)
flux = sp_obs.effstim('vegamag')
flux_list.append(flux)
pmag[m] = tuple(flux_list)
# Do COND models instead
# But still want SB12 models to get A_V information
if cond or linder:
# All mass and mag data for specified filter
filt = obs.filter
mod = obs.module
dist = obs.distance
if linder:
tbl = linder_table(**kwargs)
mass_data, mag_data = linder_filter(tbl, filt, age, dist=dist, **kwargs)
else:
# Grab COND model data
tbl = cond_table(age=age, **kwargs)
mass_data, mag_data = cond_filter(tbl, filt, module=mod, dist=dist, **kwargs)
# Mag information for the requested masses
isort = np.argsort(mass_data)
xv, yv = np.log10(mass_data[isort]), mag_data[isort]
mags0 = np.interp(np.log10(mass_list), np.log10(mass_data[isort]), mag_data[isort])
# Apply extinction
for i, m in enumerate(mass_list):
if np.allclose(av_vals, 0):
dm = np.array([0,0])
else:
#SB12 at A_V=0
sp = obs.planet_spec(mass=m, age=age, Av=0, entropy=entropy, atmo=atmo, **kwargs)
sp_obs = S.Observation(sp, obs.bandpass, binset=obs.bandpass.wave)
sb12_mag = sp_obs.effstim('vegamag')
# Get magnitude offset due to extinction
dm = np.array(pmag[m]) - sb12_mag
dm2 = pmag[m][1] - sb12_mag
# Apply extinction to COND models
pmag[m] = tuple(mags0[i] + dm)
return pmag
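# The returned dictionary maps each planet mass to a tuple of Vega magnitudes in the
# observation bandpass, one entry per A_V value, e.g. with the default av_vals=[0,25]:
# {10: (mag_av0, mag_av25), 5: (...), 2: (...), 1: (...)}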
def plot_planet_patches(ax, obs, age=10, entropy=13, mass_list=[10,5,2,1], av_vals=[0,25],
cols=None, update_title=False, linder=False, **kwargs):
"""Plot exoplanet magnitudes in region corresponding to extinction values."""
import matplotlib.patches as mpatches
# Don't plot anything if no masses are specified
if mass_list is None:
_log.info("mass_list=None; Not plotting planet patch locations.")
return
xlim = ax.get_xlim()
#lin_vals = np.linspace(0,0.5,4)
#cols = plt.cm.Purples_r(lin_vals)[::-1]
if cols is None:
cols = plt.cm.tab10(np.linspace(0,1,10))
dist = obs.distance
if entropy<8: entropy=8
if entropy>13: entropy=13
pmag = planet_mags(obs, age, entropy, mass_list, av_vals, linder=linder, **kwargs)
for i,m in enumerate(mass_list):
label = 'Mass = {} '.format(m) + '$M_{\mathrm{Jup}}$'
if av_vals is None:
ax.plot(xlim, pmag[m], color=cols[i], lw=1, ls='--', label=label)
else:
pm_min, pm_max = pmag[m]
rect = mpatches.Rectangle((xlim[0], pm_min), xlim[1], pm_max-pm_min,
alpha=0.2, color=cols[i], label=label, zorder=2)
ax.add_patch(rect)
ax.plot(xlim, [pm_min]*2, color=cols[i], lw=1, alpha=0.3)
ax.plot(xlim, [pm_max]*2, color=cols[i], lw=1, alpha=0.3)
entropy_switch = {13:'Hot', 8:'Cold'}
entropy_string = entropy_switch.get(entropy, "Warm")
ent_str = 'BEX Models' if linder else '{} Start'.format(entropy_string)
if av_vals is None:
av_str = ''
else:
av_str = ' ($A_V = [{:.0f},{:.0f}]$)'.format(av_vals[0],av_vals[1])
#age_str = 'Age = {:.0f} Myr; '.format(age)
#dist_str = 'Dist = {:.1f} pc; '.format(dist) if dist is not None else ''
#dist_str=""
#ax.set_title('{} -- {} ({}{}{})'.format(obs.filter,ent_str,age_str,dist_str,av_str))
if update_title:
ax.set_title('{} -- {}{}'.format(obs.filter,ent_str,av_str))
def plot_hdulist(hdulist, ext=0, xr=None, yr=None, ax=None, return_ax=False,
cmap=None, scale='linear', vmin=None, vmax=None, axes_color='white',
half_pix_shift=False, cb_label='Counts/sec', **kwargs):
from webbpsf import display_psf
if ax is None:
fig, ax = plt.subplots()
if cmap is None:
cmap = matplotlib.rcParams['image.cmap']
# This has to do with even/odd number of pixels in array.
# Usually everything is centered in the middle of a pixel
# and for odd array sizes that is where (0,0) will be plotted.
# However, even array sizes will have (0,0) at the pixel border,
# so this just shifts the entire image accordingly.
if half_pix_shift:
oversamp = hdulist[ext].header['OSAMP']
shft = 0.5*oversamp
hdul = deepcopy(hdulist)
hdul[0].data = fshift(hdul[0].data, shft, shft)
else:
hdul = hdulist
data = hdul[ext].data
if vmax is None:
vmax = 0.75 * np.nanmax(data) if scale=='linear' else np.nanmax(data)
if vmin is None:
vmin = 0 if scale=='linear' else vmax/1e6
out = display_psf(hdul, ext=ext, ax=ax, title='', cmap=cmap,
scale=scale, vmin=vmin, vmax=vmax, return_ax=True, **kwargs)
try:
ax, cb = out
cb.set_label(cb_label)
except:
ax = out
ax.set_xlim(xr)
ax.set_ylim(yr)
ax.set_xlabel('Arcsec')
ax.set_ylabel('Arcsec')
ax.tick_params(axis='both', color=axes_color, which='both')
for k in ax.spines.keys():
ax.spines[k].set_color(axes_color)
ax.xaxis.get_major_locator().set_params(nbins=9, steps=[1, 2, 5, 10])
ax.yaxis.get_major_locator().set_params(nbins=9, steps=[1, 2, 5, 10])
if return_ax:
return ax
###########################################
# Plotting images and contrast curves
###########################################
def update_yscale(ax, scale_type, ylim=None):
# Some fancy log+linear plotting
from matplotlib.ticker import FixedLocator, ScalarFormatter, LogFormatterSciNotation
if scale_type=='symlog':
ylim = [0,100] if ylim is None else ylim
ax.set_ylim(ylim)
yr = ax.get_ylim()
ax.set_yscale('symlog', linthreshy=10, linscaley=2)
ax.set_yticks(list(range(0,10)) + [10,100,1000])
#ax.get_yaxis().set_major_formatter(ScalarFormatter())
ax.yaxis.set_major_formatter(ScalarFormatter())
minor_log = list(np.arange(20,100,10)) + list(np.arange(200,1000,100))
minorLocator = FixedLocator(minor_log)
ax.yaxis.set_minor_locator(minorLocator)
ax.set_ylim([0,yr[1]])
elif scale_type=='log':
ax.set_yscale('log')
ylim = [0.1,100] if ylim is None else ylim
ax.set_ylim(ylim)
ax.yaxis.set_major_formatter(LogFormatterSciNotation())
elif 'lin' in scale_type:
ax.set_yscale('linear')
ylim = [0,100] if ylim is None else ylim
ax.set_ylim(ylim)
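# Usage sketch (axis object and limits are arbitrary):
# fig, ax = plt.subplots()
# update_yscale(ax, 'symlog', ylim=[0, 1000]) # linear below 10, log above
# update_yscale(ax, 'log', ylim=[0.1, 100])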
def do_plot_contrasts(curves_ref, curves_roll, nsig, wfe_list, obs, age, age2=None,
sat_rad=0, jup_mag=True, xr=[0,10], yr=[22,8], xr2=[0,10], yscale2='log', yr2=None,
save_fig=False, outdir='', return_fig_axes=False, **kwargs):
"""
Plot series of contrast curves.
"""
if (curves_ref is None) and (curves_roll is None):
_log.warning('Both curve sets are None. Returning...')
return
lin_vals = np.linspace(0.2,0.8,len(wfe_list))
c1 = plt.cm.Blues_r(lin_vals)
c2 = plt.cm.Reds_r(lin_vals)
c3 = plt.cm.Purples_r(lin_vals)
c4 = plt.cm.Greens_r(lin_vals)
fig, axes = plt.subplots(1,2, figsize=(14,4.5))
ax = axes[0]
if curves_ref is not None:
ax1, ax2, ax3 = plot_contrasts(curves_ref, nsig, wfe_list,
obs=obs, ax=ax, colors=c1, xr=xr, yr=yr, return_axes=True)
if curves_roll is not None:
obs_kw = None if curves_ref is not None else obs
axes2 = plot_contrasts(curves_roll, nsig, wfe_list,
obs=obs_kw, ax=ax, colors=c2, xr=xr, yr=yr, return_axes=True)
if curves_ref is None:
ax1, ax2, ax3 = axes2
axes1_all = [ax1, ax2, ax3]
#plot_planet_patches(ax, obs, age=age, av_vals=None, cond=True)
#ax.set_ylim([22,8])
# Legend organization
nwfe = len(wfe_list)
if curves_ref is None:
ax.legend(loc='upper right', title='Roll Sub')
elif curves_roll is None:
ax.legend(loc='upper right', title='Ref Sub')
else:
handles, labels = ax.get_legend_handles_labels()
h1 = handles[0:nwfe][::-1]
h2 = handles[nwfe:][::-1]
h1_t = [mpatches.Patch(color='none', label='Ref Sub')]
h2_t = [mpatches.Patch(color='none', label='Roll Sub')]
handles_new = h1_t + h1 + h2_t + h2
ax.legend(ncol=2, handles=handles_new, loc='upper right')
# Magnitude of Jupiter at object's distance
if jup_mag:
jspec = jupiter_spec(dist=obs.distance)
jobs = S.Observation(jspec, obs.bandpass, binset=obs.bandpass.wave)
jmag = jobs.effstim('vegamag')
if jmag<np.max(ax.get_ylim()):
ax.plot(xr, [jmag,jmag], color='C2', ls='--')
txt = 'Jupiter at {:.1f} pc'.format(obs.distance)
ax.text(xr[0]+0.02*(xr[1]-xr[0]), jmag, txt, horizontalalignment='left', verticalalignment='bottom')
# Plot in terms of Jupiter Masses
ax = axes[1]
age1 = age
if curves_ref is not None:
ax1, ax2, ax3 = plot_contrasts_mjup(curves_ref, nsig, wfe_list, obs=obs,
age=age1, ax=ax, colors=c1, xr=xr2, twin_ax=True, yr=None, return_axes=True)
if curves_roll is not None:
twin_kw = False if curves_ref is not None else True
axes2 = plot_contrasts_mjup(curves_roll, nsig, wfe_list, obs=obs,
age=age1, ax=ax, colors=c2, xr=xr2, twin_ax=twin_kw, yr=None, return_axes=True)
if curves_ref is None:
ax1, ax2, ax3 = axes2
axes2_all = [ax1, ax2, ax3]
if age2 is not None:
if curves_ref is not None:
plot_contrasts_mjup(curves_ref, nsig, wfe_list, obs=obs, age=age2, ax=ax, colors=c3, xr=xr2, yr=None)
if curves_roll is not None:
plot_contrasts_mjup(curves_roll, nsig, wfe_list, obs=obs, age=age2, ax=ax, colors=c4, xr=xr2, yr=None)
# Legend organization
handles, labels = ax.get_legend_handles_labels()
if curves_ref is None:
handles_new = [handles[i*nwfe] for i in range(2)]
labels_new = ['Roll Sub ({:.0f} Myr)'.format(age1),
'Roll Sub ({:.0f} Myr)'.format(age2)
]
elif curves_roll is None:
handles_new = [handles[i*nwfe] for i in range(2)]
labels_new = ['Ref Sub ({:.0f} Myr)'.format(age1),
'Ref Sub ({:.0f} Myr)'.format(age2)
]
else:
handles_new = [handles[i*nwfe] for i in range(4)]
labels_new = ['Ref Sub ({:.0f} Myr)'.format(age1),
'Roll Sub ({:.0f} Myr)'.format(age1),
'Ref Sub ({:.0f} Myr)'.format(age2),
'Roll Sub ({:.0f} Myr)'.format(age2),
]
else:
handles, labels = ax.get_legend_handles_labels()
if curves_ref is None:
handles_new = [handles[0]]
labels_new = ['Roll Sub ({:.0f} Myr)'.format(age1)]
elif curves_roll is None:
handles_new = [handles[0]]
labels_new = ['Ref Sub ({:.0f} Myr)'.format(age1)]
else:
handles_new = [handles[i*nwfe] for i in range(2)]
labels_new = ['Ref Sub ({:.0f} Myr)'.format(age1),
'Roll Sub ({:.0f} Myr)'.format(age1),
]
ax.legend(handles=handles_new, labels=labels_new, loc='upper right', title='COND Models')
# Update fancy y-axis scaling on the right plot
update_yscale(ax, yscale2, ylim=yr2)
yr_temp = np.array(ax.get_ylim()) * 318.0
update_yscale(axes2_all[1], yscale2, ylim=yr_temp)
# Saturation regions
if sat_rad > 0:
sat_rad_asec = sat_rad
for ax in axes:
ylim = ax.get_ylim()
rect = mpatches.Rectangle((0, ylim[0]), sat_rad, ylim[1]-ylim[0], alpha=0.2, color='k', zorder=2)
ax.add_patch(rect)
name_sci = obs.sp_sci.name
name_ref = obs.sp_ref.name
if curves_ref is None:
title_str = '{} (dist = {:.1f} pc) -- {} Contrast Curves'\
.format(name_sci, obs.distance, obs.filter)
else:
title_str = '{} (dist = {:.1f} pc; PSF Ref: {}) -- {} Contrast Curves'\
.format(name_sci, obs.distance, name_ref, obs.filter)
fig.suptitle(title_str, fontsize=16)
fig.tight_layout()
fig.subplots_adjust(top=0.85, bottom=0.1 , left=0.05, right=0.95)
fname = "{}_contrast_{}.pdf".format(name_sci.replace(" ", ""), obs.image_mask)
if save_fig:
fig.savefig(outdir+fname)
if return_fig_axes:
return fig, (axes1_all, axes2_all)
def do_plot_contrasts2(key1, key2, curves_all, nsig, obs_dict, wfe_list, age, sat_dict=None,
label1='Curves1', label2='Curves2', xr=[0,10], yr=[24,8],
yscale2='log', yr2=None, av_vals=[0,10], curves_all2=None,
c1=None, c2=None, linder_models=True, planet_patches=True, **kwargs):
fig, axes = plt.subplots(1,2, figsize=(14,4.5))
lin_vals = np.linspace(0.2,0.8,len(wfe_list))
if c1 is None: c1 = plt.cm.Blues_r(lin_vals)
if c2 is None: c2 = plt.cm.Reds_r(lin_vals)
c3 = plt.cm.Purples_r(lin_vals)
c4 = plt.cm.Greens_r(lin_vals)
# Left plot (5-sigma sensitivities)
ax = axes[0]
k = key1
curves = curves_all[k]
obs = obs_dict[k]
sat_rad = None if sat_dict is None else sat_dict[k]
ax, ax2, ax3 = plot_contrasts(curves, nsig, wfe_list, obs=obs, sat_rad=sat_rad,
ax=ax, colors=c1, xr=xr, yr=yr, return_axes=True)
axes1_all = [ax, ax2, ax3]
if key2 is not None:
k = key2
curves = curves_all[k] if curves_all2 is None else curves_all2[k]
obs = None
sat_rad = None if sat_dict is None else sat_dict[k]
plot_contrasts(curves, nsig, wfe_list, obs=obs, sat_rad=sat_rad,
ax=ax, xr=xr, yr=yr, colors=c2)
# Planet mass locations
if planet_patches:
plot_planet_patches(ax, obs_dict[key1], age=age, update_title=True, av_vals=av_vals,
linder=linder_models, **kwargs)
ax.set_title('Flux Sensitivities')
# Right plot (Converted to MJup/MEarth)
ax = axes[1]
k = key1
curves = curves_all[k]
obs = obs_dict[k]
sat_rad = None if sat_dict is None else sat_dict[k]
ax, ax2, ax3 = plot_contrasts_mjup(curves, nsig, wfe_list, obs=obs, age=age, sat_rad=sat_rad,
ax=ax, colors=c1, xr=xr, twin_ax=True, return_axes=True,
linder_models=linder_models)
axes2_all = [ax, ax2, ax3]
if key2 is not None:
k = key2
curves = curves_all[k] if curves_all2 is None else curves_all2[k]
obs = obs_dict[k]
sat_rad = None if sat_dict is None else sat_dict[k]
plot_contrasts_mjup(curves, nsig, wfe_list, obs=obs, age=age, sat_rad=sat_rad,
ax=ax, colors=c2, xr=xr, linder_models=linder_models)
mod_str = 'BEX' if linder_models else 'COND'
ax.set_title('Mass Sensitivities -- {} Models'.format(mod_str))
# Update fancy y-axis scaling on right plot
ax = axes2_all[0]
update_yscale(ax, yscale2, ylim=yr2)
yr_temp = np.array(ax.get_ylim()) * 318.0
update_yscale(axes2_all[1], yscale2, ylim=yr_temp)
# Left legend
nwfe = len(wfe_list)
ax=axes[0]
handles, labels = ax.get_legend_handles_labels()
h1 = handles[0:nwfe][::-1]
h2 = handles[nwfe:2*nwfe][::-1]
h3 = handles[2*nwfe:]
h1_t = [mpatches.Patch(color='none', label=label1)]
h2_t = [mpatches.Patch(color='none', label=label2)]
h3_t = [mpatches.Patch(color='none', label='{} ({})'.format(mod_str, obs_dict[key1].filter))]
if planet_patches:
if key2 is not None:
handles_new = h1_t + h1 + h2_t + h2 + h3_t + h3
ncol = 3
else:
h3 = handles[nwfe:]
handles_new = h1_t + h1 + h3_t + h3
ncol = 2
else:
if key2 is not None:
handles_new = h1_t + h1 + h2_t + h2
ncol = 2
else:
handles_new = h1_t + h1
ncol = 1
ax.legend(ncol=ncol, handles=handles_new, loc=1, fontsize=9)
# Right legend
ax=axes[1]
handles, labels = ax.get_legend_handles_labels()
h1 = handles[0:nwfe][::-1]
h2 = handles[nwfe:2*nwfe][::-1]
h1_t = [mpatches.Patch(color='none', label=label1)]
h2_t = [mpatches.Patch(color='none', label=label2)]
if key2 is not None:
handles_new = h1_t + h1 + h2_t + h2
ncol = 2
else:
handles_new = h1_t + h1
ncol = 1
ax.legend(ncol=ncol, handles=handles_new, loc=1, fontsize=9)
# Title
name_sci = obs.sp_sci.name
dist = obs.distance
age_str = 'Age = {:.0f} Myr'.format(age)
dist_str = 'Distance = {:.1f} pc'.format(dist) if dist is not None else ''
title_str = '{} ({}, {})'.format(name_sci,age_str,dist_str)
fig.suptitle(title_str, fontsize=16);
fig.tight_layout()
fig.subplots_adjust(top=0.8, bottom=0.1 , left=0.05, right=0.95)
return (fig, (axes1_all, axes2_all))
def plot_images(obs_dict, hdu_dict, filt_keys, wfe_drift, fov=10,
save_fig=False, outdir='', return_fig_axes=False):
nfilt = len(filt_keys)
ext_name = ['Model', 'Sim Image (linear scale)', 'Sim Image ($r^2$ scale)']
nim = len(ext_name)
fig, axes = plt.subplots(nfilt, nim, figsize=(8.5,6.5))
#axes = axes.transpose()
for j, k in enumerate(filt_keys):
obs = obs_dict[k]
hdu_mod = obs.disk_hdulist
if hdu_mod is None:
raise ValueError('Disk model image is None. Did you forget to add the disk image?')
hdu_sim = hdu_dict[k]
data = hdu_sim[0].data
data -= np.nanmedian(data)
# Make r^2 scaled version of data
hdu_sim_r2 = deepcopy(hdu_sim)
data = hdu_sim_r2[0].data
data -= np.nanmedian(data)
header = hdu_sim_r2[0].header
rho = dist_image(data, pixscale=header['PIXELSCL'])
data *= rho**2
# Max value for model
data_mod = hdu_mod[0].data
header_mod = hdu_mod[0].header
# Scale to data pixelscale
data_mod = frebin(data_mod, scale=header_mod['PIXELSCL']/header['PIXELSCL'])
rho_mod = dist_image(data_mod, pixscale=header['PIXELSCL'])
data_mod_r2 = data_mod*rho_mod**2
vmax = np.max(data_mod)
vmax2 = np.max(data_mod_r2)
# Scale value for data
im_temp = pad_or_cut_to_size(data_mod, hdu_sim[0].data.shape)
mask_good = im_temp>(0.1*vmax)
scl1 = np.nanmedian(hdu_sim[0].data[mask_good] / im_temp[mask_good])
scl1 = np.abs(scl1)
# Scale value for r^2 version
im_temp = pad_or_cut_to_size(data_mod_r2, hdu_sim_r2[0].data.shape)
mask_good = im_temp>(0.1*vmax2)
scl2 = np.nanmedian(hdu_sim_r2[0].data[mask_good] / im_temp[mask_good])
scl2 = np.abs(scl2)
vmax_vals = [vmax, vmax*scl1, vmax2*scl2]
hdus = [hdu_mod, hdu_sim, hdu_sim_r2]
for i, ax in enumerate(axes[j]):
hdulist = hdus[i]
data = hdulist[0].data
header = hdulist[0].header
pixscale = header['PIXELSCL']
rho = dist_image(data, pixscale=pixscale)
rad = data.shape[0] * pixscale / 2
extent = [-rad, rad, -rad, rad]
ax.imshow(data, vmin=0, vmax=0.9*vmax_vals[i], extent=extent)
ax.set_aspect('equal')
if i > 0: ax.set_yticklabels([])
if j < nfilt-1: ax.set_xticklabels([])
if j==nfilt-1: ax.set_xlabel('Arcsec')
if j==0: ax.set_title(ext_name[i])
if i==0:
texp = obs.multiaccum_times['t_exp']
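# The factor of 2 below presumably accounts for the two roll positions; the result is
# then rounded to the nearest 100 seconds for the axis label.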
texp = round(2*texp/100)*100
exp_text = "{:.0f} sec".format(texp)
ax.set_ylabel('{} ({})'.format(obs.filter, exp_text))
xlim = [-fov/2,fov/2]
ylim = [-fov/2,fov/2]
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.xaxis.get_major_locator().set_params(nbins=10, steps=[1, 2, 5, 10])
ax.yaxis.get_major_locator().set_params(nbins=10, steps=[1, 2, 5, 10])
ax.tick_params(axis='both', color='white', which='both')
for k in ax.spines.keys():
ax.spines[k].set_color('white')
name_sci = obs.sp_sci.name
wfe_text = "WFE Drift = {} nm".format(wfe_drift)
fig.suptitle('{} ({})'.format(name_sci, wfe_text), fontsize=16);
fig.tight_layout()
fig.subplots_adjust(wspace=0.05, hspace=0.05, top=0.9, bottom=0.1)
#fig.subplots_adjust(wspace=0.1, hspace=0.1, top=0.9, bottom=0.07 , left=0.05, right=0.97)
fname = "{}_images_{}.pdf".format(name_sci.replace(" ", ""), obs.image_mask)
if save_fig:
fig.savefig(outdir+fname)
if return_fig_axes:
return fig, axes
def plot_images_swlw(obs_dict, hdu_dict, filt_keys, wfe_drift, fov=10,
save_fig=False, outdir='', return_fig_axes=False):
nfilt = len(filt_keys)
ext_name = ['Model', 'Sim Image (linear scale)', 'Sim Image ($r^2$ scale)']
nim = len(ext_name)
fig, axes = plt.subplots(nim, nfilt, figsize=(14,7.5))
axes = axes.transpose()
for j, k in enumerate(filt_keys):
obs = obs_dict[k]
hdu_mod = obs.disk_hdulist
if hdu_mod is None:
raise ValueError('Disk model image is None. Did you forget to add the disk image?')
hdu_sim = hdu_dict[k]
data = hdu_sim[0].data
data -= np.nanmedian(data)
# Make r^2 scaled version of data
hdu_sim_r2 = deepcopy(hdu_sim)
data = hdu_sim_r2[0].data
data -= np.nanmedian(data)
header = hdu_sim_r2[0].header
rho = dist_image(data, pixscale=header['PIXELSCL'])
data *= rho**2
# Max value for model
data_mod = hdu_mod[0].data
header_mod = hdu_mod[0].header
# Scale to data pixelscale
data_mod = frebin(data_mod, scale=header_mod['PIXELSCL']/header['PIXELSCL'])
rho_mod = dist_image(data_mod, pixscale=header['PIXELSCL'])
data_mod_r2 = data_mod*rho_mod**2
vmax = np.max(data_mod)
vmax2 = np.max(data_mod_r2)
# Scale value for data
im_temp = pad_or_cut_to_size(data_mod, hdu_sim[0].data.shape)
mask_good = im_temp>(0.1*vmax)
scl1 = np.nanmedian(hdu_sim[0].data[mask_good] / im_temp[mask_good])
scl1 = np.abs(scl1)
# Scale value for r^2 version
im_temp = pad_or_cut_to_size(data_mod_r2, hdu_sim_r2[0].data.shape)
mask_good = im_temp>(0.1*vmax2)
scl2 = np.nanmedian(hdu_sim_r2[0].data[mask_good] / im_temp[mask_good])
scl2 = np.abs(scl2)
vmax_vals = [vmax,vmax*scl1,vmax2*scl2]
hdus = [hdu_mod, hdu_sim, hdu_sim_r2]
for i, ax in enumerate(axes[j]):
hdulist = hdus[i]
data = hdulist[0].data
header = hdulist[0].header
pixscale = header['PIXELSCL']
rho = dist_image(data, pixscale=pixscale)
rad = data.shape[0] * pixscale / 2
extent = [-rad, rad, -rad, rad]
ax.imshow(data, vmin=0, vmax=0.9*vmax_vals[i], extent=extent)
ax.set_aspect('equal')
if j > 0: ax.set_yticklabels([])
if i < nim-1: ax.set_xticklabels([])
if i==nim-1: ax.set_xlabel('Arcsec')
if j==0: ax.set_ylabel(ext_name[i])
if i==0:
texp = obs.multiaccum_times['t_exp']
texp = round(2*texp/100)*100
exp_text = "{:.0f} sec".format(texp)
ax.set_title('{} ({})'.format(obs.filter, exp_text))
xlim = [-fov/2,fov/2]
ylim = [-fov/2,fov/2]
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.xaxis.get_major_locator().set_params(nbins=9, steps=[1, 2, 5, 10])
ax.yaxis.get_major_locator().set_params(nbins=9, steps=[1, 2, 5, 10])
if fov<=2*rad:
ax.tick_params(axis='both', color='white', which='both')
for k in ax.spines.keys():
ax.spines[k].set_color('white')
name_sci = obs.sp_sci.name
wfe_text = "WFE Drift = {} nm".format(wfe_drift)
fig.suptitle('{} ({})'.format(name_sci, wfe_text), fontsize=16);
fig.tight_layout()
fig.subplots_adjust(wspace=0.1, hspace=0.1, top=0.9, bottom=0.07 , left=0.05, right=0.97)
fname = "{}_images_{}.pdf".format(name_sci.replace(" ", ""), obs.image_mask)
if save_fig:
fig.savefig(outdir+fname)
if return_fig_axes:
return fig, axes
| JarronL/pynrc | pynrc/nb_funcs.py | Python | mit | 54,609 | [
"Gaussian"
] | 153620e141963795442e69723c9a8a39376767994fb128fd2e6845a8ef4ee034 |
# coding: utf-8
"""
Demonstrate how to use MPI with kombine. Run this module with
mpiexec -n <nprocesses> python mpi-demo.py
where <nprocesses> is the number of processes to spawn.
"""
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Third-party
import kombine
import numpy as np
from scipy.stats import multivariate_normal
class Model(object):
def __init__(self, mean, cov):
self.mean = np.atleast_1d(mean)
self.cov = np.array(cov)
self.ndim = self.cov.shape[0]
def lnposterior(self, x):
return multivariate_normal.logpdf(x, mean=self.mean, cov=self.cov)
def __call__(self, x):
return self.lnposterior(x)
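# Illustrative check (not part of the original demo): calling the model returns the
# log-posterior, here just a multivariate normal log-density. For example,
# Model(np.zeros(2), np.eye(2))(np.zeros(2)) gives -log(2*pi) ~= -1.8379.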
ndim = 3
A = np.random.rand(ndim, ndim)
mean = np.zeros(ndim)
cov = A*A.T + ndim*np.eye(ndim)
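# Note (added): A*A.T is an elementwise product here (not a matrix product), but the result
# is symmetric, and adding ndim along the diagonal keeps it comfortably positive definite,
# so it is a valid covariance matrix.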
# create an ND Gaussian model
model = Model(mean, cov)
nwalkers = 500
sampler = kombine.Sampler(nwalkers, ndim, model, mpi=True)
p0 = np.random.uniform(-10, 10, size=(nwalkers, ndim))
p, post, q = sampler.burnin(p0)
p, post, q = sampler.run_mcmc(100)
| bfarr/kombine | examples/mpi-demo.py | Python | mit | 1,082 | [
"Gaussian"
] | 49b929909d80869aecd3b19954deac0c073a19d60f14c9bd255d232caf572933 |
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import itertools
from functools import partial
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False, dtype=None):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats (or dtype if provided). Finally, the function
checks that the size of the second dimension of the two arrays is equal, or
the equivalent check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
dtype : string, type, list of types or None (default=None)
Data type required for X and Y. If None, the dtype will be an
appropriate float type selected by _return_float_dtype.
.. versionadded:: 0.18
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype_float = _return_float_dtype(X, Y)
warn_on_dtype = dtype is not None
estimator = 'check_pairwise_arrays'
if dtype is None:
dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
Y = check_array(Y, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
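# Informal example (arrays are arbitrary): passing Y=None returns the same object twice,
# so callers can rely on ``X is Y`` as a cheap "compare X with itself" shortcut:
# A, B = check_pairwise_arrays(np.array([[1., 2.]]), None)
# A is B # -> True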
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
    vectors x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
    paired_distances : distances between pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is not None:
YY = np.atleast_2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
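# A minimal illustrative sketch, not part of the original scikit-learn module: it checks
# the expanded formulation dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y)) against
# a direct broadcasting computation, and exercises the Y_norm_squared fast path.
def _example_euclidean_distances():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(4, 3)
    Y = rng.rand(5, 3)
    direct = np.sqrt(((X[:, np.newaxis, :] - Y[np.newaxis, :, :]) ** 2).sum(axis=-1))
    Y_sq = (Y ** 2).sum(axis=1)  # pre-computed squared row norms of Y
    D = euclidean_distances(X, Y, Y_norm_squared=Y_sq)
    assert np.allclose(D, direct)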
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
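# A minimal illustrative sketch, not part of the original scikit-learn module: it verifies
# the documented equivalence with argmin/min over the full distance matrix, using a small
# batch_size so that the chunked code path above is actually exercised.
def _example_pairwise_distances_argmin_min():
    import numpy as np
    rng = np.random.RandomState(42)
    X = rng.rand(7, 3)
    Y = rng.rand(11, 3)
    indices, values = pairwise_distances_argmin_min(X, Y, batch_size=4)
    D = pairwise_distances(X, Y)
    assert np.array_equal(indices, D.argmin(axis=1))
    assert np.allclose(values, D.min(axis=1))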
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
    X : array-like
        Array containing points, of shape (n_samples1, n_features).
    Y : array-like
        Array containing points, of shape (n_samples2, n_features).
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
        componentwise L1 pairwise-distances (i.e. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
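# A minimal illustrative sketch, not part of the original scikit-learn module: it
# contrasts the summed L1 distance matrix with the componentwise output returned when
# sum_over_features=False, and shows how the two are related.
def _example_manhattan_distances():
    import numpy as np
    X = np.array([[1., 2.], [3., 4.]])
    Y = np.array([[1., 2.], [0., 3.]])
    D = manhattan_distances(X, Y)                            # shape (2, 2)
    C = manhattan_distances(X, Y, sum_over_features=False)   # shape (4, 2)
    # Summing the componentwise distances over features recovers the L1 matrix.
    assert np.allclose(C.sum(axis=1).reshape(2, 2), D)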
def cosine_distances(X, Y=None):
"""Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
np.clip(S, 0, 2, out=S)
if X is Y or Y is None:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
S[np.diag_indices_from(S)] = 0.0
return S
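# A minimal illustrative sketch, not part of the original scikit-learn module: it confirms
# that cosine_distances is 1.0 minus cosine_similarity and that self-distances on the
# diagonal are forced to exactly zero.
def _example_cosine_distances():
    import numpy as np
    rng = np.random.RandomState(1)
    X = rng.rand(5, 4)
    D = cosine_distances(X)
    S = cosine_similarity(X)
    assert np.allclose(D, 1.0 - S)
    assert np.all(np.diag(D) == 0.0)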
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
    -----
    The cosine distance is equivalent to half the squared
    euclidean distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
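# A minimal illustrative sketch, not part of the original scikit-learn module: it verifies
# the note above that, for unit-normalized rows, the paired cosine distance equals half
# the squared paired euclidean distance.
def _example_paired_cosine_distances():
    import numpy as np
    rng = np.random.RandomState(2)
    X = normalize(rng.rand(6, 3))
    Y = normalize(rng.rand(6, 3))
    d_cos = paired_cosine_distances(X, Y)
    d_euc = paired_euclidean_distances(X, Y)
    assert np.allclose(d_cos, 0.5 * d_euc ** 2)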
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
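# A minimal illustrative sketch, not part of the original scikit-learn module: it shows
# the callable-metric branch of paired_distances, where the callable is evaluated once
# per (X[i], Y[i]) pair. The Chebyshev lambda below is just an example metric.
def _example_paired_distances_callable():
    import numpy as np
    X = np.array([[0., 1.], [1., 1.]])
    Y = np.array([[0., 1.], [2., 1.]])
    chebyshev = lambda u, v: np.max(np.abs(u - v))
    d = paired_distances(X, Y, metric=chebyshev)
    assert np.allclose(d, [0., 1.])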
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
degree : int, default 3
gamma : float, default None
if None, defaults to 1.0 / n_features
coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
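# A minimal illustrative sketch, not part of the original scikit-learn module: it checks a
# single Gram-matrix entry against the formula K(x, y) = (gamma * <x, y> + coef0) ** degree.
def _example_polynomial_kernel():
    import numpy as np
    x = np.array([[1., 2., 3.]])
    y = np.array([[4., 5., 6.]])
    K = polynomial_kernel(x, y, degree=2, gamma=0.5, coef0=1)
    assert np.allclose(K[0, 0], (0.5 * np.dot(x[0], y[0]) + 1) ** 2)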
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
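# A minimal illustrative sketch, not part of the original scikit-learn module: it verifies
# that rbf_kernel matches exp(-gamma * ||x - y||^2) computed explicitly by broadcasting.
def _example_rbf_kernel():
    import numpy as np
    rng = np.random.RandomState(3)
    X = rng.rand(4, 5)
    Y = rng.rand(3, 5)
    gamma = 0.7
    sq_dists = ((X[:, np.newaxis, :] - Y[np.newaxis, :, :]) ** 2).sum(axis=-1)
    assert np.allclose(rbf_kernel(X, Y, gamma=gamma), np.exp(-gamma * sq_dists))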
def laplacian_kernel(X, Y=None, gamma=None):
"""Compute the laplacian kernel between X and Y.
The laplacian kernel is defined as::
K(x, y) = exp(-gamma ||x-y||_1)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <laplacian_kernel>`.
.. versionadded:: 0.17
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = -gamma * manhattan_distances(X, Y)
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.17
parameter ``dense_output`` for dense output.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
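# A minimal illustrative sketch, not part of the original scikit-learn module: it confirms
# the statement above that cosine_similarity coincides with linear_kernel once the rows
# have been L2-normalized.
def _example_cosine_similarity():
    import numpy as np
    rng = np.random.RandomState(4)
    X = rng.rand(5, 3)
    Xn = normalize(X)
    assert np.allclose(cosine_similarity(X), linear_kernel(Xn, Xn))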
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
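# A minimal illustrative sketch, not part of the original scikit-learn module: it
# cross-checks the fast compiled routine against a naive Python evaluation of
# k(x, y) = -sum((x - y)^2 / (x + y)) on strictly positive data.
def _example_additive_chi2_kernel():
    import numpy as np
    rng = np.random.RandomState(5)
    X = rng.rand(3, 4) + 0.1
    Y = rng.rand(2, 4) + 0.1
    K = additive_chi2_kernel(X, Y)
    naive = np.empty((3, 2))
    for i in range(3):
        for j in range(2):
            naive[i, j] = -np.sum((X[i] - Y[j]) ** 2 / (X[i] + Y[j]))
    assert np.allclose(K, naive)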
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
    computed. If the input is a distance matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None
X, Y = check_pairwise_arrays(X, Y, dtype=dtype)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
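# A minimal illustrative sketch, not part of the original scikit-learn module: it
# exercises the three dispatch paths of pairwise_distances, namely a scikit-learn metric
# name, a scipy.spatial.distance metric name, and a user-supplied callable.
def _example_pairwise_distances():
    import numpy as np
    rng = np.random.RandomState(6)
    X = rng.rand(4, 3)
    Y = rng.rand(5, 3)
    D_name = pairwise_distances(X, Y, metric="manhattan")    # scikit-learn implementation
    D_cheb = pairwise_distances(X, Y, metric="chebyshev")    # delegated to scipy's cdist
    D_call = pairwise_distances(X, Y, metric=lambda u, v: np.abs(u - v).sum())
    assert np.allclose(D_name, D_call)
    assert D_cheb.shape == D_name.shape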
# These distances require boolean arrays when using scipy.spatial.distance
PAIRWISE_BOOLEAN_FUNCTIONS = [
'dice',
'jaccard',
'kulsinski',
'matching',
'rogerstanimoto',
'russellrao',
'sokalmichener',
'sokalsneath',
'yule',
]
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'laplacian': laplacian_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel metrics.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.
    The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'laplacian' sklearn.pairwise.laplacian_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"laplacian": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
        ['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf',
         'laplacian', 'sigmoid', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params : boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
# import GPKernel locally to prevent circular imports
from ..gaussian_process.kernels import Kernel as GPKernel
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif isinstance(metric, GPKernel):
func = metric.__call__
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
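# A minimal illustrative sketch, not part of the original scikit-learn module: it shows
# pairwise_kernels with a named kernel and with filter_params=True, which drops keyword
# arguments that the selected kernel does not accept (here, 'degree' is removed for rbf).
def _example_pairwise_kernels():
    import numpy as np
    rng = np.random.RandomState(7)
    X = rng.rand(4, 3)
    K_rbf = pairwise_kernels(X, metric="rbf", gamma=0.5)
    K_filtered = pairwise_kernels(X, metric="rbf", filter_params=True,
                                  gamma=0.5, degree=3)
    assert np.allclose(K_rbf, K_filtered)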
| meduz/scikit-learn | sklearn/metrics/pairwise.py | Python | bsd-3-clause | 46,724 | [
"Gaussian"
] | cc013557800356ff9967657fbcbd0e8100bac0e82fcada3502a83921ce4ed229 |
## Automatically adapted for numpy.oldnumeric Jun 27, 2008 by -c
import rdkit.Numerics.rdAlignment as rdAlg
from rdkit import Geometry
from rdkit import RDConfig
import os,sys
import unittest
import numpy.oldnumeric as Numeric
import math
import copy
def lstFeq(l1, l2, tol=1.e-4):
if (len(list(l1)) != len(list(l2))):
return 0
for i in range(len(list(l1))):
if not feq(l1[i], l2[i], tol):
return 0
return 1
def feq(v1,v2,tol2=1e-4):
return abs(v1-v2)<=tol2
def transformPoint(trans, pt):
pt2 = copy.copy(list(pt))
pt2.append(1.0)
pt2 = Numeric.array(pt2)
res = Numeric.dot(trans, pt2)
return res[:3]
class TestCase(unittest.TestCase):
def setUp(self):
pass
def test1Basic(self):
# passing two numeric arrays
refPts = Numeric.zeros((2,3), Numeric.Float)
prbPts = Numeric.zeros((2,3), Numeric.Float)
refPts[1,0] = 1.0
prbPts[0,0] = 2.0
prbPts[0,1] = 2.0
prbPts[1,0] = 2.0
prbPts[1,1] = 3.0
res = rdAlg.GetAlignmentTransform(refPts, prbPts)
self.failUnless(feq(res[0], 0.0))
refLst = list(refPts)
cnt = 0
for item in list(prbPts):
self.failUnless(lstFeq(transformPoint(res[1], item), refLst[cnt]))
cnt+= 1
    # repeat with lists or tuples
refPts = ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0))
prbPts = ((2.0, 2.0, 0.0), (2.0, 3.0, 0.0))
res = rdAlg.GetAlignmentTransform(refPts, prbPts)
self.failUnless(feq(res[0], 0.0))
refPts = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]
prbPts = [[2.0, 2.0, 0.0], [2.0, 3.0, 0.0]]
res = rdAlg.GetAlignmentTransform(refPts, prbPts)
self.failUnless(feq(res[0], 0.0))
# mix it up
refPts = Numeric.zeros((2,3), Numeric.Float)
refPts[1,0] = 1.0
res = rdAlg.GetAlignmentTransform(refPts, prbPts)
self.failUnless(feq(res[0], 0.0))
def test2Weights(self) :
refPts = Numeric.array([[-math.cos(math.pi/6), -math.sin(math.pi/6), 0.0],
[math.cos(math.pi/6), -math.sin(math.pi/6), 0.0],
[0.0, 1.0, 0.0]], Numeric.Float)
prbPts = Numeric.array([[-2*math.sin(math.pi/6) + 3.0, 2*math.cos(math.pi/6), 4.0],
[-2*math.sin(math.pi/6) + 3.0, -2*math.cos(math.pi/6), 4.0],
[5.0, 0.0, 4.0]], Numeric.Float)
res = rdAlg.GetAlignmentTransform(refPts, prbPts)
self.failUnless(feq(res[0], 3.0))
target = [[-1.732, -1., 0.],
[1.732, -1., 0.],
[0., 2., 0.]]
cnt = 0
for item in list(prbPts):
self.failUnless(lstFeq(transformPoint(res[1], item), target[cnt]))
cnt += 1
weights = Numeric.array([1.0, 1.0, 2.0], Numeric.Float)
res = rdAlg.GetAlignmentTransform(refPts, prbPts, weights)
self.failUnless(feq(res[0], 3.75))
cnt = 0
target = [[-1.732, -1.25, 0.],
[1.732, -1.25, 0.],
[0., 1.75, 0.]]
for item in list(prbPts):
self.failUnless(lstFeq(transformPoint(res[1], item), target[cnt]))
cnt += 1
weights = [1.0, 1.0, 2.0]
res = rdAlg.GetAlignmentTransform(refPts, prbPts, weights)
self.failUnless(feq(res[0], 3.75))
weights = [1.0, 2.0, 2.0]
res = rdAlg.GetAlignmentTransform(refPts, prbPts, weights)
self.failUnless(feq(res[0], 4.8))
def test3tetra(self) :
refPts = Numeric.array([[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]], Numeric.Float)
prbPts = Numeric.array([[2.0, 2.0, 3.0],
[3.0, 2.0, 3.0],
[2.0, 3.0, 3.0]], Numeric.Float)
self.failUnlessRaises(ValueError,lambda : rdAlg.GetAlignmentTransform(refPts, prbPts))
prbPts = Numeric.array([[2.0, 2.0, 3.0],
[3.0, 2.0, 3.0],
[2.0, 3.0, 3.0],
[2.0, 2.0, 4.0]], Numeric.Float)
res = rdAlg.GetAlignmentTransform(refPts, prbPts)
self.failUnless(feq(res[0], 0.0))
wts = [1.0, 1.0, 1.0]
self.failUnlessRaises(ValueError,lambda : rdAlg.GetAlignmentTransform(refPts, prbPts, wts))
wts = [1.0, 1.0, 1.0, 1.0]
res = rdAlg.GetAlignmentTransform(refPts, prbPts, wts)
self.failUnless(feq(res[0], 0.0))
# test reflection
prbPts = Numeric.array([[2.0, 2.0, 3.0],
[3.0, 2.0, 3.0],
[2.0, 2.0, 4.0],
[2.0, 3.0, 3.0]], Numeric.Float)
res = rdAlg.GetAlignmentTransform(refPts, prbPts, wts)
self.failUnless(feq(res[0], 1.0))
res = rdAlg.GetAlignmentTransform(refPts, prbPts, wts, 1)
self.failUnless(feq(res[0], 0.0))
cnt = 0
refLst = list(refPts)
for item in list(prbPts):
self.failUnless(lstFeq(transformPoint(res[1], item), refLst[cnt]))
cnt += 1
def test4points(self) :
refPts = (Geometry.Point3D(0.0, 0.0, 0.0),
Geometry.Point3D(1.0, 0.0, 0.0),
Geometry.Point3D(0.0, 1.0, 0.0),
Geometry.Point3D(0.0, 0.0, 1.0),
)
prbPts = (Geometry.Point3D(2.0, 2.0, 3.0),
Geometry.Point3D(3.0, 2.0, 3.0),
Geometry.Point3D(2.0, 3.0, 3.0),
Geometry.Point3D(2.0, 2.0, 4.0),
)
res = rdAlg.GetAlignmentTransform(refPts, prbPts)
self.failUnless(feq(res[0], 0.0))
def test5errorHandling(self) :
refPts = (Geometry.Point3D(0.0, 0.0, 0.0),
Geometry.Point3D(1.0, 0.0, 0.0),
Geometry.Point3D(0.0, 1.0, 0.0),
Geometry.Point3D(0.0, 0.0, 1.0),
)
prbPts = (1,2,3,4,)
self.failUnlessRaises(ValueError,
lambda : rdAlg.GetAlignmentTransform(refPts, prbPts))
prbPts = ()
self.failUnlessRaises(ValueError,
lambda : rdAlg.GetAlignmentTransform(refPts, prbPts))
prbPts = 1
self.failUnlessRaises(ValueError,
lambda : rdAlg.GetAlignmentTransform(refPts, prbPts))
prbPts = (Geometry.Point3D(2.0, 2.0, 3.0),
Geometry.Point3D(3.0, 2.0, 3.0),
Geometry.Point3D(2.0, 3.0, 3.0),
(2.0, 2.0, 5.0),
)
self.failUnlessRaises(ValueError,
lambda : rdAlg.GetAlignmentTransform(refPts, prbPts))
if __name__ == '__main__':
print "Testing Alignment Wrapper code:"
unittest.main()
| rdkit/rdkit-orig | Code/Numerics/Alignment/Wrap/testAlignment.py | Python | bsd-3-clause | 7,036 | [
"RDKit"
] | 62ab60d0f94e33c5c9db1cb05cafb857954b97fe86b8fc20f2c98e84a5f6fff9 |
#!/usr/bin/env python
"""versioneer.py
(like a rocketeer, but for versions)
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Version: 0.7+
This file helps distutils-based projects manage their version number by just
creating version-control tags.
For developers who work from a VCS-generated tree (e.g. 'git clone' etc),
each 'setup.py version', 'setup.py build', 'setup.py sdist' will compute a
version number by asking your version-control tool about the current
checkout. The version number will be written into a generated _version.py
file of your choosing, where it can be included by your __init__.py
For users who work from a VCS-generated tarball (e.g. 'git archive'), it will
compute a version number by looking at the name of the directory created when
the tarball is unpacked. This conventionally includes both the name of the
project and a version number.
For users who work from a tarball built by 'setup.py sdist', it will get a
version number from a previously-generated _version.py file.
As a result, loading code directly from the source tree will not result in a
real version. If you want real versions from VCS trees (where you frequently
update from the upstream repository, or do new development), you will need to
do a 'setup.py version' after each update, and load code from the build/
directory.
You need to provide this code with a few configuration values:
versionfile_source:
A project-relative pathname into which the generated version strings
should be written. This is usually a _version.py next to your project's
main __init__.py file. If your project uses src/myproject/__init__.py,
this should be 'src/myproject/_version.py'. This file should be checked
in to your VCS as usual: the copy created below by 'setup.py
update_files' will include code that parses expanded VCS keywords in
generated tarballs. The 'build' and 'sdist' commands will replace it with
a copy that has just the calculated version string.
versionfile_build:
Like versionfile_source, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have package_dir={'myproject': 'src/myproject'},
then you will probably have versionfile_build='myproject/_version.py' and
versionfile_source='src/myproject/_version.py'.
tag_prefix: a string, like 'PROJECTNAME-', which appears at the start of all
VCS tags. If your tags look like 'myproject-1.2.0', then you
should use tag_prefix='myproject-'. If you use unprefixed tags
like '1.2.0', this should be an empty string.
parentdir_prefix: a string, frequently the same as tag_prefix, which
appears at the start of all unpacked tarball filenames. If
your tarball unpacks into 'myproject-1.2.0', this should
be 'myproject-'.
To use it:
1: include this file in the top level of your project
2: make the following changes to the top of your setup.py:
import versioneer
versioneer.versionfile_source = 'src/myproject/_version.py'
versioneer.versionfile_build = 'myproject/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
3: add the following arguments to the setup() call in your setup.py:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
4: run 'setup.py update_files', which will create _version.py, and will
append the following to your __init__.py:
from _version import __version__
5: modify your MANIFEST.in to include versioneer.py
6: add both versioneer.py and the generated _version.py to your VCS
"""
import os, sys, re
from distutils.core import Command
from distutils.command.sdist import sdist as _sdist
from distutils.command.build import build as _build
versionfile_source = None
versionfile_build = None
tag_prefix = None
parentdir_prefix = None
VCS = "git"
IN_LONG_VERSION_PY = False
LONG_VERSION_PY = '''
IN_LONG_VERSION_PY = True
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.7+ (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
import subprocess
import sys
def run_command(args, cwd=None, verbose=False):
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
except EnvironmentError:
e = sys.exc_info()[1]
if verbose:
print("unable to run %%s" %% args[0])
print(e)
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% args[0])
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_source):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
for line in open(versionfile_source,"r").readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
for ref in list(refs):
if not re.search(r'\d', ref):
if verbose:
print("discarding '%%s', no digits" %% ref)
refs.discard(ref)
# Assume all version tags have a digit. git's %%d expansion
# behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us
# distinguish between branches and tags. By ignoring refnames
# without digits, we filter out many common branch names like
# "release" and "stabilization", as well as "HEAD" and "master".
if verbose:
print("remaining refs: %%s" %% ",".join(sorted(refs)))
for ref in sorted(refs):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
# this runs 'git' from the root of the source tree. That either means
# someone ran a setup.py command (and this code is in versioneer.py, so
# IN_LONG_VERSION_PY=False, thus the containing directory is the root of
# the source tree), or someone ran a project-specific entry point (and
# this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
# containing directory is somewhere deeper in the source tree). This only
# gets called if the git-archive 'subst' variables were *not* expanded,
# and _version.py hasn't already been rewritten with a short version
# string, meaning we're inside a checked out source tree.
try:
here = os.path.abspath(__file__)
except NameError:
# some py2exe/bbfreeze/non-CPython implementations don't do __file__
return {} # not always correct
# versionfile_source is the relative path from the top of the source tree
# (where the .git directory might live) to this file. Invert this to find
# the root from __file__.
root = here
if IN_LONG_VERSION_PY:
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
root = os.path.dirname(here)
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
return {}
GIT = "git"
if sys.platform == "win32":
GIT = "git.cmd"
stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
if IN_LONG_VERSION_PY:
# We're running from _version.py. If it's from a source tree
# (execute-in-place), we can work upwards to find the root of the
# tree, and then check the parent directory for a version string. If
# it's in an installed application, there's no hope.
try:
here = os.path.abspath(__file__)
except NameError:
# py2exe/bbfreeze/non-CPython don't have __file__
return {} # without __file__, we have no hope
# versionfile_source is the relative path from the top of the source
# tree to _version.py. Invert this to find the root from __file__.
root = here
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
# we're running from versioneer.py, which means we're running from
# the setup.py in a source tree. sys.argv[0] is setup.py in the root.
here = os.path.abspath(sys.argv[0])
root = os.path.dirname(here)
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
tag_prefix = "%(TAG_PREFIX)s"
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
versionfile_source = "%(VERSIONFILE_SOURCE)s"
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
variables = { "refnames": git_refnames, "full": git_full }
ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
if not ver:
ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
if not ver:
ver = versions_from_parentdir(parentdir_prefix, versionfile_source,
verbose)
if not ver:
ver = default
return ver
'''
import subprocess
import sys
def run_command(args, cwd=None, verbose=False):
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
except EnvironmentError:
e = sys.exc_info()[1]
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_source):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
for line in open(versionfile_source,"r").readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
for ref in list(refs):
if not re.search(r'\d', ref):
if verbose:
print("discarding '%s', no digits" % ref)
refs.discard(ref)
# Assume all version tags have a digit. git's %d expansion
# behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us
# distinguish between branches and tags. By ignoring refnames
# without digits, we filter out many common branch names like
# "release" and "stabilization", as well as "HEAD" and "master".
if verbose:
print("remaining refs: %s" % ",".join(sorted(refs)))
for ref in sorted(refs):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
# this runs 'git' from the root of the source tree. That either means
# someone ran a setup.py command (and this code is in versioneer.py, so
# IN_LONG_VERSION_PY=False, thus the containing directory is the root of
# the source tree), or someone ran a project-specific entry point (and
# this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
# containing directory is somewhere deeper in the source tree). This only
# gets called if the git-archive 'subst' variables were *not* expanded,
# and _version.py hasn't already been rewritten with a short version
# string, meaning we're inside a checked out source tree.
try:
here = os.path.abspath(__file__)
except NameError:
# some py2exe/bbfreeze/non-CPython implementations don't do __file__
return {} # not always correct
# versionfile_source is the relative path from the top of the source tree
# (where the .git directory might live) to this file. Invert this to find
# the root from __file__.
root = here
if IN_LONG_VERSION_PY:
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
root = os.path.dirname(here)
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
GIT = "git"
if sys.platform == "win32":
GIT = "git.cmd"
stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
if IN_LONG_VERSION_PY:
# We're running from _version.py. If it's from a source tree
# (execute-in-place), we can work upwards to find the root of the
# tree, and then check the parent directory for a version string. If
# it's in an installed application, there's no hope.
try:
here = os.path.abspath(__file__)
except NameError:
# py2exe/bbfreeze/non-CPython don't have __file__
return {} # without __file__, we have no hope
# versionfile_source is the relative path from the top of the source
# tree to _version.py. Invert this to find the root from __file__.
root = here
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
# we're running from versioneer.py, which means we're running from
# the setup.py in a source tree. sys.argv[0] is setup.py in the root.
here = os.path.abspath(sys.argv[0])
root = os.path.dirname(here)
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
import sys
def do_vcs_install(versionfile_source, ipy):
GIT = "git"
if sys.platform == "win32":
GIT = "git.cmd"
run_command([GIT, "add", "versioneer.py"])
run_command([GIT, "add", versionfile_source])
run_command([GIT, "add", ipy])
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
run_command([GIT, "add", ".gitattributes"])
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.7+) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
version_version = '%(version)s'
version_full = '%(full)s'
def get_versions(default={}, verbose=False):
return {'version': version_version, 'full': version_full}
"""
DEFAULT = {"version": "unknown", "full": "unknown"}
def versions_from_file(filename):
versions = {}
try:
f = open(filename)
except EnvironmentError:
return versions
for line in f.readlines():
mo = re.match("version_version = '([^']+)'", line)
if mo:
versions["version"] = mo.group(1)
mo = re.match("version_full = '([^']+)'", line)
if mo:
versions["full"] = mo.group(1)
return versions
def write_to_version_file(filename, versions):
f = open(filename, "w")
f.write(SHORT_VERSION_PY % versions)
f.close()
print("set %s to '%s'" % (filename, versions["version"]))
def get_best_versions(versionfile, tag_prefix, parentdir_prefix,
default=DEFAULT, verbose=False):
# returns dict with two keys: 'version' and 'full'
#
# extract version from first of _version.py, 'git describe', parentdir.
# This is meant to work for developers using a source checkout, for users
# of a tarball created by 'setup.py sdist', and for users of a
# tarball/zipball created by 'git archive' or github's download-from-tag
# feature.
variables = get_expanded_variables(versionfile_source)
if variables:
ver = versions_from_expanded_variables(variables, tag_prefix)
if ver:
if verbose: print("got version from expanded variable %s" % ver)
return ver
ver = versions_from_file(versionfile)
if ver:
if verbose: print("got version from file %s %s" % (versionfile, ver))
return ver
ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
if ver:
if verbose: print("got version from git %s" % ver)
return ver
ver = versions_from_parentdir(parentdir_prefix, versionfile_source, verbose)
if ver:
if verbose: print("got version from parentdir %s" % ver)
return ver
if verbose: print("got version from default %s" % ver)
return default
def get_versions(default=DEFAULT, verbose=False):
assert versionfile_source is not None, "please set versioneer.versionfile_source"
assert tag_prefix is not None, "please set versioneer.tag_prefix"
assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
return get_best_versions(versionfile_source, tag_prefix, parentdir_prefix,
default=default, verbose=verbose)
def get_version(verbose=False):
return get_versions(verbose=verbose)["version"]
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
ver = get_version(verbose=True)
print("Version is currently: %s" % ver)
class cmd_build(_build):
def run(self):
versions = get_versions(verbose=True)
_build.run(self)
# now locate _version.py in the new build/ directory and replace it
# with an updated value
target_versionfile = os.path.join(self.build_lib, versionfile_build)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
f = open(target_versionfile, "w")
f.write(SHORT_VERSION_PY % versions)
f.close()
class cmd_sdist(_sdist):
def run(self):
versions = get_versions(verbose=True)
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory (remembering
# that it may be a hardlink) and replace it with an updated value
target_versionfile = os.path.join(base_dir, versionfile_source)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
f = open(target_versionfile, "w")
f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
f.close()
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
class cmd_update_files(Command):
description = "modify __init__.py and create _version.py"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
print(" creating %s" % versionfile_source)
f = open(versionfile_source, "w")
f.write(LONG_VERSION_PY % {"DOLLAR": "$",
"TAG_PREFIX": tag_prefix,
"PARENTDIR_PREFIX": parentdir_prefix,
"VERSIONFILE_SOURCE": versionfile_source,
})
f.close()
try:
old = open(ipy, "r").read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
f = open(ipy, "a")
f.write(INIT_PY_SNIPPET)
f.close()
else:
print(" %s unmodified" % ipy)
do_vcs_install(versionfile_source, ipy)
def get_cmdclass():
return {'version': cmd_version,
'update_files': cmd_update_files,
'build': cmd_build,
'sdist': cmd_sdist,
}
| isislovecruft/leekspin | versioneer.py | Python | mit | 25,770 | ["Brian"] | 6c8c351b97dafad5f4787047dd08f1adf58bd5fe6350b199e7028bd6eaf508e6 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines the base aperture classes.
"""
import abc
import copy
import numpy as np
from astropy.coordinates import SkyCoord
import astropy.units as u
from .bounding_box import BoundingBox
from ._photometry_utils import (_handle_units, _prepare_photometry_data,
_validate_inputs)
from ..utils._wcs_helpers import _pixel_scale_angle_at_skycoord
__all__ = ['Aperture', 'SkyAperture', 'PixelAperture']
class Aperture(metaclass=abc.ABCMeta):
"""
Abstract base class for all apertures.
"""
_shape_params = ()
positions = np.array(())
theta = None
def __len__(self):
if self.isscalar:
raise TypeError(f'A scalar {self.__class__.__name__!r} object '
'has no len()')
return self.shape[0]
def __getitem__(self, index):
if self.isscalar:
raise TypeError(f'A scalar {self.__class__.__name__!r} object '
'cannot be indexed')
kwargs = dict()
for param in self._shape_params:
kwargs[param] = getattr(self, param)
return self.__class__(self.positions[index], **kwargs)
def __iter__(self):
for i in range(len(self)):
yield self.__getitem__(i)
def _positions_str(self, prefix=None):
if isinstance(self, PixelAperture):
return np.array2string(self.positions, separator=', ',
prefix=prefix)
elif isinstance(self, SkyAperture):
return repr(self.positions)
else:
raise TypeError('Aperture must be a subclass of PixelAperture '
'or SkyAperture')
def __repr__(self):
prefix = f'{self.__class__.__name__}'
cls_info = [self._positions_str(prefix)]
if self._shape_params is not None:
for param in self._shape_params:
cls_info.append(f'{param}={getattr(self, param)}')
cls_info = ', '.join(cls_info)
return f'<{prefix}({cls_info})>'
def __str__(self):
prefix = 'positions'
cls_info = [
('Aperture', self.__class__.__name__),
(prefix, self._positions_str(prefix + ': '))]
if self._shape_params is not None:
for param in self._shape_params:
cls_info.append((param, getattr(self, param)))
fmt = [f'{key}: {val}' for key, val in cls_info]
return '\n'.join(fmt)
@property
def shape(self):
"""
The shape of the instance.
"""
if isinstance(self.positions, SkyCoord):
return self.positions.shape
else:
return self.positions.shape[:-1]
@property
def isscalar(self):
"""
Whether the instance is scalar (i.e., a single position).
"""
return self.shape == ()
class PixelAperture(Aperture):
"""
Abstract base class for apertures defined in pixel coordinates.
"""
@property
def _default_patch_properties(self):
"""
A dictionary of default matplotlib.patches.Patch properties.
"""
mpl_params = dict()
# matplotlib.patches.Patch default is ``fill=True``
mpl_params['fill'] = False
return mpl_params
@staticmethod
def _translate_mask_mode(mode, subpixels, rectangle=False):
if mode not in ('center', 'subpixel', 'exact'):
raise ValueError(f'Invalid mask mode: {mode}')
if rectangle and mode == 'exact':
mode = 'subpixel'
subpixels = 32
if mode == 'subpixel':
if not isinstance(subpixels, int) or subpixels <= 0:
raise ValueError('subpixels must be a strictly positive '
'integer')
if mode == 'center':
use_exact = 0
subpixels = 1
elif mode == 'subpixel':
use_exact = 0
elif mode == 'exact':
use_exact = 1
subpixels = 1
return use_exact, subpixels
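# Illustrative standalone sketch (not part of photutils): the branches above
# reduce to a small mapping from mode name to (use_exact, subpixels); the
# asserted values simply restate those branches.
def _translate_mask_mode_sketch():
    assert PixelAperture._translate_mask_mode('center', 7) == (0, 1)
    assert PixelAperture._translate_mask_mode('subpixel', 7) == (0, 7)
    assert PixelAperture._translate_mask_mode('exact', 7) == (1, 1)
    # 'exact' is not supported for rectangles; it falls back to subpixel/32
    assert PixelAperture._translate_mask_mode('exact', 7, rectangle=True) == (0, 32)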
@property
@abc.abstractmethod
def _xy_extents(self):
"""
The (x, y) extents of the aperture measured from the center
position.
In other words, the (x, y) extents are half of the aperture
minimal bounding box size in each dimension.
"""
raise NotImplementedError('Needs to be implemented in a subclass.')
@property
def bbox(self):
"""
The minimal bounding box for the aperture.
If the aperture is scalar then a single
`~photutils.aperture.BoundingBox` is returned, otherwise a list
of `~photutils.aperture.BoundingBox` is returned.
"""
positions = np.atleast_2d(self.positions)
x_delta, y_delta = self._xy_extents
xmin = positions[:, 0] - x_delta
xmax = positions[:, 0] + x_delta
ymin = positions[:, 1] - y_delta
ymax = positions[:, 1] + y_delta
bboxes = [BoundingBox.from_float(x0, x1, y0, y1)
for x0, x1, y0, y1 in zip(xmin, xmax, ymin, ymax)]
if self.isscalar:
return bboxes[0]
else:
return bboxes
@property
def _centered_edges(self):
"""
A list of ``(xmin, xmax, ymin, ymax)`` tuples, one for each
position, of the pixel edges after recentering the aperture at
the origin.
These pixel edges are used by the low-level `photutils.geometry`
functions.
"""
edges = []
for position, bbox in zip(np.atleast_2d(self.positions),
np.atleast_1d(self.bbox)):
xmin = bbox.ixmin - 0.5 - position[0]
xmax = bbox.ixmax - 0.5 - position[0]
ymin = bbox.iymin - 0.5 - position[1]
ymax = bbox.iymax - 0.5 - position[1]
edges.append((xmin, xmax, ymin, ymax))
return edges
@property
def area(self):
"""
The exact area of the aperture shape.
Returns
-------
area : float
The aperture area.
"""
raise NotImplementedError('Needs to be implemented in a subclass.')
@abc.abstractmethod
def to_mask(self, method='exact', subpixels=5):
"""
Return a mask for the aperture.
Parameters
----------
method : {'exact', 'center', 'subpixel'}, optional
The method used to determine the overlap of the aperture on
the pixel grid. Not all options are available for all
aperture types. Note that the more precise methods are
generally slower. The following methods are available:
* ``'exact'`` (default):
The exact fractional overlap of the aperture and
each pixel is calculated. The returned mask will
contain values between 0 and 1.
* ``'center'``:
A pixel is considered to be entirely in or out of the
aperture depending on whether its center is in or out
of the aperture. The returned mask will contain
values only of 0 (out) and 1 (in).
* ``'subpixel'``:
A pixel is divided into subpixels (see the
``subpixels`` keyword), each of which are considered
to be entirely in or out of the aperture depending on
whether its center is in or out of the aperture. If
``subpixels=1``, this method is equivalent to
``'center'``. The returned mask will contain values
between 0 and 1.
subpixels : int, optional
For the ``'subpixel'`` method, resample pixels by this factor
in each dimension. That is, each pixel is divided into
``subpixels ** 2`` subpixels.
Returns
-------
mask : `~photutils.aperture.ApertureMask` or list of `~photutils.aperture.ApertureMask`
A mask for the aperture. If the aperture is scalar then a
single `~photutils.aperture.ApertureMask` is returned,
otherwise a list of `~photutils.aperture.ApertureMask` is
returned.
"""
raise NotImplementedError('Needs to be implemented in a subclass.')
def area_overlap(self, data, *, mask=None, method='exact', subpixels=5):
"""
Return the areas of the aperture masks that overlap with the
data, i.e., how many pixels are actually used to calculate each
sum.
Parameters
----------
data : array_like or `~astropy.units.Quantity`
The 2D array to multiply with the aperture mask.
mask : array_like (bool), optional
A boolean mask with the same shape as ``data`` where a
`True` value indicates the corresponding element of ``data``
is masked. Masked data are excluded from the area overlap.
method : {'exact', 'center', 'subpixel'}, optional
The method used to determine the overlap of the aperture on
the pixel grid. Not all options are available for all
aperture types. Note that the more precise methods are
generally slower. The following methods are available:
* ``'exact'`` (default):
The exact fractional overlap of the aperture and
each pixel is calculated. The returned mask will
contain values between 0 and 1.
* ``'center'``:
A pixel is considered to be entirely in or out of the
aperture depending on whether its center is in or out
of the aperture. The returned mask will contain
values only of 0 (out) and 1 (in).
* ``'subpixel'``:
A pixel is divided into subpixels (see the
``subpixels`` keyword), each of which are considered
to be entirely in or out of the aperture depending on
whether its center is in or out of the aperture. If
``subpixels=1``, this method is equivalent to
``'center'``. The returned mask will contain values
between 0 and 1.
subpixels : int, optional
For the ``'subpixel'`` method, resample pixels by this factor
in each dimension. That is, each pixel is divided into
``subpixels ** 2`` subpixels.
Returns
-------
areas : float or array_like
The overlapping areas between the aperture masks and the data.
"""
apermasks = self.to_mask(method=method, subpixels=subpixels)
if self.isscalar:
apermasks = (apermasks,)
if mask is not None:
mask = np.asarray(mask)
if mask.shape != data.shape:
raise ValueError('mask and data must have the same shape')
data = np.ones_like(data)
vals = [apermask.get_values(data, mask=mask) for apermask in apermasks]
# if the aperture does not overlap the data return np.nan
areas = [val.sum() if val.shape != (0,) else np.nan for val in vals]
if self.isscalar:
return areas[0]
else:
return areas
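# Hedged usage sketch (not part of this module): CircularAperture is assumed
# to be the concrete subclass defined elsewhere in photutils.aperture, and
# the array values are made up.
def _area_overlap_sketch():
    from photutils.aperture import CircularAperture
    data = np.ones((10, 10))
    aper = CircularAperture((4.5, 4.5), r=3.0)
    # For an aperture fully inside an unmasked image this is ~ pi * r**2.
    return aper.area_overlap(data)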
def _do_photometry(self, data, variance, method='exact', subpixels=5,
unit=None):
aperture_sums = []
aperture_sum_errs = []
masks = self.to_mask(method=method, subpixels=subpixels)
if self.isscalar:
masks = (masks,)
for apermask in masks:
values = apermask.get_values(data)
# if the aperture does not overlap the data return np.nan
aper_sum = values.sum() if values.shape != (0,) else np.nan
aperture_sums.append(aper_sum)
if variance is not None:
values = apermask.get_values(variance)
# if the aperture does not overlap the data return np.nan
aper_var = values.sum() if values.shape != (0,) else np.nan
aperture_sum_errs.append(np.sqrt(aper_var))
aperture_sums = np.array(aperture_sums)
aperture_sum_errs = np.array(aperture_sum_errs)
# apply units
if unit is not None:
aperture_sums = aperture_sums * unit # can't use *= w/old numpy
aperture_sum_errs = aperture_sum_errs * unit
return aperture_sums, aperture_sum_errs
def do_photometry(self, data, error=None, mask=None, method='exact',
subpixels=5):
"""
Perform aperture photometry on the input data.
Parameters
----------
data : array_like or `~astropy.units.Quantity` instance
The 2D array on which to perform photometry. ``data``
should be background subtracted.
error : array_like or `~astropy.units.Quantity`, optional
The pixel-wise Gaussian 1-sigma errors of the input
``data``. ``error`` is assumed to include *all* sources of
error, including the Poisson error of the sources (see
`~photutils.utils.calc_total_error`). ``error`` must have
the same shape as the input ``data``.
mask : array_like (bool), optional
A boolean mask with the same shape as ``data`` where a
`True` value indicates the corresponding element of ``data``
is masked. Masked data are excluded from all calculations.
method : {'exact', 'center', 'subpixel'}, optional
The method used to determine the overlap of the aperture on
the pixel grid. Not all options are available for all
aperture types. Note that the more precise methods are
generally slower. The following methods are available:
* ``'exact'`` (default):
The exact fractional overlap of the aperture and
each pixel is calculated. The returned mask will
contain values between 0 and 1.
* ``'center'``:
A pixel is considered to be entirely in or out of the
aperture depending on whether its center is in or out
of the aperture. The returned mask will contain
values only of 0 (out) and 1 (in).
* ``'subpixel'``:
A pixel is divided into subpixels (see the
``subpixels`` keyword), each of which are considered
to be entirely in or out of the aperture depending on
whether its center is in or out of the aperture. If
``subpixels=1``, this method is equivalent to
``'center'``. The returned mask will contain values
between 0 and 1.
subpixels : int, optional
For the ``'subpixel'`` method, resample pixels by this factor
in each dimension. That is, each pixel is divided into
``subpixels ** 2`` subpixels.
Returns
-------
aperture_sums : `~numpy.ndarray` or `~astropy.units.Quantity`
The sums within each aperture.
aperture_sum_errs : `~numpy.ndarray` or `~astropy.units.Quantity`
The errors on the sums within each aperture.
Notes
-----
`RectangularAperture` and `RectangularAnnulus` photometry with
the "exact" method uses a subpixel approximation by subdividing
each data pixel by a factor of 1024 (``subpixels = 32``). For
rectangular aperture widths and heights in the range from
2 to 100 pixels, this subpixel approximation gives results
typically within 0.001 percent or better of the exact value.
The differences can be larger for smaller apertures (e.g.,
aperture sizes of one pixel or smaller). For such small sizes,
it is recommended to set ``method='subpixel'`` with a larger
``subpixels`` size.
"""
# validate inputs
data, error = _validate_inputs(data, error)
# handle data, error, and unit inputs
# output data and error are ndarray without units
data, error, unit = _handle_units(data, error)
# compute variance and apply input mask
data, variance = _prepare_photometry_data(data, error, mask)
return self._do_photometry(data, variance, method=method,
subpixels=subpixels, unit=unit)
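# Hedged usage sketch (assumptions: CircularAperture from elsewhere in
# photutils.aperture, made-up data and error arrays); the return convention
# follows the docstring above.
def _do_photometry_sketch():
    from photutils.aperture import CircularAperture
    data = np.ones((25, 25))
    error = 0.1 * np.ones_like(data)
    aper = CircularAperture([(12.0, 12.0), (5.0, 5.0)], r=4.0)
    sums, errs = aper.do_photometry(data, error=error)
    return sums, errs  # one sum and one error per aperture position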
@staticmethod
def _make_annulus_path(patch_inner, patch_outer):
"""
Define a matplotlib annulus path from two patches.
This preserves the cubic Bezier curves (CURVE4) of the aperture
paths.
"""
import matplotlib.path as mpath
path_inner = patch_inner.get_path()
transform_inner = patch_inner.get_transform()
path_inner = transform_inner.transform_path(path_inner)
path_outer = patch_outer.get_path()
transform_outer = patch_outer.get_transform()
path_outer = transform_outer.transform_path(path_outer)
verts_inner = path_inner.vertices[:-1][::-1]
verts_inner = np.concatenate((verts_inner, [verts_inner[-1]]))
verts = np.vstack((path_outer.vertices, verts_inner))
codes = np.hstack((path_outer.codes, path_inner.codes))
return mpath.Path(verts, codes)
def _define_patch_params(self, origin=(0, 0), **kwargs):
"""
Define the aperture patch position and set any default
matplotlib patch keywords (e.g., ``fill=False``).
Parameters
----------
origin : array_like, optional
The ``(x, y)`` position of the origin of the displayed
image.
**kwargs : `dict`
Any keyword arguments accepted by
`matplotlib.patches.Patch`.
Returns
-------
xy_positions : `~numpy.ndarray`
The aperture patch positions.
patch_params : `dict`
Any keyword arguments accepted by
`matplotlib.patches.Patch`.
"""
xy_positions = copy.deepcopy(np.atleast_2d(self.positions))
xy_positions[:, 0] -= origin[0]
xy_positions[:, 1] -= origin[1]
patch_params = self._default_patch_properties
patch_params.update(kwargs)
return xy_positions, patch_params
@abc.abstractmethod
def _to_patch(self, origin=(0, 0), **kwargs):
"""
Return a `~matplotlib.patches.patch` for the aperture.
Parameters
----------
origin : array_like, optional
The ``(x, y)`` position of the origin of the displayed
image.
**kwargs : `dict`
Any keyword arguments accepted by
`matplotlib.patches.Patch`.
Returns
-------
patch : `~matplotlib.patches.patch` or list of `~matplotlib.patches.patch`
A patch for the aperture. If the aperture is scalar then a
single `~matplotlib.patches.patch` is returned, otherwise a
list of `~matplotlib.patches.patch` is returned.
"""
raise NotImplementedError('Needs to be implemented in a subclass.')
def plot(self, axes=None, origin=(0, 0), **kwargs):
"""
Plot the aperture on a matplotlib `~matplotlib.axes.Axes`
instance.
Parameters
----------
axes : `matplotlib.axes.Axes` or `None`, optional
The matplotlib axes on which to plot. If `None`, then the
current `~matplotlib.axes.Axes` instance is used.
origin : array_like, optional
The ``(x, y)`` position of the origin of the displayed
image.
**kwargs : `dict`
Any keyword arguments accepted by
`matplotlib.patches.Patch`.
Returns
-------
patch : list of `~matplotlib.patches.Patch`
A list of matplotlib patches for the plotted aperture. The
patches can be used, for example, when adding a plot legend.
"""
import matplotlib.pyplot as plt
if axes is None:
axes = plt.gca()
patches = self._to_patch(origin=origin, **kwargs)
if self.isscalar:
patches = (patches,)
for patch in patches:
axes.add_patch(patch)
return patches
def _to_sky_params(self, wcs):
"""
Convert the pixel aperture parameters to those for a sky
aperture.
Parameters
----------
wcs : WCS object
A world coordinate system (WCS) transformation that
supports the `astropy shared interface for WCS
<https://docs.astropy.org/en/stable/wcs/wcsapi.html>`_
(e.g., `astropy.wcs.WCS`, `gwcs.wcs.WCS`).
Returns
-------
sky_params : `dict`
A dictionary of parameters for an equivalent sky aperture.
"""
sky_params = {}
xpos, ypos = np.transpose(self.positions)
sky_params['positions'] = wcs.pixel_to_world(xpos, ypos)
# Aperture objects require scalar shape parameters (e.g.,
# radius, a, b, theta, etc.), therefore we must calculate the
# pixel scale and angle at only a single sky position, which
# we take as the first aperture position. For apertures with
# multiple positions used with a WCS that contains distortions
# (e.g., a spatially-dependent pixel scale), this may lead to
unexpected results (e.g., results that are dependent on the
# order of the positions). There is no good way to fix this with
# the current Aperture API allowing multiple positions.
skypos = sky_params['positions']
if not self.isscalar:
skypos = skypos[0]
_, pixscale, angle = _pixel_scale_angle_at_skycoord(skypos, wcs)
shape_params = list(self._shape_params)
theta_key = 'theta'
if theta_key in shape_params:
sky_params[theta_key] = (self.theta * u.rad) - angle.to(u.rad)
shape_params.remove(theta_key)
for shape_param in shape_params:
value = getattr(self, shape_param)
sky_params[shape_param] = (value * u.pix * pixscale).to(u.arcsec)
return sky_params
@abc.abstractmethod
def to_sky(self, wcs):
"""
Convert the aperture to a `SkyAperture` object defined in
celestial coordinates.
Parameters
----------
wcs : WCS object
A world coordinate system (WCS) transformation that
supports the `astropy shared interface for WCS
<https://docs.astropy.org/en/stable/wcs/wcsapi.html>`_
(e.g., `astropy.wcs.WCS`, `gwcs.wcs.WCS`).
Returns
-------
aperture : `SkyAperture` object
A `SkyAperture` object.
"""
raise NotImplementedError('Needs to be implemented in a subclass.')
class SkyAperture(Aperture):
"""
Abstract base class for all apertures defined in celestial
coordinates.
"""
def _to_pixel_params(self, wcs):
"""
Convert the sky aperture parameters to those for a pixel
aperture.
Parameters
----------
wcs : WCS object
A world coordinate system (WCS) transformation that
supports the `astropy shared interface for WCS
<https://docs.astropy.org/en/stable/wcs/wcsapi.html>`_
(e.g., `astropy.wcs.WCS`, `gwcs.wcs.WCS`).
Returns
-------
pixel_params : `dict`
A dictionary of parameters for an equivalent pixel aperture.
"""
pixel_params = {}
xpos, ypos = wcs.world_to_pixel(self.positions)
pixel_params['positions'] = np.transpose((xpos, ypos))
# Aperture objects require scalar shape parameters (e.g.,
# radius, a, b, theta, etc.), therefore we must calculate the
# pixel scale and angle at only a single sky position, which
# we take as the first aperture position. For apertures with
# multiple positions used with a WCS that contains distortions
# (e.g., a spatially-dependent pixel scale), this may lead to
unexpected results (e.g., results that are dependent on the
# order of the positions). There is no good way to fix this with
# the current Aperture API allowing multiple positions.
if self.isscalar:
skypos = self.positions
else:
skypos = self.positions[0]
_, pixscale, angle = _pixel_scale_angle_at_skycoord(skypos, wcs)
shape_params = list(self._shape_params)
theta_key = 'theta'
if theta_key in shape_params:
pixel_params[theta_key] = (self.theta + angle).to(u.radian).value
shape_params.remove(theta_key)
for shape_param in shape_params:
value = getattr(self, shape_param)
if value.unit.physical_type == 'angle':
pixel_params[shape_param] = ((value / pixscale)
.to(u.pixel).value)
else:
pixel_params[shape_param] = value.value
return pixel_params
@abc.abstractmethod
def to_pixel(self, wcs):
"""
Convert the aperture to a `PixelAperture` object defined in
pixel coordinates.
Parameters
----------
wcs : WCS object
A world coordinate system (WCS) transformation that
supports the `astropy shared interface for WCS
<https://docs.astropy.org/en/stable/wcs/wcsapi.html>`_
(e.g., `astropy.wcs.WCS`, `gwcs.wcs.WCS`).
Returns
-------
aperture : `PixelAperture` object
A `PixelAperture` object.
"""
raise NotImplementedError('Needs to be implemented in a subclass.')
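# Hedged round-trip sketch (assumptions: SkyCircularAperture is the concrete
# subclass defined elsewhere in photutils.aperture, and the WCS is a minimal
# made-up TAN projection with ~1 arcsec pixels).
def _sky_to_pixel_sketch():
    from astropy.wcs import WCS
    from photutils.aperture import SkyCircularAperture
    wcs = WCS(naxis=2)
    wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    wcs.wcs.crpix = [50.0, 50.0]
    wcs.wcs.cdelt = [-1 / 3600.0, 1 / 3600.0]
    wcs.wcs.crval = [150.0, 2.0]
    sky_aper = SkyCircularAperture(SkyCoord(150.0, 2.0, unit='deg'),
                                   r=3 * u.arcsec)
    return sky_aper.to_pixel(wcs).r  # ~3 pixels, via _to_pixel_params() above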
| astropy/photutils | photutils/aperture/core.py | Python | bsd-3-clause | 26,226 | ["Gaussian"] | 9dd5372066fefeca3a1b89c900e01fde7a5d1bf3357890341bbb8ef02e4b3f15 |
import openvoronoi as ovd
import ovdvtk # comes with openvoronoi
import time
import vtk
import datetime
import math
import random
import os
import sys
import pickle
import gzip
import ovdgenerators as gens # comes with openvoronoi
import randompolygon as rpg # random polygon generator see https://github.com/aewallin/CGAL_RPG
def draw_vd(vd, times):
# w=2500
# h=1500
# w=1920
# h=1080
w = 1024
h = 1024
myscreen = ovdvtk.VTKScreen(width=w, height=h)
ovdvtk.drawOCLtext(myscreen, rev_text=ovd.version())
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInputConnection(w2if.GetOutputPort())
# w2if.Modified()
# lwr.SetFileName("tux1.png")
scale = 1
myscreen.render()
far = 1
camPos = far
zmult = 3
# camPos/float(1000)
myscreen.camera.SetPosition(0, -camPos / float(1000), zmult * camPos)
myscreen.camera.SetClippingRange(-(zmult + 1) * camPos, (zmult + 1) * camPos)
myscreen.camera.SetFocalPoint(0.0, 0, 0)
# for vtk visualization
vod = ovdvtk.VD(myscreen, vd, float(scale), textscale=0.01, vertexradius=0.003)
vod.drawFarCircle()
vod.textScale = 0.02
vod.vertexRadius = 0.0031
vod.drawVertices = 0
vod.drawVertexIndex = 0
vod.drawGenerators = 0
vod.offsetEdges = 0
vd.setEdgeOffset(0.05)
# times=[]
# times.append( 1 )
# times.append( 1 )
vod.setVDText2(times)
vod.setAll()
myscreen.render()
# w2if.Modified()
# lwr.SetFileName("{0}.png".format(Nmax))
# lwr.Write()
myscreen.iren.Start()
def rpg_vd(Npts, seed, debug):
far = 1
vd = ovd.VoronoiDiagram(far, 120)
vd.reset_vertex_count()
poly = rpg.rpg(Npts, seed)
pts = []
for p in poly:
ocl_pt = ovd.Point(p[0], p[1])
pts.append(ocl_pt)
print ocl_pt
times = []
id_list = []
m = 0
t_before = time.time()
for p in pts:
# print " adding vertex ",m
id_list.append(vd.addVertexSite(p))
m = m + 1
"""
print "polygon is: "
for idx in id_list:
print idx," ",
print "."
"""
t_after = time.time()
times.append(t_after - t_before)
# print " pts inserted in ", times[0], " s"
# print " vd-check: ",vd.check()
if (debug):
vd.debug_on()
t_before = time.time()
for n in range(len(id_list)):
n_nxt = n + 1
if n == (len(id_list) - 1):
n_nxt = 0 # point 0 is the endpoint of the last segment
# print " adding line-site ", id_list[n]," - ", id_list[n_nxt]
vd.addLineSite(id_list[n], id_list[n_nxt])
t_after = time.time()
times.append(t_after - t_before)
print " segs inserted in ", times[1], " s"
is_valid = vd.check()
print " vd-check: ", is_valid
return [is_valid, vd, times]
def loop_run(Npts, max_seed, debug=False, debug_seed=-1):
# Npts = 3
# max_seed = 1000
seed_range = range(max_seed)
for seed in seed_range:
debug2 = debug
if (seed == debug_seed):
print "debug seed!"
debug2 = True
result = rpg_vd(Npts, seed, debug2)
print "N=", Npts, " s=", seed, " ok?=", result
assert (result[0] == True)
def single_run(Npts, seed, debug=False):
result = rpg_vd(Npts, seed, debug)
print "N=", Npts, " s=", seed, " ok?=", result
assert (result[0] == True)
return result
if __name__ == "__main__":
# loop_run(50,300)
r = single_run(50, int(37))
vd = r[1]
pi = ovd.PolygonInterior(True)
vd.filter_graph(pi)
draw_vd(vd, r[2])
| aewallin/openvoronoi | python_examples/chain_3_rpg_loop.py | Python | lgpl-2.1 | 3,649 | ["VTK"] | 6eaea0b719ca422b9dadaebe47d237725c7166ebb61f16fb2730610142552ebf |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements more advanced transformations.
"""
import logging
import math
import warnings
from fractions import Fraction
from itertools import groupby, product
from math import gcd
from string import ascii_lowercase
from typing import Dict, Optional
import numpy as np
from monty.dev import requires
from monty.fractions import lcm
from monty.json import MSONable
from pymatgen.analysis.adsorption import AdsorbateSiteFinder
from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.analysis.energy_models import SymmetryModel
from pymatgen.analysis.ewald import EwaldSummation
from pymatgen.analysis.gb.grain import GrainBoundaryGenerator
from pymatgen.analysis.local_env import MinimumDistanceNN
from pymatgen.analysis.structure_matcher import SpinComparator, StructureMatcher
from pymatgen.analysis.structure_prediction.substitution_probability import (
SubstitutionPredictor,
)
from pymatgen.command_line.enumlib_caller import EnumError, EnumlibAdaptor
from pymatgen.command_line.mcsqs_caller import run_mcsqs
from pymatgen.core.periodic_table import DummySpecies, Element, Species, get_el_sp
from pymatgen.core.structure import Structure
from pymatgen.core.surface import SlabGenerator
from pymatgen.electronic_structure.core import Spin
from pymatgen.io.ase import AseAtomsAdaptor
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.transformations.standard_transformations import (
OrderDisorderedStructureTransformation,
SubstitutionTransformation,
SupercellTransformation,
)
from pymatgen.transformations.transformation_abc import AbstractTransformation
try:
import hiphive # type: ignore
except ImportError:
hiphive = None
__author__ = "Shyue Ping Ong, Stephen Dacek, Anubhav Jain, Matthew Horton, Alex Ganose"
logger = logging.getLogger(__name__)
class ChargeBalanceTransformation(AbstractTransformation):
"""
This is a transformation that disorders a structure to make it charge
balanced, given an oxidation state-decorated structure.
"""
def __init__(self, charge_balance_sp):
"""
Args:
charge_balance_sp: specie to add or remove. Currently only removal
is supported
"""
self.charge_balance_sp = str(charge_balance_sp)
def apply_transformation(self, structure):
"""
Applies the transformation.
Args:
structure: Input Structure
Returns:
Charge balanced structure.
"""
charge = structure.charge
specie = get_el_sp(self.charge_balance_sp)
num_to_remove = charge / specie.oxi_state
num_in_structure = structure.composition[specie]
removal_fraction = num_to_remove / num_in_structure
if removal_fraction < 0:
raise ValueError("addition of specie not yet supported by ChargeBalanceTransformation")
trans = SubstitutionTransformation({self.charge_balance_sp: {self.charge_balance_sp: 1 - removal_fraction}})
return trans.apply_transformation(structure)
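# Illustrative worked example, shown as a standalone sketch (all numbers made
# up): a net charge of +2, removed via Li+ (oxi_state = +1), from a structure
# containing 8 Li+ sites gives removal_fraction = (2 / 1) / 8 = 0.25, i.e.
# the substitution {"Li+": {"Li+": 0.75}} applied above.
def _charge_balance_arithmetic_sketch():
    charge, oxi_state, num_in_structure = 2.0, 1.0, 8.0
    removal_fraction = (charge / oxi_state) / num_in_structure
    return {"Li+": {"Li+": 1 - removal_fraction}}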
def __str__(self):
return "Charge Balance Transformation : " + f"Species to remove = {str(self.charge_balance_sp)}"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
"""Returns: None"""
return None
@property
def is_one_to_many(self):
"""Returns: False"""
return False
class SuperTransformation(AbstractTransformation):
"""
This is a transformation that is inherently one-to-many. It is constructed
from a list of transformations and returns one structure for each
transformation. The primary use for this class is extending a transmuter
object.
"""
def __init__(self, transformations, nstructures_per_trans=1):
"""
Args:
transformations ([transformations]): List of transformations to apply
to a structure. One transformation is applied to each output
structure.
nstructures_per_trans (int): If the transformations are one-to-many,
nstructures_per_trans structures from each transformation are
added to the full list. Defaults to 1, i.e., only the best structure.
"""
self._transformations = transformations
self.nstructures_per_trans = nstructures_per_trans
def apply_transformation(self, structure, return_ranked_list=False):
"""
Applies the transformation.
Args:
structure: Input Structure
return_ranked_list: Number of structures to return.
Returns:
Structures with all transformations applied.
"""
if not return_ranked_list:
raise ValueError("SuperTransformation has no single best structure output. Must use return_ranked_list")
structures = []
for t in self._transformations:
if t.is_one_to_many:
for d in t.apply_transformation(structure, return_ranked_list=self.nstructures_per_trans):
d["transformation"] = t
structures.append(d)
else:
structures.append(
{
"transformation": t,
"structure": t.apply_transformation(structure),
}
)
return structures
def __str__(self):
return "Super Transformation : Transformations = " + "{}".format(
" ".join([str(t) for t in self._transformations])
)
def __repr__(self):
return self.__str__()
@property
def inverse(self):
"""Returns: None"""
return None
@property
def is_one_to_many(self):
"""Returns: True"""
return True
class MultipleSubstitutionTransformation:
"""
Performs multiple substitutions on a structure. For example, can do a
fractional replacement of Ge in LiGePS with a list of species, creating one
structure for each substitution. Ordering is done using a dummy element so
only one ordering must be done per substitution oxidation state. Charge
balancing of the structure is optionally performed.
.. note::
There are no checks to make sure that removal fractions are possible
and rounding may occur. Currently charge balancing only works for
removal of species.
"""
def __init__(
self,
sp_to_replace,
r_fraction,
substitution_dict,
charge_balance_species=None,
order=True,
):
"""
Performs multiple fractional substitutions on a transmuter.
Args:
sp_to_replace: species to be replaced
r_fraction: fraction of that specie to replace
substitution_dict: dictionary of the format
{2: ["Mg", "Ti", "V", "As", "Cr", "Ta", "N", "Nb"],
3: ["Ru", "Fe", "Co", "Ce", "As", "Cr", "Ta", "N", "Nb"],
4: ["Ru", "V", "Cr", "Ta", "N", "Nb"],
5: ["Ru", "W", "Mn"]
}
The number is the charge used for each of the list of elements
(an element can be present in multiple lists)
charge_balance_species: If specified, will balance the charge on
the structure using that specie.
"""
self.sp_to_replace = sp_to_replace
self.r_fraction = r_fraction
self.substitution_dict = substitution_dict
self.charge_balance_species = charge_balance_species
self.order = order
def apply_transformation(self, structure, return_ranked_list=False):
"""
Applies the transformation.
Args:
structure: Input Structure
return_ranked_list: Number of structures to return.
Returns:
Structures with all substitutions applied.
"""
if not return_ranked_list:
raise ValueError(
"MultipleSubstitutionTransformation has no single"
" best structure output. Must use"
" return_ranked_list."
)
outputs = []
for charge, el_list in self.substitution_dict.items():
mapping = {}
if charge > 0:
sign = "+"
else:
sign = "-"
dummy_sp = f"X{str(charge)}{sign}"
mapping[self.sp_to_replace] = {
self.sp_to_replace: 1 - self.r_fraction,
dummy_sp: self.r_fraction,
}
trans = SubstitutionTransformation(mapping)
dummy_structure = trans.apply_transformation(structure)
if self.charge_balance_species is not None:
cbt = ChargeBalanceTransformation(self.charge_balance_species)
dummy_structure = cbt.apply_transformation(dummy_structure)
if self.order:
trans = OrderDisorderedStructureTransformation()
dummy_structure = trans.apply_transformation(dummy_structure)
for el in el_list:
if charge > 0:
sign = "+"
else:
sign = "-"
st = SubstitutionTransformation({f"X{str(charge)}+": f"{el}{charge}{sign}"})
new_structure = st.apply_transformation(dummy_structure)
outputs.append({"structure": new_structure})
return outputs
def __str__(self):
return "Multiple Substitution Transformation : Substitution on " + f"{self.sp_to_replace}"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
"""Returns: None"""
return None
@property
def is_one_to_many(self):
"""Returns: True"""
return True
class EnumerateStructureTransformation(AbstractTransformation):
"""
Order a disordered structure using enumlib. For complete orderings, this
generally produces fewer structures than the OrderDisorderedStructure
transformation, and at a much faster speed.
"""
def __init__(
self,
min_cell_size=1,
max_cell_size=1,
symm_prec=0.1,
refine_structure=False,
enum_precision_parameter=0.001,
check_ordered_symmetry=True,
max_disordered_sites=None,
sort_criteria="ewald",
timeout=None,
):
"""
Args:
min_cell_size:
The minimum cell size wanted. Must be an int. Defaults to 1.
max_cell_size:
The maximum cell size wanted. Must be an int. Defaults to 1.
symm_prec:
Tolerance to use for symmetry.
refine_structure:
This parameter has the same meaning as in enumlib_caller.
If you are starting from a structure that has been relaxed via
some electronic structure code, it is usually much better to
start with symmetry determination and then obtain a refined
structure. The refined structure has cell parameters and
atomic positions shifted to the expected symmetry positions,
which makes it much less sensitive to precision issues in enumlib.
If you are already starting from an experimental cif, refinement
should have already been done and it is not necessary. Defaults
to False.
enum_precision_parameter (float): Finite precision parameter for
enumlib. Default of 0.001 is usually ok, but you might need to
tweak it for certain cells.
check_ordered_symmetry (bool): Whether to check the symmetry of
the ordered sites. If the symmetry of the ordered sites is
lower, the lowest symmetry ordered sites is included in the
enumeration. This is important if the ordered sites break
symmetry in a way that is important getting possible
structures. But sometimes including ordered sites
slows down enumeration to the point that it cannot be
completed. Switch to False in those cases. Defaults to True.
max_disordered_sites (int):
An alternate parameter to max_cell size. Will sequentially try
larger and larger cell sizes until (i) getting a result or (ii)
the number of disordered sites in the cell exceeds
max_disordered_sites. Must set max_cell_size to None when using
this parameter.
sort_criteria (str): Sort by Ewald energy ("ewald"; requires oxidation
states and is slow) or by number of sites ("nsites", much faster).
timeout (float): timeout in minutes to pass to EnumlibAdaptor
"""
self.symm_prec = symm_prec
self.min_cell_size = min_cell_size
self.max_cell_size = max_cell_size
self.refine_structure = refine_structure
self.enum_precision_parameter = enum_precision_parameter
self.check_ordered_symmetry = check_ordered_symmetry
self.max_disordered_sites = max_disordered_sites
self.sort_criteria = sort_criteria
self.timeout = timeout
if max_cell_size and max_disordered_sites:
raise ValueError("Cannot set both max_cell_size and max_disordered_sites!")
def apply_transformation(self, structure, return_ranked_list=False):
"""
Returns either a single ordered structure or a sequence of all ordered
structures.
Args:
structure: Structure to order.
return_ranked_list (bool): Whether or not multiple structures are
returned. If return_ranked_list is a number, that number of
structures is returned.
Returns:
Depending on returned_ranked list, either a transformed structure
or a list of dictionaries, where each dictionary is of the form
{"structure" = .... , "other_arguments"}
The list of ordered structures is ranked by ewald energy / atom, if
the input structure is an oxidation state decorated structure.
Otherwise, it is ranked by number of sites, with smallest number of
sites first.
"""
try:
num_to_return = int(return_ranked_list)
except ValueError:
num_to_return = 1
if self.refine_structure:
finder = SpacegroupAnalyzer(structure, self.symm_prec)
structure = finder.get_refined_structure()
contains_oxidation_state = all(
hasattr(sp, "oxi_state") and sp.oxi_state != 0 for sp in structure.composition.elements
)
structures = None
if structure.is_ordered:
warnings.warn(
"Enumeration skipped for structure with composition {} "
"because it is ordered".format(structure.composition)
)
structures = [structure.copy()]
if self.max_disordered_sites:
ndisordered = sum(1 for site in structure if not site.is_ordered)
if ndisordered > self.max_disordered_sites:
raise ValueError(f"Too many disordered sites! ({ndisordered} > {self.max_disordered_sites})")
max_cell_sizes = range(
self.min_cell_size,
int(math.floor(self.max_disordered_sites / ndisordered)) + 1,
)
else:
max_cell_sizes = [self.max_cell_size]
for max_cell_size in max_cell_sizes:
adaptor = EnumlibAdaptor(
structure,
min_cell_size=self.min_cell_size,
max_cell_size=max_cell_size,
symm_prec=self.symm_prec,
refine_structure=False,
enum_precision_parameter=self.enum_precision_parameter,
check_ordered_symmetry=self.check_ordered_symmetry,
timeout=self.timeout,
)
try:
adaptor.run()
structures = adaptor.structures
if structures:
break
except EnumError:
warnings.warn(f"Unable to enumerate for max_cell_size = {max_cell_size}")
if structures is None:
raise ValueError("Unable to enumerate")
original_latt = structure.lattice
inv_latt = np.linalg.inv(original_latt.matrix)
ewald_matrices = {}
all_structures = []
for s in structures:
new_latt = s.lattice
transformation = np.dot(new_latt.matrix, inv_latt)
transformation = tuple(tuple(int(round(cell)) for cell in row) for row in transformation)
if contains_oxidation_state and self.sort_criteria == "ewald":
if transformation not in ewald_matrices:
s_supercell = structure * transformation
ewald = EwaldSummation(s_supercell)
ewald_matrices[transformation] = ewald
else:
ewald = ewald_matrices[transformation]
energy = ewald.compute_sub_structure(s)
all_structures.append({"num_sites": len(s), "energy": energy, "structure": s})
else:
all_structures.append({"num_sites": len(s), "structure": s})
def sort_func(s):
return (
s["energy"] / s["num_sites"]
if contains_oxidation_state and self.sort_criteria == "ewald"
else s["num_sites"]
)
self._all_structures = sorted(all_structures, key=sort_func)
if return_ranked_list:
return self._all_structures[0:num_to_return]
return self._all_structures[0]["structure"]
def __str__(self):
return "EnumerateStructureTransformation"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
"""Returns: None"""
return None
@property
def is_one_to_many(self):
"""Returns: True"""
return True
class SubstitutionPredictorTransformation(AbstractTransformation):
"""
This transformation takes a structure and uses the structure
prediction module to find likely site substitutions.
"""
def __init__(self, threshold=1e-2, scale_volumes=True, **kwargs):
r"""
Args:
threshold: Threshold for substitution.
scale_volumes: Whether to scale volumes after substitution.
**kwargs: Args for SubstitutionProbability class lambda_table, alpha
"""
self.kwargs = kwargs
self.threshold = threshold
self.scale_volumes = scale_volumes
self._substitutor = SubstitutionPredictor(threshold=threshold, **kwargs)
def apply_transformation(self, structure, return_ranked_list=False):
"""
Applies the transformation.
Args:
structure: Input Structure
return_ranked_list: Number of structures to return.
Returns:
Predicted Structures.
"""
if not return_ranked_list:
raise ValueError("SubstitutionPredictorTransformation doesn't support returning 1 structure")
preds = self._substitutor.composition_prediction(structure.composition, to_this_composition=False)
preds.sort(key=lambda x: x["probability"], reverse=True)
outputs = []
for pred in preds:
st = SubstitutionTransformation(pred["substitutions"])
output = {
"structure": st.apply_transformation(structure),
"probability": pred["probability"],
"threshold": self.threshold,
"substitutions": {},
}
# dictionary keys have to be converted to strings for JSON
for key, value in pred["substitutions"].items():
output["substitutions"][str(key)] = str(value)
outputs.append(output)
return outputs
def __str__(self):
return "SubstitutionPredictorTransformation"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
"""Returns: None"""
return None
@property
def is_one_to_many(self):
"""Returns: True"""
return True
class MagOrderParameterConstraint(MSONable):
"""
This class can be used to supply MagOrderingTransformation
to just a specific subset of species or sites that satisfy the
provided constraints. This can be useful for setting an order
parameter for, for example, ferrimagnetic structures which
might order on certain motifs, with the global order parameter
dependent on how many sites satisfy that motif.
"""
def __init__(
self,
order_parameter,
species_constraints=None,
site_constraint_name=None,
site_constraints=None,
):
"""
:param order_parameter (float): any number from 0.0 to 1.0,
typically 0.5 (antiferromagnetic) or 1.0 (ferromagnetic)
:param species_constraints (list): str or list of strings
of Species symbols that the constraint should apply to
:param site_constraint_name (str): name of the site property
that the constraint should apply to, e.g. "coordination_no"
:param site_constraints (list): list of values of the site
property that the constraints should apply to
"""
# validation
if site_constraints and site_constraints != [None] and not site_constraint_name:
raise ValueError("Specify the name of the site constraint.")
if not site_constraints and site_constraint_name:
raise ValueError("Please specify some site constraints.")
if not isinstance(species_constraints, list):
species_constraints = [species_constraints]
if not isinstance(site_constraints, list):
site_constraints = [site_constraints]
if order_parameter > 1 or order_parameter < 0:
raise ValueError("Order parameter must lie between 0 and 1")
if order_parameter != 0.5:
warnings.warn(
"Use care when using a non-standard order parameter, "
"though it can be useful in some cases it can also "
"lead to unintended behavior. Consult documentation."
)
self.order_parameter = order_parameter
self.species_constraints = species_constraints
self.site_constraint_name = site_constraint_name
self.site_constraints = site_constraints
def satisfies_constraint(self, site):
"""
Checks if a periodic site satisfies the constraint.
"""
if not site.is_ordered:
return False
satisfies_constraints = self.species_constraints and str(site.specie) in self.species_constraints
if self.site_constraint_name and self.site_constraint_name in site.properties:
prop = site.properties[self.site_constraint_name]
satisfies_constraints = prop in self.site_constraints
return satisfies_constraints
class MagOrderingTransformation(AbstractTransformation):
"""
This transformation takes a structure and returns a list of collinear
magnetic orderings. For disordered structures, make an ordered
approximation first.
"""
def __init__(self, mag_species_spin, order_parameter=0.5, energy_model=SymmetryModel(), **kwargs):
"""
:param mag_species_spin: A mapping of elements/species to their
spin magnitudes, e.g. {"Fe3+": 5, "Mn3+": 4}
:param order_parameter (float or list): if float, specifies a
global order parameter and can take values from 0.0 to 1.0
(e.g. 0.5 for antiferromagnetic or 1.0 for ferromagnetic), if
list has to be a list of
:class: `pymatgen.transformations.advanced_transformations.MagOrderParameterConstraint`
to specify more complicated orderings, see documentation for
MagOrderParameterConstraint more details on usage
:param energy_model: Energy model to rank the returned structures,
see :mod: `pymatgen.analysis.energy_models` for more information (note
that this is not necessarily a physical energy). By default, returned
structures use SymmetryModel() which ranks structures from most
symmetric to least.
:param kwargs: Additional kwargs that are passed to
:class:`EnumerateStructureTransformation` such as min_cell_size etc.
"""
# checking for sensible order_parameter values
if isinstance(order_parameter, float):
# convert to constraint format
order_parameter = [
MagOrderParameterConstraint(
order_parameter=order_parameter,
species_constraints=list(mag_species_spin.keys()),
)
]
elif isinstance(order_parameter, list):
ops = [isinstance(item, MagOrderParameterConstraint) for item in order_parameter]
if not any(ops):
raise ValueError("Order parameter not correctly defined.")
else:
raise ValueError("Order parameter not correctly defined.")
self.mag_species_spin = mag_species_spin
# store order parameter constraints as dicts to save implementing
# to/from dict methods for MSONable compatibility
self.order_parameter = [op.as_dict() for op in order_parameter]
self.energy_model = energy_model
self.enum_kwargs = kwargs
@staticmethod
def determine_min_cell(disordered_structure):
"""
Determine the smallest supercell that is able to enumerate
the provided structure with the given order parameter
"""
def lcm(n1, n2):
"""
Find least common multiple of two numbers
"""
return n1 * n2 / gcd(n1, n2)
# assumes all order parameters for a given species are the same
mag_species_order_parameter = {}
mag_species_occurrences = {}
for idx, site in enumerate(disordered_structure):
if not site.is_ordered:
op = max(site.species.values())
# this very hacky bit of code only works because we know
# that on disordered sites in this class, all species are the same
# but have different spins, and this is comma-delimited
sp = str(list(site.species.keys())[0]).split(",", maxsplit=1)[0]
if sp in mag_species_order_parameter:
mag_species_occurrences[sp] += 1
else:
mag_species_order_parameter[sp] = op
mag_species_occurrences[sp] = 1
smallest_n = []
for sp, order_parameter in mag_species_order_parameter.items():
denom = Fraction(order_parameter).limit_denominator(100).denominator
num_atom_per_specie = mag_species_occurrences[sp]
n_gcd = gcd(denom, num_atom_per_specie)
smallest_n.append(lcm(int(n_gcd), denom) / n_gcd)
return max(smallest_n)
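# Illustrative worked example of the arithmetic above, shown as a standalone
# sketch (numbers made up): an order parameter of 0.5 on a species occurring
# on 3 disordered sites requires a supercell multiple of 2.
def _min_cell_arithmetic_sketch():
    order_parameter, num_atom_per_specie = 0.5, 3
    denom = Fraction(order_parameter).limit_denominator(100).denominator  # 2
    n_gcd = gcd(denom, num_atom_per_specie)                               # 1
    lcm_ = n_gcd * denom // gcd(n_gcd, denom)                             # lcm(1, 2) = 2
    return lcm_ / n_gcd                                                   # -> 2.0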
@staticmethod
def _add_dummy_species(structure, order_parameters):
"""
:param structure: ordered Structure
:param order_parameters: list of MagOrderParameterConstraints
:return: A structure decorated with disordered
DummySpecies on which to perform the enumeration.
Note that the DummySpecies are super-imposed on
to the original sites, to make it easier to
retrieve the original site after enumeration is
performed (this approach is preferred over a simple
mapping since multiple species may have the same
DummySpecies, depending on the constraints specified).
This approach can also preserve site properties even after
enumeration.
"""
dummy_struct = structure.copy()
def generate_dummy_specie():
"""
Generator which returns DummySpecies symbols Mma, Mmb, etc.
"""
subscript_length = 1
while True:
for subscript in product(ascii_lowercase, repeat=subscript_length):
yield "Mm" + "".join(subscript)
subscript_length += 1
dummy_species_gen = generate_dummy_specie()
# one dummy species for each order parameter constraint
dummy_species_symbols = [next(dummy_species_gen) for i in range(len(order_parameters))]
dummy_species = [
{
DummySpecies(symbol, properties={"spin": Spin.up}): constraint.order_parameter,
DummySpecies(symbol, properties={"spin": Spin.down}): 1 - constraint.order_parameter,
}
for symbol, constraint in zip(dummy_species_symbols, order_parameters)
]
for idx, site in enumerate(dummy_struct):
satisfies_constraints = [c.satisfies_constraint(site) for c in order_parameters]
if satisfies_constraints.count(True) > 1:
# site should either not satisfy any constraints, or satisfy
# one constraint
raise ValueError(
f"Order parameter constraints conflict for site: {str(site.specie)}, {site.properties}"
)
if any(satisfies_constraints):
dummy_specie_idx = satisfies_constraints.index(True)
dummy_struct.append(dummy_species[dummy_specie_idx], site.coords, site.lattice)
return dummy_struct
@staticmethod
def _remove_dummy_species(structure):
"""
:return: Structure with dummy species removed, but
their corresponding spin properties merged with the
original sites. Used after performing enumeration.
"""
if not structure.is_ordered:
raise Exception("Something went wrong with enumeration.")
sites_to_remove = []
logger.debug(f"Dummy species structure:\n{str(structure)}")
for idx, site in enumerate(structure):
if isinstance(site.specie, DummySpecies):
sites_to_remove.append(idx)
spin = site.specie._properties.get("spin", None)
neighbors = structure.get_neighbors(
site,
0.05, # arbitrary threshold, needs to be << any bond length
# but >> floating point precision issues
include_index=True,
)
if len(neighbors) != 1:
raise Exception(f"This shouldn't happen, found neighbors: {neighbors}")
orig_site_idx = neighbors[0][2]
orig_specie = structure[orig_site_idx].specie
new_specie = Species(
orig_specie.symbol,
getattr(orig_specie, "oxi_state", None),
properties={"spin": spin},
)
structure.replace(
orig_site_idx,
new_specie,
properties=structure[orig_site_idx].properties,
)
structure.remove_sites(sites_to_remove)
logger.debug(f"Structure with dummy species removed:\n{str(structure)}")
return structure
def _add_spin_magnitudes(self, structure):
"""
Replaces Spin.up/Spin.down with spin magnitudes specified
by mag_species_spin.
:param structure:
:return:
"""
for idx, site in enumerate(structure):
if getattr(site.specie, "_properties", None):
spin = site.specie._properties.get("spin", None)
sign = int(spin) if spin else 0
if spin:
new_properties = site.specie._properties.copy()
# this very hacky bit of code only works because we know
# that on disordered sites in this class, all species are the same
# but have different spins, and this is comma-delimited
sp = str(site.specie).split(",", maxsplit=1)[0]
new_properties.update({"spin": sign * self.mag_species_spin.get(sp, 0)})
new_specie = Species(
site.specie.symbol,
getattr(site.specie, "oxi_state", None),
new_properties,
)
structure.replace(idx, new_specie, properties=site.properties)
logger.debug(f"Structure with spin magnitudes:\n{str(structure)}")
return structure
def apply_transformation(self, structure, return_ranked_list=False):
"""
Apply MagOrderTransformation to an input structure.
:param structure: Any ordered structure.
:param return_ranked_list: As in other Transformations.
:return:
"""
if not structure.is_ordered:
raise ValueError("Create an ordered approximation of your input structure first.")
# retrieve order parameters
order_parameters = [MagOrderParameterConstraint.from_dict(op_dict) for op_dict in self.order_parameter]
# add dummy species on which to perform enumeration
structure = self._add_dummy_species(structure, order_parameters)
# trivial case
if structure.is_ordered:
structure = self._remove_dummy_species(structure)
return [structure] if return_ranked_list > 1 else structure
enum_kwargs = self.enum_kwargs.copy()
enum_kwargs["min_cell_size"] = max(int(self.determine_min_cell(structure)), enum_kwargs.get("min_cell_size", 1))
if enum_kwargs.get("max_cell_size", None):
if enum_kwargs["min_cell_size"] > enum_kwargs["max_cell_size"]:
warnings.warn(
"Specified max cell size ({}) is smaller "
"than the minimum enumerable cell size ({}), "
"changing max cell size to {}".format(
enum_kwargs["max_cell_size"],
enum_kwargs["min_cell_size"],
enum_kwargs["min_cell_size"],
)
)
enum_kwargs["max_cell_size"] = enum_kwargs["min_cell_size"]
else:
enum_kwargs["max_cell_size"] = enum_kwargs["min_cell_size"]
t = EnumerateStructureTransformation(**enum_kwargs)
alls = t.apply_transformation(structure, return_ranked_list=return_ranked_list)
# handle the fact that EnumerateStructureTransformation can either
# return a single Structure or a list
if isinstance(alls, Structure):
# remove dummy species and replace Spin.up or Spin.down
# with spin magnitudes given in mag_species_spin arg
alls = self._remove_dummy_species(alls)
alls = self._add_spin_magnitudes(alls)
else:
for idx, _ in enumerate(alls):
alls[idx]["structure"] = self._remove_dummy_species(alls[idx]["structure"])
alls[idx]["structure"] = self._add_spin_magnitudes(alls[idx]["structure"])
try:
num_to_return = int(return_ranked_list)
except ValueError:
num_to_return = 1
if num_to_return == 1 or not return_ranked_list:
return alls[0]["structure"] if num_to_return else alls
# remove duplicate structures and group according to energy model
m = StructureMatcher(comparator=SpinComparator())
def key(x):
return SpacegroupAnalyzer(x, 0.1).get_space_group_number()
out = []
for _, g in groupby(sorted((d["structure"] for d in alls), key=key), key):
g = list(g)
grouped = m.group_structures(g)
out.extend([{"structure": g[0], "energy": self.energy_model.get_energy(g[0])} for g in grouped])
self._all_structures = sorted(out, key=lambda d: d["energy"])
return self._all_structures[0:num_to_return]
def __str__(self):
return "MagOrderingTransformation"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
"""Returns: None"""
return None
@property
def is_one_to_many(self):
"""Returns: True"""
return True
def _find_codopant(target, oxidation_state, allowed_elements=None):
"""
Finds the element from "allowed elements" that (i) possesses the desired
"oxidation state" and (ii) is closest in ionic radius to the target specie
Args:
target: (Species) provides target ionic radius.
oxidation_state: (float) codopant oxidation state.
allowed_elements: ([str]) List of allowed elements. If None,
all elements are tried.
Returns:
(Species) with oxidation_state that has ionic radius closest to
target.
"""
ref_radius = target.ionic_radius
candidates = []
symbols = allowed_elements or [el.symbol for el in Element]
for sym in symbols:
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
sp = Species(sym, oxidation_state)
r = sp.ionic_radius
if r is not None:
candidates.append((r, sp))
except Exception:
pass
return min(candidates, key=lambda l: abs(l[0] / ref_radius - 1))[1]
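# Illustrative sketch (added for clarity; not part of the original module): a minimal
# example of how _find_codopant might be called, e.g. to find a 3+ codopant close in
# ionic radius to Mn2+. The target species and candidate elements are assumptions
# chosen only for demonstration.
def _example_find_codopant():
    """Hypothetical helper demonstrating a typical _find_codopant call."""
    target = Species("Mn", 2)
    # restrict the search to a few candidate elements to keep the example fast
    return _find_codopant(target, 3, allowed_elements=["Al", "Ga", "In"])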
class DopingTransformation(AbstractTransformation):
"""
A transformation that performs doping of a structure.
"""
def __init__(
self,
dopant,
ionic_radius_tol=float("inf"),
min_length=10,
alio_tol=0,
codopant=False,
max_structures_per_enum=100,
allowed_doping_species=None,
**kwargs,
):
r"""
Args:
dopant (Species-like): E.g., Al3+. Must have oxidation state.
ionic_radius_tol (float): E.g., Fractional allowable ionic radii
mismatch for dopant to fit into a site. Default of inf means
that any dopant with the right oxidation state is allowed.
            min_length (float): Minimum lattice parameter between periodic
                images of the dopant. Defaults to 10 Angstroms.
alio_tol (int): If this is not 0, attempt will be made to dope
sites with oxidation_states +- alio_tol of the dopant. E.g.,
1 means that the ions like Ca2+ and Ti4+ are considered as
potential doping sites for Al3+.
codopant (bool): If True, doping will be carried out with a
codopant to maintain charge neutrality. Otherwise, vacancies
will be used.
max_structures_per_enum (float): Maximum number of structures to
return per enumeration. Note that there can be more than one
candidate doping site, and each site enumeration will return at
max max_structures_per_enum structures. Defaults to 100.
            allowed_doping_species (list): Species that are allowed to be
                doping sites. This is an inclusionary list. If specified,
                any sites whose species are not in this list are excluded
                from consideration as doping sites.
**kwargs:
Same keyword args as :class:`EnumerateStructureTransformation`,
i.e., min_cell_size, etc.
"""
self.dopant = get_el_sp(dopant)
self.ionic_radius_tol = ionic_radius_tol
self.min_length = min_length
self.alio_tol = alio_tol
self.codopant = codopant
self.max_structures_per_enum = max_structures_per_enum
self.allowed_doping_species = allowed_doping_species
self.kwargs = kwargs
def apply_transformation(self, structure, return_ranked_list=False):
"""
Args:
structure (Structure): Input structure to dope
Returns:
[{"structure": Structure, "energy": float}]
"""
comp = structure.composition
logger.info("Composition: %s" % comp)
for sp in comp:
try:
sp.oxi_state
except AttributeError:
analyzer = BVAnalyzer()
structure = analyzer.get_oxi_state_decorated_structure(structure)
comp = structure.composition
break
ox = self.dopant.oxi_state
radius = self.dopant.ionic_radius
compatible_species = [
sp for sp in comp if sp.oxi_state == ox and abs(sp.ionic_radius / radius - 1) < self.ionic_radius_tol
]
if (not compatible_species) and self.alio_tol:
# We only consider aliovalent doping if there are no compatible
# isovalent species.
compatible_species = [
sp
for sp in comp
if abs(sp.oxi_state - ox) <= self.alio_tol
and abs(sp.ionic_radius / radius - 1) < self.ionic_radius_tol
and sp.oxi_state * ox >= 0
]
if self.allowed_doping_species is not None:
# Only keep allowed doping species.
compatible_species = [
sp for sp in compatible_species if sp in [get_el_sp(s) for s in self.allowed_doping_species]
]
logger.info("Compatible species: %s" % compatible_species)
lengths = structure.lattice.abc
scaling = [max(1, int(round(math.ceil(self.min_length / x)))) for x in lengths]
logger.info("Lengths are %s" % str(lengths))
logger.info("Scaling = %s" % str(scaling))
all_structures = []
t = EnumerateStructureTransformation(**self.kwargs)
for sp in compatible_species:
supercell = structure * scaling
nsp = supercell.composition[sp]
if sp.oxi_state == ox:
supercell.replace_species({sp: {sp: (nsp - 1) / nsp, self.dopant: 1 / nsp}})
logger.info(f"Doping {sp} for {self.dopant} at level {1 / nsp:.3f}")
elif self.codopant:
codopant = _find_codopant(sp, 2 * sp.oxi_state - ox)
supercell.replace_species({sp: {sp: (nsp - 2) / nsp, self.dopant: 1 / nsp, codopant: 1 / nsp}})
logger.info(f"Doping {sp} for {self.dopant} + {codopant} at level {1 / nsp:.3f}")
elif abs(sp.oxi_state) < abs(ox):
# Strategy: replace the target species with a
# combination of dopant and vacancy.
# We will choose the lowest oxidation state species as a
# vacancy compensation species as it is likely to be lower in
# energy
sp_to_remove = min(
(s for s in comp if s.oxi_state * ox > 0),
key=lambda ss: abs(ss.oxi_state),
)
if sp_to_remove == sp:
common_charge = lcm(int(abs(sp.oxi_state)), int(abs(ox)))
ndopant = common_charge / abs(ox)
nsp_to_remove = common_charge / abs(sp.oxi_state)
logger.info("Doping %d %s with %d %s." % (nsp_to_remove, sp, ndopant, self.dopant))
supercell.replace_species(
{
sp: {
sp: (nsp - nsp_to_remove) / nsp,
self.dopant: ndopant / nsp,
}
}
)
else:
ox_diff = int(abs(round(sp.oxi_state - ox)))
vac_ox = int(abs(sp_to_remove.oxi_state))
common_charge = lcm(vac_ox, ox_diff)
ndopant = common_charge / ox_diff
nx_to_remove = common_charge / vac_ox
nx = supercell.composition[sp_to_remove]
logger.info(
"Doping %d %s with %s and removing %d %s."
% (ndopant, sp, self.dopant, nx_to_remove, sp_to_remove)
)
supercell.replace_species(
{
sp: {sp: (nsp - ndopant) / nsp, self.dopant: ndopant / nsp},
sp_to_remove: {sp_to_remove: (nx - nx_to_remove) / nx},
}
)
elif abs(sp.oxi_state) > abs(ox):
# Strategy: replace the target species with dopant and also
# remove some opposite charged species for charge neutrality
if ox > 0:
sp_to_remove = max(supercell.composition.keys(), key=lambda el: el.X)
else:
sp_to_remove = min(supercell.composition.keys(), key=lambda el: el.X)
# Confirm species are of opposite oxidation states.
assert sp_to_remove.oxi_state * sp.oxi_state < 0
ox_diff = int(abs(round(sp.oxi_state - ox)))
anion_ox = int(abs(sp_to_remove.oxi_state))
nx = supercell.composition[sp_to_remove]
common_charge = lcm(anion_ox, ox_diff)
ndopant = common_charge / ox_diff
nx_to_remove = common_charge / anion_ox
logger.info(
"Doping %d %s with %s and removing %d %s." % (ndopant, sp, self.dopant, nx_to_remove, sp_to_remove)
)
supercell.replace_species(
{
sp: {sp: (nsp - ndopant) / nsp, self.dopant: ndopant / nsp},
sp_to_remove: {sp_to_remove: (nx - nx_to_remove) / nx},
}
)
ss = t.apply_transformation(supercell, return_ranked_list=self.max_structures_per_enum)
logger.info("%s distinct structures" % len(ss))
all_structures.extend(ss)
logger.info("Total %s doped structures" % len(all_structures))
if return_ranked_list:
return all_structures[:return_ranked_list]
return all_structures[0]["structure"]
@property
def inverse(self):
"""Returns: None"""
return None
@property
def is_one_to_many(self):
"""Returns: True"""
return True
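# Illustrative sketch (added for clarity; not part of the original module): how
# DopingTransformation might typically be applied. `bulk` is assumed to be an
# oxidation-state-decorated pymatgen Structure; the dopant and keyword values are
# arbitrary example choices.
def _example_apply_doping(bulk):
    """Hypothetical helper showing a typical DopingTransformation workflow."""
    trafo = DopingTransformation("Al3+", min_length=8, alio_tol=1, max_structures_per_enum=20)
    # with a ranked list requested, a list of {"structure": ..., "energy": ...} dicts is returned
    return trafo.apply_transformation(bulk, return_ranked_list=5)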
class SlabTransformation(AbstractTransformation):
"""
A transformation that creates a slab from a structure.
"""
def __init__(
self,
miller_index,
min_slab_size,
min_vacuum_size,
lll_reduce=False,
center_slab=False,
in_unit_planes=False,
primitive=True,
max_normal_search=None,
shift=0,
tol=0.1,
):
"""
Args:
miller_index (3-tuple or list): miller index of slab
min_slab_size (float): minimum slab size in angstroms
min_vacuum_size (float): minimum size of vacuum
lll_reduce (bool): whether to apply LLL reduction
center_slab (bool): whether to center the slab
primitive (bool): whether to reduce slabs to most primitive cell
max_normal_search (int): maximum index to include in linear
combinations of indices to find c lattice vector orthogonal
to slab surface
shift (float): shift to get termination
tol (float): tolerance for primitive cell finding
"""
self.miller_index = miller_index
self.min_slab_size = min_slab_size
self.min_vacuum_size = min_vacuum_size
self.lll_reduce = lll_reduce
self.center_slab = center_slab
self.in_unit_planes = in_unit_planes
self.primitive = primitive
self.max_normal_search = max_normal_search
self.shift = shift
self.tol = tol
def apply_transformation(self, structure):
"""
Applies the transformation.
Args:
structure: Input Structure
Returns:
Slab Structures.
"""
sg = SlabGenerator(
structure,
self.miller_index,
self.min_slab_size,
self.min_vacuum_size,
self.lll_reduce,
self.center_slab,
self.in_unit_planes,
self.primitive,
self.max_normal_search,
)
slab = sg.get_slab(self.shift, self.tol)
return slab
@property
def inverse(self):
"""Returns: None"""
return None
@property
def is_one_to_many(self):
"""Returns: False"""
return False
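# Illustrative sketch (added for clarity; not part of the original module): a minimal
# SlabTransformation call. `bulk` is assumed to be a conventional-cell pymatgen
# Structure; the Miller index and thicknesses are arbitrary example values.
def _example_make_slab(bulk):
    """Hypothetical helper showing a typical SlabTransformation call."""
    trafo = SlabTransformation(miller_index=(1, 1, 1), min_slab_size=10, min_vacuum_size=10)
    return trafo.apply_transformation(bulk)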
class DisorderOrderedTransformation(AbstractTransformation):
"""
Not to be confused with OrderDisorderedTransformation,
this transformation attempts to obtain a
*disordered* structure from an input ordered structure.
This may or may not be physically plausible, further
inspection of the returned structures is advised.
The main purpose for this transformation is for structure
matching to crystal prototypes for structures that have
been derived from a parent prototype structure by
substitutions or alloying additions.
"""
def __init__(self, max_sites_to_merge=2):
"""
Args:
max_sites_to_merge: only merge this number of sites together
"""
self.max_sites_to_merge = max_sites_to_merge
def apply_transformation(self, structure, return_ranked_list=False):
"""
Args:
structure: ordered structure
return_ranked_list: as in other pymatgen Transformations
Returns:
Transformed disordered structure(s)
"""
if not structure.is_ordered:
raise ValueError("This transformation is for disordered structures only.")
partitions = self._partition_species(structure.composition, max_components=self.max_sites_to_merge)
disorder_mappings = self._get_disorder_mappings(structure.composition, partitions)
disordered_structures = []
for mapping in disorder_mappings:
disordered_structure = structure.copy()
disordered_structure.replace_species(mapping)
disordered_structures.append({"structure": disordered_structure, "mapping": mapping})
if len(disordered_structures) == 0:
return None
if not return_ranked_list:
return disordered_structures[0]["structure"]
if len(disordered_structures) > return_ranked_list:
disordered_structures = disordered_structures[0:return_ranked_list]
return disordered_structures
@property
def inverse(self):
"""Returns: None"""
return None
@property
def is_one_to_many(self):
"""Returns: True"""
return True
@staticmethod
def _partition_species(composition, max_components=2):
"""
Private method to split a list of species into
various partitions.
"""
def _partition(collection):
# thanks https://stackoverflow.com/a/30134039
if len(collection) == 1:
yield [collection]
return
first = collection[0]
for smaller in _partition(collection[1:]):
# insert `first` in each of the subpartition's subsets
for n, subset in enumerate(smaller):
yield smaller[:n] + [[first] + subset] + smaller[n + 1 :]
# put `first` in its own subset
yield [[first]] + smaller
def _sort_partitions(partitions_to_sort):
"""
Sort partitions by those we want to check first
(typically, merging two sites into one is the
one to try first).
"""
partition_indices = [(idx, [len(p) for p in partition]) for idx, partition in enumerate(partitions_to_sort)]
# sort by maximum length of partition first (try smallest maximums first)
# and secondarily by number of partitions (most partitions first, i.e.
# create the 'least disordered' structures first)
partition_indices = sorted(partition_indices, key=lambda x: (max(x[1]), -len(x[1])))
# merge at most max_component sites,
# e.g. merge at most 2 species into 1 disordered site
partition_indices = [x for x in partition_indices if max(x[1]) <= max_components]
partition_indices.pop(0) # this is just the input structure
sorted_partitions = [partitions_to_sort[x[0]] for x in partition_indices]
return sorted_partitions
collection = list(composition.keys())
partitions = list(_partition(collection))
partitions = _sort_partitions(partitions)
return partitions
@staticmethod
def _get_disorder_mappings(composition, partitions):
"""
Private method to obtain the mapping to create
a disordered structure from a given partition.
"""
def _get_replacement_dict_from_partition(partition):
d = {} # to be passed to Structure.replace_species()
for sp_list in partition:
if len(sp_list) > 1:
total_occ = sum(composition[sp] for sp in sp_list)
merged_comp = {sp: composition[sp] / total_occ for sp in sp_list}
for sp in sp_list:
d[sp] = merged_comp
return d
disorder_mapping = [_get_replacement_dict_from_partition(p) for p in partitions]
return disorder_mapping
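# Illustrative sketch (added for clarity; not part of the original module): a typical
# DisorderOrderedTransformation call, e.g. to map an ordered derivative structure back
# onto a disordered prototype for structure matching. `ordered_structure` is an assumed
# input.
def _example_disorder_ordered(ordered_structure):
    """Hypothetical helper showing a typical DisorderOrderedTransformation call."""
    trafo = DisorderOrderedTransformation(max_sites_to_merge=2)
    # with return_ranked_list set, a list of {"structure": ..., "mapping": ...} dicts is returned
    return trafo.apply_transformation(ordered_structure, return_ranked_list=3)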
class GrainBoundaryTransformation(AbstractTransformation):
"""
A transformation that creates a gb from a bulk structure.
"""
def __init__(
self,
rotation_axis,
rotation_angle,
expand_times=4,
vacuum_thickness=0.0,
ab_shift=None,
normal=False,
ratio=True,
plane=None,
max_search=20,
tol_coi=1.0e-8,
rm_ratio=0.7,
quick_gen=False,
):
"""
Args:
rotation_axis (list): Rotation axis of GB in the form of a list of integer
e.g.: [1, 1, 0]
rotation_angle (float, in unit of degree): rotation angle used to generate GB.
Make sure the angle is accurate enough. You can use the enum* functions
in this class to extract the accurate angle.
e.g.: The rotation angle of sigma 3 twist GB with the rotation axis
[1, 1, 1] and GB plane (1, 1, 1) can be 60.000000000 degree.
                If you do not know the rotation angle but do know the sigma value, the
                function get_rotation_angle_from_sigma can be used to return all the
                rotation angles for the sigma value you provide.
            expand_times (int): The multiplier used to expand one unit grain into a larger grain.
                This is used to tune the grain length of the GB so that the two GBs in one
                cell do not interact with each other. Defaults to 4.
vacuum_thickness (float): The thickness of vacuum that you want to insert between
two grains of the GB. Default to 0.
            ab_shift (list of float, in units of the a, b vectors of the GB): in-plane shift of the two grains
            normal (bool):
                Whether to require the c axis of the top grain (first transformation matrix)
                to be perpendicular to the surface. Defaults to False.
ratio (list of integers): lattice axial ratio.
If True, will try to determine automatically from structure.
For cubic system, ratio is not needed and can be set to None.
For tetragonal system, ratio = [mu, mv], list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to None.
For orthorhombic system, ratio = [mu, lam, mv], list of three integers,
that is, mu:lam:mv = c2:b2:a2. If irrational for one axis, set it to None.
e.g. mu:lam:mv = c2,None,a2, means b2 is irrational.
For rhombohedral system, ratio = [mu, mv], list of two integers,
that is, mu/mv is the ratio of (1+2*cos(alpha))/cos(alpha).
If irrational, set it to None.
For hexagonal system, ratio = [mu, mv], list of two integers,
                that is, mu/mv = c2/a2. If it is irrational, set it to None.
plane (list): Grain boundary plane in the form of a list of integers
e.g.: [1, 2, 3]. If none, we set it as twist GB. The plane will be perpendicular
to the rotation axis.
            max_search (int): Maximum index used when searching for the GB lattice vectors
                that give the smallest GB lattice. If normal is True, the GB c vector
                perpendicular to the plane is searched within the same bound. For complex GBs,
                reducing this value speeds up the search, but too small a value may cause errors.
            tol_coi (float): Tolerance used to find the coincidence sites. When approximating
                the ratio needed to generate the GB, this tolerance usually needs to be increased
                to obtain the correct number of coincidence sites. To check whether the number of
                coincidence sites is correct, compare the generated GB object's sigma with the
                enum* sigma values (i.e. what the user expects from the input).
rm_ratio (float): the criteria to remove the atoms which are too close with each other.
rm_ratio * bond_length of bulk system is the criteria of bond length, below which the atom
will be removed. Default to 0.7.
            quick_gen (bool): Whether to quickly generate a supercell. If set to True, the
                smallest cell is not searched for.
Returns:
Grain boundary structure (gb (Structure) object).
"""
self.rotation_axis = rotation_axis
self.rotation_angle = rotation_angle
self.expand_times = expand_times
self.vacuum_thickness = vacuum_thickness
self.ab_shift = ab_shift or [0, 0]
self.normal = normal
self.ratio = ratio
self.plane = plane
self.max_search = max_search
self.tol_coi = tol_coi
self.rm_ratio = rm_ratio
self.quick_gen = quick_gen
def apply_transformation(self, structure):
"""
Applies the transformation.
Args:
structure: Input Structure
        Returns:
            Grain boundary Structure.
"""
gbg = GrainBoundaryGenerator(structure)
gb_struct = gbg.gb_from_parameters(
self.rotation_axis,
self.rotation_angle,
expand_times=self.expand_times,
vacuum_thickness=self.vacuum_thickness,
ab_shift=self.ab_shift,
normal=self.normal,
ratio=gbg.get_ratio() if self.ratio is True else self.ratio,
plane=self.plane,
max_search=self.max_search,
tol_coi=self.tol_coi,
rm_ratio=self.rm_ratio,
quick_gen=self.quick_gen,
)
return gb_struct
@property
def inverse(self):
"""Returns: None"""
return None
@property
def is_one_to_many(self):
"""Returns: False"""
return False
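# Illustrative sketch (added for clarity; not part of the original module): a minimal
# GrainBoundaryTransformation call for a twist GB in a cubic lattice. The rotation
# axis, angle and other values below are example choices only; for real use the angle
# should come from the GrainBoundaryGenerator enum* helpers.
def _example_make_grain_boundary(bulk):
    """Hypothetical helper showing a typical GrainBoundaryTransformation call."""
    trafo = GrainBoundaryTransformation(
        rotation_axis=[0, 0, 1],
        rotation_angle=36.87,  # approximate sigma 5 [001] twist angle for a cubic lattice
        expand_times=2,
        vacuum_thickness=0.0,
        ratio=None,  # ratio is not needed for cubic systems
    )
    return trafo.apply_transformation(bulk)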
class CubicSupercellTransformation(AbstractTransformation):
"""
A transformation that aims to generate a nearly cubic supercell structure
from a structure.
The algorithm solves for a transformation matrix that makes the supercell
cubic. The matrix must have integer entries, so entries are rounded (in such
a way that forces the matrix to be nonsingular). From the supercell
resulting from this transformation matrix, vector projections are used to
determine the side length of the largest cube that can fit inside the
supercell. The algorithm will iteratively increase the size of the supercell
until the largest inscribed cube's side length is at least 'min_length'
and the number of atoms in the supercell falls in the range
    ``min_atoms <= n <= max_atoms``.
"""
def __init__(
self,
min_atoms: Optional[int] = None,
max_atoms: Optional[int] = None,
min_length: float = 15.0,
force_diagonal: bool = False,
):
"""
Args:
max_atoms: Maximum number of atoms allowed in the supercell.
min_atoms: Minimum number of atoms allowed in the supercell.
min_length: Minimum length of the smallest supercell lattice vector.
force_diagonal: If True, return a transformation with a diagonal
transformation matrix.
"""
        self.min_atoms = min_atoms if min_atoms else -np.inf
        self.max_atoms = max_atoms if max_atoms else np.inf
self.min_length = min_length
self.force_diagonal = force_diagonal
self.transformation_matrix = None
def apply_transformation(self, structure: Structure) -> Structure:
"""
The algorithm solves for a transformation matrix that makes the
supercell cubic. The matrix must have integer entries, so entries are
rounded (in such a way that forces the matrix to be nonsingular). From
the supercell resulting from this transformation matrix, vector
projections are used to determine the side length of the largest cube
that can fit inside the supercell. The algorithm will iteratively
increase the size of the supercell until the largest inscribed cube's
        side length is at least 'min_length' and the number of atoms in the
        supercell falls in the range defined by min_atoms and max_atoms.
Returns:
supercell: Transformed supercell.
"""
lat_vecs = structure.lattice.matrix
# boolean for if a sufficiently large supercell has been created
sc_not_found = True
if self.force_diagonal:
scale = self.min_length / np.array(structure.lattice.abc)
self.transformation_matrix = np.diag(np.ceil(scale).astype(int))
st = SupercellTransformation(self.transformation_matrix)
return st.apply_transformation(structure)
# target_threshold is used as the desired cubic side lengths
target_sc_size = self.min_length
while sc_not_found:
target_sc_lat_vecs = np.eye(3, 3) * target_sc_size
self.transformation_matrix = target_sc_lat_vecs @ np.linalg.inv(lat_vecs)
# round the entries of T and force T to be nonsingular
self.transformation_matrix = _round_and_make_arr_singular(self.transformation_matrix) # type: ignore
proposed_sc_lat_vecs = self.transformation_matrix @ lat_vecs # type: ignore
# Find the shortest dimension length and direction
a = proposed_sc_lat_vecs[0]
b = proposed_sc_lat_vecs[1]
c = proposed_sc_lat_vecs[2]
length1_vec = c - _proj(c, a) # a-c plane
length2_vec = a - _proj(a, c)
length3_vec = b - _proj(b, a) # b-a plane
length4_vec = a - _proj(a, b)
length5_vec = b - _proj(b, c) # b-c plane
length6_vec = c - _proj(c, b)
length_vecs = np.array(
[
length1_vec,
length2_vec,
length3_vec,
length4_vec,
length5_vec,
length6_vec,
]
)
# Get number of atoms
st = SupercellTransformation(self.transformation_matrix)
superstructure = st.apply_transformation(structure)
num_at = superstructure.num_sites
# Check if constraints are satisfied
if (
np.min(np.linalg.norm(length_vecs, axis=1)) >= self.min_length
and self.min_atoms <= num_at <= self.max_atoms
):
return superstructure
# Increase threshold until proposed supercell meets requirements
target_sc_size += 0.1
if num_at > self.max_atoms:
raise AttributeError(
"While trying to solve for the supercell, the max "
"number of atoms was exceeded. Try lowering the number"
"of nearest neighbor distances."
)
raise AttributeError("Unable to find cubic supercell")
@property
def inverse(self):
"""
Returns:
None
"""
return None
@property
def is_one_to_many(self):
"""
Returns:
False
"""
return False
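# Illustrative sketch (added for clarity; not part of the original module): generating
# a roughly cubic supercell with at least 10 Angstroms between periodic images.
# `structure` is an assumed pymatgen Structure; the atom-count bounds are example values.
def _example_cubic_supercell(structure):
    """Hypothetical helper showing a typical CubicSupercellTransformation call."""
    trafo = CubicSupercellTransformation(min_atoms=50, max_atoms=500, min_length=10.0)
    supercell = trafo.apply_transformation(structure)
    # the integer transformation matrix that was used is stored on the instance
    return supercell, trafo.transformation_matrix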
class AddAdsorbateTransformation(AbstractTransformation):
"""
    Create adsorbate structures.
"""
def __init__(
self,
adsorbate,
selective_dynamics=False,
height=0.9,
mi_vec=None,
repeat=None,
min_lw=5.0,
translate=True,
reorient=True,
find_args=None,
):
"""
        Use AdsorbateSiteFinder to add an adsorbate to a slab.
Args:
adsorbate (Molecule): molecule to add as adsorbate
selective_dynamics (bool): flag for whether to assign
non-surface sites as fixed for selective dynamics
height (float): height criteria for selection of surface sites
mi_vec : vector corresponding to the vector
concurrent with the miller index, this enables use with
slabs that have been reoriented, but the miller vector
must be supplied manually
repeat (3-tuple or list): repeat argument for supercell generation
min_lw (float): minimum length and width of the slab, only used
if repeat is None
translate (bool): flag on whether to translate the molecule so
that its CoM is at the origin prior to adding it to the surface
reorient (bool): flag on whether or not to reorient adsorbate
along the miller index
find_args (dict): dictionary of arguments to be passed to the
call to self.find_adsorption_sites, e.g. {"distance":2.0}
"""
self.adsorbate = adsorbate
self.selective_dynamics = selective_dynamics
self.height = height
self.mi_vec = mi_vec
self.repeat = repeat
self.min_lw = min_lw
self.translate = translate
self.reorient = reorient
self.find_args = find_args
def apply_transformation(self, structure, return_ranked_list=False):
"""
Args:
structure: Must be a Slab structure
return_ranked_list: Whether or not multiple structures are
returned. If return_ranked_list is a number, up to that number of
structures is returned.
Returns: Slab with adsorbate
"""
sitefinder = AdsorbateSiteFinder(
structure,
selective_dynamics=self.selective_dynamics,
height=self.height,
mi_vec=self.mi_vec,
)
structures = sitefinder.generate_adsorption_structures(
self.adsorbate,
repeat=self.repeat,
min_lw=self.min_lw,
translate=self.translate,
reorient=self.reorient,
find_args=self.find_args,
)
if not return_ranked_list:
return structures[0]
return [{"structure": structure} for structure in structures[:return_ranked_list]]
@property
def inverse(self):
"""Returns: None"""
return None
@property
def is_one_to_many(self):
"""Returns: True"""
return True
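# Illustrative sketch (added for clarity; not part of the original module): a typical
# AddAdsorbateTransformation call. `slab` is assumed to be a pymatgen Slab and
# `molecule` a pymatgen Molecule (e.g. a CO adsorbate); all keyword values are examples.
def _example_add_adsorbate(slab, molecule):
    """Hypothetical helper showing a typical AddAdsorbateTransformation call."""
    trafo = AddAdsorbateTransformation(molecule, min_lw=8.0, find_args={"distance": 2.0})
    # with return_ranked_list set, a list of {"structure": ...} dicts is returned
    return trafo.apply_transformation(slab, return_ranked_list=5)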
def _round_and_make_arr_singular(arr: np.ndarray) -> np.ndarray:
"""
This function rounds all elements of a matrix to the nearest integer,
unless the rounding scheme causes the matrix to be singular, in which
case elements of zero rows or columns in the rounded matrix with the
largest absolute valued magnitude in the unrounded matrix will be
rounded to the next integer away from zero rather than to the
nearest integer.
The transformation is as follows. First, all entries in 'arr' will be
rounded to the nearest integer to yield 'arr_rounded'. If 'arr_rounded'
has any zero rows, then one element in each zero row of 'arr_rounded'
corresponding to the element in 'arr' of that row with the largest
absolute valued magnitude will be rounded to the next integer away from
zero (see the '_round_away_from_zero(x)' function) rather than the
nearest integer. This process is then repeated for zero columns. Also
note that if 'arr' already has zero rows or columns, then this function
will not change those rows/columns.
Args:
arr: Input matrix
Returns:
Transformed matrix.
"""
def round_away_from_zero(x):
"""
Returns 'x' rounded to the next integer away from 0.
If 'x' is zero, then returns zero.
E.g. -1.2 rounds to -2.0. 1.2 rounds to 2.0.
"""
abs_x = abs(x)
return math.ceil(abs_x) * (abs_x / x) if x != 0 else 0
arr_rounded = np.around(arr)
# Zero rows in 'arr_rounded' make the array singular, so force zero rows to
# be nonzero
if (~arr_rounded.any(axis=1)).any():
# Check for zero rows in T_rounded
# indices of zero rows
zero_row_idxs = np.where(~arr_rounded.any(axis=1))[0]
for zero_row_idx in zero_row_idxs: # loop over zero rows
zero_row = arr[zero_row_idx, :]
# Find the element of the zero row with the largest absolute
# magnitude in the original (non-rounded) array (i.e. 'arr')
matches = np.absolute(zero_row) == np.amax(np.absolute(zero_row))
col_idx_to_fix = np.where(matches)[0]
# Break ties for the largest absolute magnitude
r_idx = np.random.randint(len(col_idx_to_fix))
col_idx_to_fix = col_idx_to_fix[r_idx]
# Round the chosen element away from zero
arr_rounded[zero_row_idx, col_idx_to_fix] = round_away_from_zero(arr[zero_row_idx, col_idx_to_fix])
# Repeat process for zero columns
if (~arr_rounded.any(axis=0)).any():
# Check for zero columns in T_rounded
zero_col_idxs = np.where(~arr_rounded.any(axis=0))[0]
for zero_col_idx in zero_col_idxs:
zero_col = arr[:, zero_col_idx]
matches = np.absolute(zero_col) == np.amax(np.absolute(zero_col))
row_idx_to_fix = np.where(matches)[0]
for i in row_idx_to_fix:
arr_rounded[i, zero_col_idx] = round_away_from_zero(arr[i, zero_col_idx])
return arr_rounded.astype(int)
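# Illustrative sketch (added for clarity; not part of the original module): a small
# worked example of _round_and_make_arr_singular. In the matrix below the first row
# rounds to all zeros, so its largest-magnitude entry (0.4) is rounded away from zero
# instead; the same fix-up is then applied to any zero columns, keeping the result
# nonsingular.
def _example_round_and_make_arr_singular():
    """Hypothetical helper demonstrating the rounding scheme on a toy matrix."""
    arr = np.array(
        [
            [0.2, 0.4, 0.1],  # would otherwise round to an all-zero row
            [0.0, 2.3, 0.2],
            [1.8, 0.1, 0.0],
        ]
    )
    return _round_and_make_arr_singular(arr)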
class SubstituteSurfaceSiteTransformation(AbstractTransformation):
"""
    Use AdsorbateSiteFinder to perform substitution-type doping on the surface
and returns all possible configurations where one dopant is substituted
per surface. Can substitute one surface or both.
"""
def __init__(
self,
atom,
selective_dynamics=False,
height=0.9,
mi_vec=None,
target_species=None,
sub_both_sides=False,
range_tol=1e-2,
dist_from_surf=0,
):
"""
Args:
atom (str): atom corresponding to substitutional dopant
selective_dynamics (bool): flag for whether to assign
non-surface sites as fixed for selective dynamics
height (float): height criteria for selection of surface sites
mi_vec : vector corresponding to the vector
concurrent with the miller index, this enables use with
slabs that have been reoriented, but the miller vector
must be supplied manually
target_species: List of specific species to substitute
sub_both_sides (bool): If true, substitute an equivalent
site on the other surface
range_tol (float): Find viable substitution sites at a specific
distance from the surface +- this tolerance
dist_from_surf (float): Distance from the surface to find viable
substitution sites, defaults to 0 to substitute at the surface
"""
self.atom = atom
self.selective_dynamics = selective_dynamics
self.height = height
self.mi_vec = mi_vec
self.target_species = target_species
self.sub_both_sides = sub_both_sides
self.range_tol = range_tol
self.dist_from_surf = dist_from_surf
def apply_transformation(self, structure, return_ranked_list=False):
"""
Args:
structure: Must be a Slab structure
return_ranked_list: Whether or not multiple structures are
returned. If return_ranked_list is a number, up to that number of
structures is returned.
Returns: Slab with sites substituted
"""
sitefinder = AdsorbateSiteFinder(
structure,
selective_dynamics=self.selective_dynamics,
height=self.height,
mi_vec=self.mi_vec,
)
structures = sitefinder.generate_substitution_structures(
self.atom,
target_species=self.target_species,
sub_both_sides=self.sub_both_sides,
range_tol=self.range_tol,
dist_from_surf=self.dist_from_surf,
)
if not return_ranked_list:
return structures[0]
return [{"structure": structure} for structure in structures[:return_ranked_list]]
@property
def inverse(self):
"""Returns: None"""
return None
@property
def is_one_to_many(self):
"""Returns: True"""
return True
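# Illustrative sketch (added for clarity; not part of the original module): a typical
# SubstituteSurfaceSiteTransformation call, substituting a single dopant atom on the
# slab surface. The dopant and target species below are example values only.
def _example_substitute_surface_site(slab):
    """Hypothetical helper showing a typical SubstituteSurfaceSiteTransformation call."""
    trafo = SubstituteSurfaceSiteTransformation("Au", target_species=["Cu"])
    return trafo.apply_transformation(slab, return_ranked_list=3)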
def _proj(b, a):
"""
Returns vector projection (np.ndarray) of vector b (np.ndarray)
onto vector a (np.ndarray)
"""
return (b.T @ (a / np.linalg.norm(a))) * (a / np.linalg.norm(a))
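# Illustrative sketch (added for clarity; not part of the original module): _proj
# returns the component of b along a. For b = (1, 1, 0) and a = (2, 0, 0) the
# projection is (1, 0, 0).
def _example_proj():
    """Hypothetical helper demonstrating the _proj vector projection."""
    b = np.array([1.0, 1.0, 0.0])
    a = np.array([2.0, 0.0, 0.0])
    return _proj(b, a)  # expected: array([1., 0., 0.])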
class SQSTransformation(AbstractTransformation):
"""
A transformation that creates a special quasirandom structure (SQS) from a structure with partial occupancies.
"""
def __init__(
self,
scaling,
cluster_size_and_shell=None,
search_time=60,
directory=None,
instances=None,
temperature=1,
wr=1,
wn=1,
wd=0.5,
tol=1e-3,
best_only=True,
remove_duplicate_structures=True,
reduction_algo="LLL",
):
"""
Args:
structure (Structure): Disordered pymatgen Structure object
scaling (int or list): Scaling factor to determine supercell. Two options are possible:
a. (preferred) Scales number of atoms, e.g., for a structure with 8 atoms,
scaling=4 would lead to a 32 atom supercell
b. A sequence of three scaling factors, e.g., [2, 1, 1], which
specifies that the supercell should have dimensions 2a x b x c
cluster_size_and_shell (Optional[Dict[int, int]]): Dictionary of cluster interactions with entries in
the form number of atoms: nearest neighbor shell
Keyword Args:
search_time (float): Time spent looking for the ideal SQS in minutes (default: 60)
directory (str): Directory to run mcsqs calculation and store files (default: None
runs calculations in a temp directory)
instances (int): Specifies the number of parallel instances of mcsqs to run
(default: number of cpu cores detected by Python)
temperature (int or float): Monte Carlo temperature (default: 1), "T" in atat code
wr (int or float): Weight assigned to range of perfect correlation match in objective
function (default = 1)
wn (int or float): Multiplicative decrease in weight per additional point in cluster (default: 1)
            wd (int or float): Exponent of decay in weight as function of cluster diameter (default: 0.5)
tol (int or float): Tolerance for matching correlations (default: 1e-3)
best_only (bool): only return structures with lowest objective function
remove_duplicate_structures (bool): only return unique structures
reduction_algo (str): The lattice reduction algorithm to use.
Currently supported options are "niggli" or "LLL".
"False" does not reduce structure.
"""
self.scaling = scaling
self.search_time = search_time
self.cluster_size_and_shell = cluster_size_and_shell
self.directory = directory
self.instances = instances
self.temperature = temperature
self.wr = wr
self.wn = wn
self.wd = wd
self.tol = tol
self.best_only = best_only
self.remove_duplicate_structures = remove_duplicate_structures
self.reduction_algo = reduction_algo
@staticmethod
def _get_max_neighbor_distance(struc, shell):
"""
Calculate maximum nearest neighbor distance
Args:
struc: pymatgen Structure object
shell: nearest neighbor shell, such that shell=1 is the first nearest
neighbor, etc.
Returns:
maximum nearest neighbor distance, in angstroms
"""
mdnn = MinimumDistanceNN()
distances = []
for site_num, site in enumerate(struc):
shell_info = mdnn.get_nn_shell_info(struc, site_num, shell)
for entry in shell_info:
image = entry["image"]
distance = site.distance(struc[entry["site_index"]], jimage=image)
distances.append(distance)
return max(distances)
@staticmethod
def _get_disordered_substructure(struc_disordered):
"""
Converts disordered structure into a substructure consisting of only disordered sites
Args:
struc_disordered: pymatgen disordered Structure object
Returns:
pymatgen Structure object representing a substructure of disordered sites
"""
disordered_substructure = struc_disordered.copy()
idx_to_remove = []
for idx, site in enumerate(disordered_substructure.sites):
if site.is_ordered:
idx_to_remove.append(idx)
disordered_substructure.remove_sites(idx_to_remove)
return disordered_substructure
@staticmethod
def _sqs_cluster_estimate(struc_disordered, cluster_size_and_shell: Optional[Dict[int, int]] = None):
"""
Set up an ATAT cluster.out file for a given structure and set of constraints
Args:
struc_disordered: disordered pymatgen Structure object
cluster_size_and_shell: dict of integers {cluster: shell}
Returns:
dict of {cluster size: distance in angstroms} for mcsqs calculation
"""
cluster_size_and_shell = cluster_size_and_shell or {2: 3, 3: 2, 4: 1}
disordered_substructure = SQSTransformation._get_disordered_substructure(struc_disordered)
clusters = {}
for cluster_size, shell in cluster_size_and_shell.items():
max_distance = SQSTransformation._get_max_neighbor_distance(disordered_substructure, shell)
clusters[cluster_size] = max_distance + 0.01 # add small tolerance
return clusters
def apply_transformation(self, structure, return_ranked_list=False):
"""
Applies SQS transformation
Args:
structure (pymatgen Structure): pymatgen Structure with partial occupancies
            return_ranked_list (bool or int): if an int, the number of structures to return;
                if False, only the best SQS structure is returned
Returns:
pymatgen Structure which is an SQS of the input structure
"""
if return_ranked_list and self.instances is None:
raise ValueError("mcsqs has no instances, so cannot return a ranked list")
if (
isinstance(return_ranked_list, int)
and isinstance(self.instances, int)
and return_ranked_list > self.instances
):
raise ValueError("return_ranked_list cannot be less that number of instances")
clusters = self._sqs_cluster_estimate(structure, self.cluster_size_and_shell)
# useful for debugging and understanding
self._last_used_clusters = clusters
sqs = run_mcsqs(
structure=structure,
clusters=clusters,
scaling=self.scaling,
search_time=self.search_time,
directory=self.directory,
instances=self.instances,
temperature=self.temperature,
wr=self.wr,
wn=self.wn,
wd=self.wd,
tol=self.tol,
)
return self._get_unique_bestsqs_strucs(
sqs,
best_only=self.best_only,
return_ranked_list=return_ranked_list,
remove_duplicate_structures=self.remove_duplicate_structures,
reduction_algo=self.reduction_algo,
)
@staticmethod
def _get_unique_bestsqs_strucs(sqs, best_only, return_ranked_list, remove_duplicate_structures, reduction_algo):
"""
Gets unique sqs structures with lowest objective function. Requires an mcsqs output that has been run
in parallel, otherwise returns Sqs.bestsqs
Args:
sqs (Sqs): Sqs class object.
best_only (bool): only return structures with lowest objective function.
return_ranked_list (bool): Number of structures to return.
remove_duplicate_structures (bool): only return unique structures.
reduction_algo (str): The lattice reduction algorithm to use.
Currently supported options are "niggli" or "LLL".
"False" does not reduce structure.
Returns:
list of dicts of the form {'structure': Structure, 'objective_function': ...}, unless run in serial
(returns a single structure Sqs.bestsqs)
"""
if not return_ranked_list:
return_struc = sqs.bestsqs
# reduce structure
if reduction_algo:
return_struc = return_struc.get_reduced_structure(reduction_algo=reduction_algo)
# return just the structure
return return_struc
strucs = []
for d in sqs.allsqs:
            # filter for best structures only if enabled, else use the full sqs.allsqs list
if (not best_only) or (best_only and d["objective_function"] == sqs.objective_function):
struc = d["structure"]
# add temporary objective_function attribute to access objective_function after grouping
struc.objective_function = d["objective_function"]
strucs.append(struc)
if remove_duplicate_structures:
matcher = StructureMatcher()
# sort by unique structures ... can take a while for a long list of strucs
unique_strucs_grouped = matcher.group_structures(strucs)
# get unique structures only
strucs = [group[0] for group in unique_strucs_grouped]
# sort structures by objective function
strucs.sort(key=lambda x: x.objective_function if isinstance(x.objective_function, float) else -np.inf)
to_return = [{"structure": struc, "objective_function": struc.objective_function} for struc in strucs]
for d in to_return:
# delete temporary objective_function attribute
del d["structure"].objective_function
# reduce structure
if reduction_algo:
d["structure"] = d["structure"].get_reduced_structure(reduction_algo=reduction_algo)
if isinstance(return_ranked_list, int):
return to_return[:return_ranked_list]
return to_return
@property
def inverse(self):
"""Returns: None"""
return None
@property
def is_one_to_many(self):
"""Returns: True"""
return True
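# Illustrative sketch (added for clarity; not part of the original module): a typical
# SQSTransformation call. This assumes the ATAT mcsqs binary is available on the PATH
# and that `disordered` is a pymatgen Structure with partial occupancies; the scaling,
# search time and instance count are example values only.
def _example_sqs(disordered):
    """Hypothetical helper showing a typical SQSTransformation call."""
    trafo = SQSTransformation(scaling=4, search_time=1, instances=2)
    # with return_ranked_list set (and instances specified), a list of
    # {"structure": ..., "objective_function": ...} dicts is returned
    return trafo.apply_transformation(disordered, return_ranked_list=2)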
class MonteCarloRattleTransformation(AbstractTransformation):
r"""
Uses a Monte Carlo rattle procedure to randomly perturb the sites in a
structure.
This class requires the hiPhive package to be installed.
Rattling atom `i` is carried out as a Monte Carlo move that is accepted with
a probability determined from the minimum interatomic distance
:math:`d_{ij}`. If :math:`\\min(d_{ij})` is smaller than :math:`d_{min}`
the move is only accepted with a low probability.
This process is repeated for each atom a number of times meaning
the magnitude of the final displacements is not *directly*
connected to `rattle_std`.
"""
@requires(hiphive, "hiphive is required for MonteCarloRattleTransformation")
def __init__(self, rattle_std: float, min_distance: float, seed: Optional[int] = None, **kwargs):
"""
Args:
rattle_std: Rattle amplitude (standard deviation in normal
distribution). Note: this value is not *directly* connected to the
final average displacement for the structures
min_distance: Interatomic distance used for computing the probability
for each rattle move.
seed: Seed for setting up NumPy random state from which random numbers
are generated. If ``None``, a random seed will be generated
(default). This option allows the output of this transformation
to be deterministic.
**kwargs: Additional keyword arguments to be passed to the hiPhive
mc_rattle function.
"""
self.rattle_std = rattle_std
self.min_distance = min_distance
self.seed = seed
if not seed:
# if seed is None, use a random RandomState seed but make sure
# we store that the original seed was None
seed = np.random.randint(1, 1000000000)
self.random_state = np.random.RandomState(seed) # pylint: disable=E1101
self.kwargs = kwargs
def apply_transformation(self, structure: Structure) -> Structure:
"""
Apply the transformation.
Args:
structure: Input Structure
Returns:
Structure with sites perturbed.
"""
from hiphive.structure_generation.rattle import mc_rattle # type: ignore
atoms = AseAtomsAdaptor.get_atoms(structure)
seed = self.random_state.randint(1, 1000000000)
displacements = mc_rattle(atoms, self.rattle_std, self.min_distance, seed=seed, **self.kwargs)
transformed_structure = Structure(
structure.lattice,
structure.species,
structure.cart_coords + displacements,
coords_are_cartesian=True,
)
return transformed_structure
def __str__(self):
return f"{__name__} : rattle_std = {self.rattle_std}"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
"""
Returns: None
"""
return None
@property
def is_one_to_many(self):
"""
Returns: False
"""
return False
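# Illustrative sketch (added for clarity; not part of the original module): a typical
# MonteCarloRattleTransformation call. Requires the optional hiPhive dependency; the
# rattle amplitude, minimum distance and seed below are arbitrary example values.
def _example_rattle(structure):
    """Hypothetical helper showing a typical MonteCarloRattleTransformation call."""
    trafo = MonteCarloRattleTransformation(rattle_std=0.02, min_distance=1.5, seed=42)
    return trafo.apply_transformation(structure)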
| vorwerkc/pymatgen | pymatgen/transformations/advanced_transformations.py | Python | mit | 88,361 | [
"ASE",
"CRYSTAL",
"pymatgen"
] | 3ee9d06ce4af6a3be9b7c08f87d4c9afced68d9cd311f39ce209be1e6e83a83b |
from __future__ import absolute_import
from __future__ import division
import deprecation
import numpy as np
import scipy.ndimage as scind
from scipy.ndimage import (
binary_erosion,
convolve,
gaussian_filter,
generate_binary_structure,
label,
map_coordinates,
)
from . import _filter
from .rankorder import rank_order
from .smooth import smooth_with_function_and_mask
from .cpmorphology import fixup_scipy_ndimage_result as fix
from .cpmorphology import (
centers_of_labels,
convex_hull_ijv,
get_line_pts,
grey_dilation,
grey_erosion,
grey_reconstruction,
)
from six.moves import range
"""# of points handled in the first pass of the convex hull code"""
CONVEX_HULL_CHUNKSIZE = 250000
def stretch(image, mask=None):
"""Normalize an image to make the minimum zero and maximum one
image - pixel data to be normalized
mask - optional mask of relevant pixels. None = don't mask
returns the stretched image
"""
image = np.array(image, float)
    if np.prod(image.shape) == 0:
return image
if mask is None:
minval = np.min(image)
maxval = np.max(image)
if minval == maxval:
if minval < 0:
return np.zeros_like(image)
elif minval > 1:
return np.ones_like(image)
return image
else:
return (image - minval) / (maxval - minval)
else:
significant_pixels = image[mask]
if significant_pixels.size == 0:
return image
minval = np.min(significant_pixels)
maxval = np.max(significant_pixels)
if minval == maxval:
transformed_image = minval
else:
transformed_image = (significant_pixels - minval) / (maxval - minval)
        result = image.copy()
        result[mask] = transformed_image
        return result
def unstretch(image, minval, maxval):
"""Perform the inverse of stretch, given a stretched image
image - an image stretched by stretch or similarly scaled value or values
minval - minimum of previously stretched image
maxval - maximum of previously stretched image
"""
return image * (maxval - minval) + minval
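# Illustrative sketch (added for clarity; not part of the original module): stretch()
# maps an image onto [0, 1]; unstretch() undoes that mapping given the original min
# and max. The array below is an arbitrary example.
def _example_stretch_roundtrip():
    """Hypothetical helper demonstrating the stretch/unstretch round trip."""
    image = np.array([[2.0, 4.0], [6.0, 10.0]])
    stretched = stretch(image)  # values now span 0..1
    restored = unstretch(stretched, image.min(), image.max())
    return np.allclose(restored, image)  # expected: True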
def median_filter(data, mask, radius, percent=50):
"""Masked median filter with octagonal shape
data - array of data to be median filtered.
mask - mask of significant pixels in data
radius - the radius of a circle inscribed into the filtering octagon
percent - conceptually, order the significant pixels in the octagon,
count them and choose the pixel indexed by the percent
times the count divided by 100. More simply, 50 = median
returns a filtered array. In areas where the median filter does
not overlap the mask, the filtered result is undefined, but in
practice, it will be the lowest value in the valid area.
"""
if mask is None:
mask = np.ones(data.shape, dtype=bool)
if np.all(~mask):
return data.copy()
#
# Normalize the ranked data to 0-255
#
    if not np.issubdtype(data.dtype, np.integer) or np.min(data) < 0 or np.max(data) > 255:
ranked_data, translation = rank_order(data[mask], nbins=255)
was_ranked = True
else:
ranked_data = data[mask]
was_ranked = False
input = np.zeros(data.shape, np.uint8)
input[mask] = ranked_data
mmask = np.ascontiguousarray(mask, np.uint8)
output = np.zeros(data.shape, np.uint8)
_filter.median_filter(input, mmask, output, radius, percent)
if was_ranked:
result = translation[output]
else:
result = output
return result
@deprecation.deprecated(
current_version="2.0.0",
deprecated_in="2.0.0",
details="replaced by the `skimage.restoration.denoise_bilateral` function from `scikit-image` instead",
removed_in="2.1.0",
)
def bilateral_filter(
image, mask, sigma_spatial, sigma_range, sampling_spatial=None, sampling_range=None
):
"""Bilateral filter of an image
image - image to be bilaterally filtered
mask - mask of significant points in image
sigma_spatial - standard deviation of the spatial Gaussian
sigma_range - standard deviation of the range Gaussian
sampling_spatial - amt to reduce image array extents when sampling
default is 1/2 sigma_spatial
sampling_range - amt to reduce the range of values when sampling
default is 1/2 sigma_range
The bilateral filter is described by the following equation:
    sum(Fs(||p - q||)Fr(|Ip - Iq|)Iq) / sum(Fs(||p - q||)Fr(|Ip - Iq|))
where the sum is over all points in the kernel
p is all coordinates in the image
q is the coordinates as perturbed by the mask
Ip is the intensity at p
Iq is the intensity at q
Fs is the spatial convolution function, for us a Gaussian that
falls off as the distance between falls off
Fr is the "range" distance which falls off as the difference
in intensity increases.
    1 / sum(Fs(||p - q||)Fr(|Ip - Iq|)) is the weighting for point p
"""
# The algorithm is taken largely from code by Jiawen Chen which miraculously
# extends to the masked case:
# http://groups.csail.mit.edu/graphics/bilagrid/bilagrid_web.pdf
#
# Form a 3-d array whose extent is reduced in the i,j directions
# by the spatial sampling parameter and whose extent is reduced in the
# z (image intensity) direction by the range sampling parameter.
# Scatter each significant pixel in the image into the nearest downsampled
# array address where the pixel's i,j coordinate gives the corresponding
# i and j in the matrix and the intensity value gives the corresponding z
# in the array.
# Count the # of values entered into each 3-d array element to form a
# weight.
# Similarly convolve the downsampled value and weight arrays with a 3-d
# Gaussian kernel whose i and j Gaussian is the sigma_spatial and whose
# z is the sigma_range.
#
# Divide the value by the weight to scale each z value appropriately
#
# Linearly interpolate using an i x j x 3 array where [:,:,0] is the
# i coordinate in the downsampled array, [:,:,1] is the j coordinate
# and [:,:,2] is the unrounded index of the z-slot
#
# One difference is that I don't pad the intermediate arrays. The
# weights bleed off the edges of the intermediate arrays and this
# accounts for the ring of zero values used at the border bleeding
# back into the intermediate arrays during convolution
#
if sampling_spatial is None:
sampling_spatial = sigma_spatial / 2.0
if sampling_range is None:
sampling_range = sigma_range / 2.0
if np.all(np.logical_not(mask)):
return image
masked_image = image[mask]
image_min = np.min(masked_image)
image_max = np.max(masked_image)
image_delta = image_max - image_min
if image_delta == 0:
return image
#
# ds = downsampled. Calculate the ds array sizes and sigmas.
#
ds_sigma_spatial = sigma_spatial / sampling_spatial
ds_sigma_range = sigma_range / sampling_range
ds_i_limit = int(image.shape[0] / sampling_spatial) + 2
ds_j_limit = int(image.shape[1] / sampling_spatial) + 2
ds_z_limit = int(image_delta / sampling_range) + 2
grid_data = np.zeros((ds_i_limit, ds_j_limit, ds_z_limit))
grid_weights = np.zeros((ds_i_limit, ds_j_limit, ds_z_limit))
#
# Compute the downsampled i, j and z coordinates at each point
#
di, dj = (
np.mgrid[0 : image.shape[0], 0 : image.shape[1]].astype(float)
/ sampling_spatial
)
dz = (masked_image - image_min) / sampling_range
#
# Treat this as a list of 3-d coordinates from now on
#
di = di[mask]
dj = dj[mask]
#
# scatter the unmasked image points into the data array and
# scatter a value of 1 per point into the weights
#
grid_data[
(di + 0.5).astype(int), (dj + 0.5).astype(int), (dz + 0.5).astype(int)
] += masked_image
grid_weights[
(di + 0.5).astype(int), (dj + 0.5).astype(int), (dz + 0.5).astype(int)
] += 1
#
# Make a Gaussian kernel
#
kernel_spatial_limit = int(2 * ds_sigma_spatial) + 1
kernel_range_limit = int(2 * ds_sigma_range) + 1
ki, kj, kz = np.mgrid[
-kernel_spatial_limit : kernel_spatial_limit + 1,
-kernel_spatial_limit : kernel_spatial_limit + 1,
-kernel_range_limit : kernel_range_limit + 1,
]
kernel = np.exp(
-0.5
* ((ki ** 2 + kj ** 2) / ds_sigma_spatial ** 2 + kz ** 2 / ds_sigma_range ** 2)
)
blurred_grid_data = convolve(grid_data, kernel, mode="constant")
blurred_weights = convolve(grid_weights, kernel, mode="constant")
weight_mask = blurred_weights > 0
normalized_blurred_grid = np.zeros(grid_data.shape)
normalized_blurred_grid[weight_mask] = (
blurred_grid_data[weight_mask] / blurred_weights[weight_mask]
)
#
# Now use di, dj and dz to find the coordinate of the point within
# the blurred grid to use. We actually interpolate between points
# here (both in the i,j direction to get intermediate z values and in
# the z direction to get the slot, roughly where we put our original value)
#
dijz = np.vstack((di, dj, dz))
image_copy = image.copy()
image_copy[mask] = map_coordinates(normalized_blurred_grid, dijz, order=1)
return image_copy
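# Illustrative sketch (added for clarity; not part of the original module): a minimal
# call to the (deprecated) bilateral_filter above. The sigma values are arbitrary
# examples that assume an image scaled to roughly [0, 1]; per the deprecation notice,
# scikit-image's denoise_bilateral is the suggested replacement for new code.
def _example_bilateral(image):
    """Hypothetical helper showing a typical bilateral_filter call."""
    mask = np.ones(image.shape, bool)
    return bilateral_filter(image, mask, sigma_spatial=3.0, sigma_range=0.2)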
def laplacian_of_gaussian(image, mask, size, sigma):
"""Perform the Laplacian of Gaussian transform on the image
image - 2-d image array
mask - binary mask of significant pixels
size - length of side of square kernel to use
sigma - standard deviation of the Gaussian
"""
half_size = size // 2
i, j = np.mgrid[-half_size : half_size + 1, -half_size : half_size + 1].astype(
float
) / float(sigma)
distance = (i ** 2 + j ** 2) / 2
gaussian = np.exp(-distance)
#
# Normalize the Gaussian
#
gaussian = gaussian / np.sum(gaussian)
log = (distance - 1) * gaussian
#
# Normalize the kernel to have a sum of zero
#
log = log - np.mean(log)
if mask is None:
mask = np.ones(image.shape[:2], bool)
masked_image = image.copy()
masked_image[~mask] = 0
output = convolve(masked_image, log, mode="constant", cval=0)
#
# Do the LoG of the inverse of the mask. This finds the magnitude of the
# contribution of the masked pixels. We then fudge by multiplying by the
# value at the pixel of interest - this effectively sets the value at a
# masked pixel to that of the pixel of interest.
#
# It underestimates the LoG, that's not a terrible thing.
#
correction = convolve((~mask).astype(float), log, mode="constant", cval=1)
output += correction * image
output[~mask] = image[~mask]
return output
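# Illustrative sketch (added for clarity; not part of the original module): a typical
# laplacian_of_gaussian call. A kernel size of roughly 4 * sigma + 1 is a common rule
# of thumb; the values below are example choices only.
def _example_log(image):
    """Hypothetical helper showing a typical laplacian_of_gaussian call."""
    sigma = 2.0
    size = int(4 * sigma + 1)  # 9x9 kernel for sigma = 2
    return laplacian_of_gaussian(image, None, size, sigma)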
def masked_convolution(data, mask, kernel):
data = np.ascontiguousarray(data, np.float64)
kernel = np.ascontiguousarray(kernel, np.float64)
return _filter.masked_convolution(data, mask, kernel)
def canny(image, mask, sigma, low_threshold, high_threshold):
"""Edge filter an image using the Canny algorithm.
sigma - the standard deviation of the Gaussian used
low_threshold - threshold for edges that connect to high-threshold
edges
high_threshold - threshold of a high-threshold edge
Canny, J., A Computational Approach To Edge Detection, IEEE Trans.
Pattern Analysis and Machine Intelligence, 8:679-714, 1986
William Green's Canny tutorial
http://www.pages.drexel.edu/~weg22/can_tut.html
"""
#
# The steps involved:
#
# * Smooth using the Gaussian with sigma above.
#
# * Apply the horizontal and vertical Sobel operators to get the gradients
# within the image. The edge strength is the sum of the magnitudes
# of the gradients in each direction.
#
# * Find the normal to the edge at each point using the arctangent of the
# ratio of the Y sobel over the X sobel - pragmatically, we can
# look at the signs of X and Y and the relative magnitude of X vs Y
# to sort the points into 4 categories: horizontal, vertical,
# diagonal and antidiagonal.
#
# * Look in the normal and reverse directions to see if the values
# in either of those directions are greater than the point in question.
# Use interpolation to get a mix of points instead of picking the one
# that's the closest to the normal.
#
# * Label all points above the high threshold as edges.
# * Recursively label any point above the low threshold that is 8-connected
# to a labeled point as an edge.
#
# Regarding masks, any point touching a masked point will have a gradient
# that is "infected" by the masked point, so it's enough to erode the
# mask by one and then mask the output. We also mask out the border points
# because who knows what lies beyond the edge of the image?
#
fsmooth = lambda x: gaussian_filter(x, sigma, mode="constant")
smoothed = smooth_with_function_and_mask(image, fsmooth, mask)
jsobel = convolve(smoothed, [[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
isobel = convolve(smoothed, [[-1, -2, -1], [0, 0, 0], [1, 2, 1]])
abs_isobel = np.abs(isobel)
abs_jsobel = np.abs(jsobel)
magnitude = np.sqrt(isobel * isobel + jsobel * jsobel)
#
# Make the eroded mask. Setting the border value to zero will wipe
# out the image edges for us.
#
s = generate_binary_structure(2, 2)
emask = binary_erosion(mask, s, border_value=0)
emask = np.logical_and(emask, magnitude > 0)
#
# --------- Find local maxima --------------
#
# Assign each point to have a normal of 0-45 degrees, 45-90 degrees,
# 90-135 degrees and 135-180 degrees.
#
local_maxima = np.zeros(image.shape, bool)
# ----- 0 to 45 degrees ------
pts_plus = np.logical_and(
isobel >= 0, np.logical_and(jsobel >= 0, abs_isobel >= abs_jsobel)
)
pts_minus = np.logical_and(
isobel <= 0, np.logical_and(jsobel <= 0, abs_isobel >= abs_jsobel)
)
pts = np.logical_or(pts_plus, pts_minus)
pts = np.logical_and(emask, pts)
# Get the magnitudes shifted left to make a matrix of the points to the
# right of pts. Similarly, shift left and down to get the points to the
# top right of pts.
c1 = magnitude[1:, :][pts[:-1, :]]
c2 = magnitude[1:, 1:][pts[:-1, :-1]]
m = magnitude[pts]
w = abs_jsobel[pts] / abs_isobel[pts]
c_plus = c2 * w + c1 * (1 - w) <= m
c1 = magnitude[:-1, :][pts[1:, :]]
c2 = magnitude[:-1, :-1][pts[1:, 1:]]
c_minus = c2 * w + c1 * (1 - w) <= m
local_maxima[pts] = np.logical_and(c_plus, c_minus)
# ----- 45 to 90 degrees ------
# Mix diagonal and vertical
#
pts_plus = np.logical_and(
isobel >= 0, np.logical_and(jsobel >= 0, abs_isobel <= abs_jsobel)
)
pts_minus = np.logical_and(
isobel <= 0, np.logical_and(jsobel <= 0, abs_isobel <= abs_jsobel)
)
pts = np.logical_or(pts_plus, pts_minus)
pts = np.logical_and(emask, pts)
c1 = magnitude[:, 1:][pts[:, :-1]]
c2 = magnitude[1:, 1:][pts[:-1, :-1]]
m = magnitude[pts]
w = abs_isobel[pts] / abs_jsobel[pts]
c_plus = c2 * w + c1 * (1 - w) <= m
c1 = magnitude[:, :-1][pts[:, 1:]]
c2 = magnitude[:-1, :-1][pts[1:, 1:]]
c_minus = c2 * w + c1 * (1 - w) <= m
local_maxima[pts] = np.logical_and(c_plus, c_minus)
# ----- 90 to 135 degrees ------
# Mix anti-diagonal and vertical
#
pts_plus = np.logical_and(
isobel <= 0, np.logical_and(jsobel >= 0, abs_isobel <= abs_jsobel)
)
pts_minus = np.logical_and(
isobel >= 0, np.logical_and(jsobel <= 0, abs_isobel <= abs_jsobel)
)
pts = np.logical_or(pts_plus, pts_minus)
pts = np.logical_and(emask, pts)
c1a = magnitude[:, 1:][pts[:, :-1]]
c2a = magnitude[:-1, 1:][pts[1:, :-1]]
m = magnitude[pts]
w = abs_isobel[pts] / abs_jsobel[pts]
c_plus = c2a * w + c1a * (1.0 - w) <= m
c1 = magnitude[:, :-1][pts[:, 1:]]
c2 = magnitude[1:, :-1][pts[:-1, 1:]]
c_minus = c2 * w + c1 * (1.0 - w) <= m
    local_maxima[pts] = np.logical_and(c_plus, c_minus)
# ----- 135 to 180 degrees ------
# Mix anti-diagonal and anti-horizontal
#
pts_plus = np.logical_and(
isobel <= 0, np.logical_and(jsobel >= 0, abs_isobel >= abs_jsobel)
)
pts_minus = np.logical_and(
isobel >= 0, np.logical_and(jsobel <= 0, abs_isobel >= abs_jsobel)
)
pts = np.logical_or(pts_plus, pts_minus)
pts = np.logical_and(emask, pts)
c1 = magnitude[:-1, :][pts[1:, :]]
c2 = magnitude[:-1, 1:][pts[1:, :-1]]
m = magnitude[pts]
w = abs_jsobel[pts] / abs_isobel[pts]
c_plus = c2 * w + c1 * (1 - w) <= m
c1 = magnitude[1:, :][pts[:-1, :]]
c2 = magnitude[1:, :-1][pts[:-1, 1:]]
c_minus = c2 * w + c1 * (1 - w) <= m
local_maxima[pts] = np.logical_and(c_plus, c_minus)
#
# ---- Create two masks at the two thresholds.
#
high_mask = np.logical_and(local_maxima, magnitude >= high_threshold)
low_mask = np.logical_and(local_maxima, magnitude >= low_threshold)
#
# Segment the low-mask, then only keep low-segments that have
# some high_mask component in them
#
labels, count = label(low_mask, np.ones((3, 3), bool))
if count == 0:
return low_mask
sums = fix(scind.sum(high_mask, labels, np.arange(count, dtype=np.int32) + 1))
good_label = np.zeros((count + 1,), bool)
good_label[1:] = sums > 0
output_mask = good_label[labels]
return output_mask
def roberts(image, mask=None):
"""Find edges using the Roberts algorithm
image - the image to process
mask - mask of relevant points
The algorithm returns the magnitude of the output of the two Roberts
convolution kernels.
The following is the canonical citation for the algorithm:
L. Roberts Machine Perception of 3-D Solids, Optical and
Electro-optical Information Processing, MIT Press 1965.
The following website has a tutorial on the algorithm:
http://homepages.inf.ed.ac.uk/rbf/HIPR2/roberts.htm
"""
result = np.zeros(image.shape)
#
# Four quadrants and two convolutions:
#
# q0,0 | q0,1 1 | 0 anti-diagonal
# q1,0 | q1,1 0 | -1
#
# q-1,0 | q0,0 0 | 1 diagonal
# q-1,1 | q0,1 -1 | 0
#
# Points near the mask edges and image edges are computed unreliably
# so make them zero (no edge) in the result
#
if mask is None:
mask = np.ones(image.shape, bool)
big_mask = binary_erosion(mask, generate_binary_structure(2, 2), border_value=0)
result[big_mask == False] = 0
q00 = image[:, :][big_mask]
q11 = image[1:, 1:][big_mask[:-1, :-1]]
qm11 = image[:-1, 1:][big_mask[1:, :-1]]
diagonal = q00 - qm11
anti_diagonal = q00 - q11
result[big_mask] = np.sqrt(diagonal * diagonal + anti_diagonal * anti_diagonal)
return result
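# Minimal usage sketch for roberts(): the synthetic diagonal-step image below is
# an assumption used purely for demonstration; the Roberts kernels respond most
# strongly to diagonal and anti-diagonal steps like this one.
def _example_roberts_usage():
    i, j = np.mgrid[0:32, 0:32]
    img = (i > j).astype(float)   # step edge along the main diagonal
    edges = roberts(img)          # large only for pixels adjacent to the edge
    return edges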
def sobel(image, mask=None):
"""Calculate the absolute magnitude Sobel to find the edges
image - image to process
mask - mask of relevant points
Take the square root of the sum of the squares of the horizontal and
vertical Sobels to get a magnitude that's somewhat insensitive to
direction.
Note that scipy's Sobel returns a directional Sobel which isn't
useful for edge detection in its raw form.
"""
return np.sqrt(hsobel(image, mask) ** 2 + vsobel(image, mask) ** 2)
def hsobel(image, mask=None):
"""Find the horizontal edges of an image using the Sobel transform
image - image to process
mask - mask of relevant points
We use the following kernel and return the absolute value of the
result at each point:
1 2 1
0 0 0
-1 -2 -1
"""
if mask is None:
mask = np.ones(image.shape, bool)
big_mask = binary_erosion(mask, generate_binary_structure(2, 2), border_value=0)
result = np.abs(
convolve(
image, np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]).astype(float) / 4.0
)
)
result[big_mask == False] = 0
return result
def vsobel(image, mask=None):
"""Find the vertical edges of an image using the Sobel transform
image - image to process
mask - mask of relevant points
We use the following kernel and return the absolute value of the
result at each point:
1 0 -1
2 0 -2
1 0 -1
"""
if mask is None:
mask = np.ones(image.shape, bool)
big_mask = binary_erosion(mask, generate_binary_structure(2, 2), border_value=0)
result = np.abs(
convolve(
image, np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]]).astype(float) / 4.0
)
)
result[big_mask == False] = 0
return result
def prewitt(image, mask=None):
"""Find the edge magnitude using the Prewitt transform
image - image to process
mask - mask of relevant points
Return the square root of the sum of squares of the horizontal
and vertical Prewitt transforms.
"""
return np.sqrt(hprewitt(image, mask) ** 2 + vprewitt(image, mask) ** 2)
def hprewitt(image, mask=None):
"""Find the horizontal edges of an image using the Prewitt transform
image - image to process
mask - mask of relevant points
We use the following kernel and return the absolute value of the
result at each point:
1 1 1
0 0 0
-1 -1 -1
"""
if mask is None:
mask = np.ones(image.shape, bool)
big_mask = binary_erosion(mask, generate_binary_structure(2, 2), border_value=0)
result = np.abs(
convolve(
image, np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]]).astype(float) / 3.0
)
)
result[big_mask == False] = 0
return result
def vprewitt(image, mask=None):
"""Find the vertical edges of an image using the Prewitt transform
image - image to process
mask - mask of relevant points
We use the following kernel and return the absolute value of the
result at each point:
1 0 -1
1 0 -1
1 0 -1
"""
if mask is None:
mask = np.ones(image.shape, bool)
big_mask = binary_erosion(mask, generate_binary_structure(2, 2), border_value=0)
result = np.abs(
convolve(
image, np.array([[1, 0, -1], [1, 0, -1], [1, 0, -1]]).astype(float) / 3.0
)
)
result[big_mask == False] = 0
return result
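# Minimal usage sketch for the Sobel/Prewitt family: on a vertical step edge the
# vertical-edge kernels respond while the horizontal ones stay near zero, and
# sobel()/prewitt() combine both into a direction-insensitive magnitude. The
# synthetic image is an assumption for illustration only.
def _example_sobel_prewitt_usage():
    img = np.zeros((16, 16))
    img[:, 8:] = 1.0               # vertical step edge at column 8
    v = vsobel(img)                # strong response around the edge
    h = hsobel(img)                # ~0: there are no horizontal edges
    mag = sobel(img)               # sqrt(h ** 2 + v ** 2)
    pmag = prewitt(img)            # Prewitt analogue of the same magnitude
    return v, h, mag, pmag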
def gabor(image, labels, frequency, theta):
"""Gabor-filter the objects in an image
image - 2-d grayscale image to filter
labels - a similarly shaped labels matrix
frequency - cycles per trip around the circle
theta - angle of the filter. 0 to 2 pi
Calculate the Gabor filter centered on the centroids of each object
in the image. Summing the resulting image over the labels matrix will
yield a texture measure per object.
"""
#
# The code inscribes the X and Y position of each pixel relative to
# the centroid of that pixel's object. After that, the Gabor filter
# for the image can be calculated per-pixel and the image can be
# multiplied by the filter to get the filtered image.
#
nobjects = np.max(labels)
if nobjects == 0:
return image
centers = centers_of_labels(labels)
areas = fix(
scind.sum(np.ones(image.shape), labels, np.arange(nobjects, dtype=np.int32) + 1)
)
mask = labels > 0
i, j = np.mgrid[0 : image.shape[0], 0 : image.shape[1]].astype(float)
i = i[mask]
j = j[mask]
image = image[mask]
lm = labels[mask] - 1
i -= centers[0, lm]
j -= centers[1, lm]
sigma = np.sqrt(areas / np.pi) / 3.0
sigma = sigma[lm]
g_exp = (
1000.0
/ (2.0 * np.pi * sigma ** 2)
* np.exp(-(i ** 2 + j ** 2) / (2 * sigma ** 2))
)
g_angle = 2 * np.pi / frequency * (i * np.cos(theta) + j * np.sin(theta))
g_cos = g_exp * np.cos(g_angle)
g_sin = g_exp * np.sin(g_angle)
#
# Normalize so that the sum of the filter over each object is zero
# and so that there is no bias-value within each object.
#
g_cos_mean = fix(scind.mean(g_cos, lm, np.arange(nobjects)))
i_mean = fix(scind.mean(image, lm, np.arange(nobjects)))
i_norm = image - i_mean[lm]
g_sin_mean = fix(scind.mean(g_sin, lm, np.arange(nobjects)))
g_cos -= g_cos_mean[lm]
g_sin -= g_sin_mean[lm]
    g = np.zeros(mask.shape, dtype=np.complex128)
g[mask] = i_norm * g_cos + i_norm * g_sin * 1j
return g
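# Minimal usage sketch for gabor(): it returns a complex image, and summing its
# real and imaginary parts over each labeled object gives a per-object texture
# measure as the docstring describes. The single square object, striped texture,
# frequency and theta below are assumptions for illustration only.
def _example_gabor_usage():
    labels = np.zeros((64, 64), int)
    labels[16:48, 16:48] = 1                   # one square object
    j = np.mgrid[0:64, 0:64][1].astype(float)
    image = 0.5 + 0.5 * np.sin(j / 3.0)        # striped texture
    g = gabor(image, labels, frequency=4, theta=0)
    texture_cos = scind.sum(g.real, labels, 1)
    texture_sin = scind.sum(g.imag, labels, 1)
    return texture_cos, texture_sin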
def enhance_dark_holes(image, min_radius, max_radius, mask=None):
"""Enhance dark holes using a rolling ball filter
    image - grayscale 2-d image
    min_radius, max_radius - enhance holes whose radius (in pixels) falls
                             between these two bounds
    mask - mask of relevant points
    """
#
# Do 4-connected erosion
#
se = np.array([[False, True, False], [True, True, True], [False, True, False]])
#
# Invert the intensities
#
inverted_image = image.max() - image
previous_reconstructed_image = inverted_image
eroded_image = inverted_image
smoothed_image = np.zeros(image.shape)
for i in range(max_radius + 1):
eroded_image = grey_erosion(eroded_image, mask=mask, footprint=se)
reconstructed_image = grey_reconstruction(
eroded_image, inverted_image, footprint=se
)
output_image = previous_reconstructed_image - reconstructed_image
if i >= min_radius:
smoothed_image = np.maximum(smoothed_image, output_image)
previous_reconstructed_image = reconstructed_image
return smoothed_image
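# Minimal usage sketch for enhance_dark_holes(): it brings out dark, roughly
# round holes whose radius lies between min_radius and max_radius. The synthetic
# dark spot below is an assumption for illustration only.
def _example_enhance_dark_holes_usage():
    i, j = np.mgrid[0:64, 0:64]
    img = np.ones((64, 64))
    img[(i - 32) ** 2 + (j - 32) ** 2 <= 9] = 0.2    # dark hole of radius ~3
    enhanced = enhance_dark_holes(img, min_radius=2, max_radius=5)
    return enhanced        # bright where the dark hole was, ~0 elsewhere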
def granulometry_filter(image, min_radius, max_radius, mask=None):
"""Enhances bright structures within a min and max radius using a rolling ball filter
    image - grayscale 2-d image
    min_radius, max_radius - enhance bright granules whose radius (in pixels)
                             falls between these two bounds
    mask - mask of relevant points
    """
#
# Do 4-connected erosion
#
se = np.array([[False, True, False], [True, True, True], [False, True, False]])
#
# Initialize
#
inverted_image = image.max() - image
previous_opened_image = image
eroded_image = image
selected_granules_image = np.zeros(image.shape)
#
# Select granules by successive morphological openings
#
for i in range(max_radius + 1):
eroded_image = grey_erosion(eroded_image, mask=mask, footprint=se)
opened_image = grey_dilation(eroded_image, inverted_image, footprint=se)
output_image = previous_opened_image - opened_image
if i >= min_radius:
selected_granules_image = np.maximum(selected_granules_image, output_image)
previous_opened_image = opened_image
return selected_granules_image
def circular_average_filter(image, radius, mask=None):
"""Blur an image using a circular averaging filter (pillbox)
    image - grayscale 2-d image
    radius - radius of the filter in pixels
    mask - mask of relevant points; pixels outside the mask keep their
           original values
    The filter will be within a square matrix of side 2*radius+1
    This code is translated straight from MATLAB's fspecial function
    """
crad = int(np.ceil(radius - 0.5))
x, y = np.mgrid[-crad : crad + 1, -crad : crad + 1].astype(float)
maxxy = np.maximum(abs(x), abs(y))
minxy = np.minimum(abs(x), abs(y))
m1 = (radius ** 2 < (maxxy + 0.5) ** 2 + (minxy - 0.5) ** 2) * (minxy - 0.5) + (
radius ** 2 >= (maxxy + 0.5) ** 2 + (minxy - 0.5) ** 2
) * np.real(np.sqrt(np.asarray(radius ** 2 - (maxxy + 0.5) ** 2, dtype=complex)))
m2 = (radius ** 2 > (maxxy - 0.5) ** 2 + (minxy + 0.5) ** 2) * (minxy + 0.5) + (
radius ** 2 <= (maxxy - 0.5) ** 2 + (minxy + 0.5) ** 2
) * np.real(np.sqrt(np.asarray(radius ** 2 - (maxxy - 0.5) ** 2, dtype=complex)))
sgrid = (
radius ** 2
* (
0.5 * (np.arcsin(m2 / radius) - np.arcsin(m1 / radius))
+ 0.25
* (np.sin(2 * np.arcsin(m2 / radius)) - np.sin(2 * np.arcsin(m1 / radius)))
)
- (maxxy - 0.5) * (m2 - m1)
+ (m1 - minxy + 0.5)
) * (
(
(
(radius ** 2 < (maxxy + 0.5) ** 2 + (minxy + 0.5) ** 2)
& (radius ** 2 > (maxxy - 0.5) ** 2 + (minxy - 0.5) ** 2)
)
| ((minxy == 0) & (maxxy - 0.5 < radius) & (maxxy + 0.5 >= radius))
)
)
sgrid = sgrid + ((maxxy + 0.5) ** 2 + (minxy + 0.5) ** 2 < radius ** 2)
sgrid[crad, crad] = np.minimum(np.pi * radius ** 2, np.pi / 2)
if (
(crad > 0)
and (radius > crad - 0.5)
and (radius ** 2 < (crad - 0.5) ** 2 + 0.25)
):
m1 = np.sqrt(radius ** 2 - (crad - 0.5) ** 2)
m1n = m1 / radius
sg0 = 2 * (
radius ** 2 * (0.5 * np.arcsin(m1n) + 0.25 * np.sin(2 * np.arcsin(m1n)))
- m1 * (crad - 0.5)
)
sgrid[2 * crad, crad] = sg0
sgrid[crad, 2 * crad] = sg0
sgrid[crad, 0] = sg0
sgrid[0, crad] = sg0
sgrid[2 * crad - 1, crad] = sgrid[2 * crad - 1, crad] - sg0
sgrid[crad, 2 * crad - 1] = sgrid[crad, 2 * crad - 1] - sg0
sgrid[crad, 1] = sgrid[crad, 1] - sg0
sgrid[1, crad] = sgrid[1, crad] - sg0
sgrid[crad, crad] = np.minimum(sgrid[crad, crad], 1)
kernel = sgrid / sgrid.sum()
if mask is None:
mask = np.ones(image.shape, np.uint8)
else:
mask = np.array(mask, np.uint8)
output = masked_convolution(image, mask, kernel)
output[mask == 0] = image[mask == 0]
return output
#######################################
#
# Structure and ideas for the Kalman filter derived from u-track
# as described in
#
# Jaqaman, "Robust single-particle tracking in live-cell
# time-lapse sequences", NATURE METHODS | VOL.5 NO.8 | AUGUST 2008
#
#######################################
class KalmanState(object):
"""A data structure representing the state at a frame
The original method uses "feature" to mean the same thing as
CellProfiler's "object".
The state vector is somewhat abstract: it's up to the caller to
determine what each of the indices mean. For instance, in a model
with a 2-d position and velocity component, the state might be
i, j, di, dj
.observation_matrix - matrix to transform the state vector into the
observation vector. The observation matrix gives
the dimensions of the observation vector from its
i-shape and the dimensions of the state vector
from its j-shape. For observations of position
and states with velocity, the observation matrix
might be:
np.array([[1,0,0,0],
[0,1,0,0]])
.translation_matrix - matrix to translate the state vector from t-1 to t
For instance, the translation matrix for position
and velocity might be:
np.array([[1,0,1,0],
[0,1,0,1],
[0,0,1,0],
[0,0,0,1]])
.state_vec - an array of vectors per feature
.state_cov - the covariance matrix yielding the prediction. Each feature
has a 4x4 matrix that can be used to predict the new value
.noise_var - the variance of the state noise for each feature for each
vector element
.state_noise - a N x 4 array: the state noise for the i, j, vi and vj
.state_noise_idx - the feature indexes for each state noise vector
.obs_vec - the prediction for the observed variables
"""
def __init__(
self,
observation_matrix,
translation_matrix,
state_vec=None,
state_cov=None,
noise_var=None,
state_noise=None,
state_noise_idx=None,
):
self.observation_matrix = observation_matrix
self.translation_matrix = translation_matrix
if state_vec is not None:
self.state_vec = state_vec
else:
self.state_vec = np.zeros((0, self.state_len))
if state_cov is not None:
self.state_cov = state_cov
else:
self.state_cov = np.zeros((0, self.state_len, self.state_len))
if noise_var is not None:
self.noise_var = noise_var
else:
self.noise_var = np.zeros((0, self.state_len))
if state_noise is not None:
self.state_noise = state_noise
else:
self.state_noise = np.zeros((0, self.state_len))
if state_noise_idx is not None:
self.state_noise_idx = state_noise_idx
else:
self.state_noise_idx = np.zeros(0, int)
@property
def state_len(self):
"""# of elements in the state vector"""
return self.observation_matrix.shape[1]
@property
def obs_len(self):
"""# of elements in the observation vector"""
return self.observation_matrix.shape[0]
@property
def has_cached_predicted_state_vec(self):
"""True if next state vec has been calculated"""
return hasattr(self, "p_state_vec")
@property
def predicted_state_vec(self):
"""The predicted state vector for the next time point
From Welch eqn 1.9
"""
if not self.has_cached_predicted_state_vec:
self.p_state_vec = dot_n(
self.translation_matrix, self.state_vec[:, :, np.newaxis]
)[:, :, 0]
return self.p_state_vec
@property
def has_cached_obs_vec(self):
"""True if the observation vector for the next state has been calculated"""
return hasattr(self, "obs_vec")
@property
def predicted_obs_vec(self):
"""The predicted observation vector
The observation vector for the next step in the filter.
"""
if not self.has_cached_obs_vec:
self.obs_vec = dot_n(
self.observation_matrix, self.predicted_state_vec[:, :, np.newaxis]
)[:, :, 0]
return self.obs_vec
def map_frames(self, old_indices):
"""Rewrite the feature indexes based on the next frame's identities
old_indices - for each feature in the new frame, the index of the
old feature
"""
nfeatures = len(old_indices)
noldfeatures = len(self.state_vec)
if nfeatures > 0:
self.state_vec = self.state_vec[old_indices]
self.state_cov = self.state_cov[old_indices]
self.noise_var = self.noise_var[old_indices]
if self.has_cached_obs_vec:
self.obs_vec = self.obs_vec[old_indices]
if self.has_cached_predicted_state_vec:
self.p_state_vec = self.p_state_vec[old_indices]
if len(self.state_noise_idx) > 0:
#
# We have to renumber the new_state_noise indices and get rid
# of those that don't map to numbers. Typical index trick here:
# * create an array for each legal old element: -1 = no match
# * give each old element in the array the new number
# * Filter out the "no match" elements.
#
reverse_indices = -np.ones(noldfeatures, int)
reverse_indices[old_indices] = np.arange(nfeatures)
self.state_noise_idx = reverse_indices[self.state_noise_idx]
self.state_noise = self.state_noise[self.state_noise_idx != -1, :]
self.state_noise_idx = self.state_noise_idx[self.state_noise_idx != -1]
def add_features(
self, kept_indices, new_indices, new_state_vec, new_state_cov, new_noise_var
):
"""Add new features to the state
kept_indices - the mapping from all indices in the state to new
indices in the new version
new_indices - the indices of the new features in the new version
new_state_vec - the state vectors for the new indices
new_state_cov - the covariance matrices for the new indices
new_noise_var - the noise variances for the new indices
"""
assert len(kept_indices) == len(self.state_vec)
assert len(new_indices) == len(new_state_vec)
assert len(new_indices) == len(new_state_cov)
assert len(new_indices) == len(new_noise_var)
if self.has_cached_obs_vec:
del self.obs_vec
if self.has_cached_predicted_state_vec:
            del self.p_state_vec
nfeatures = len(kept_indices) + len(new_indices)
next_state_vec = np.zeros((nfeatures, self.state_len))
next_state_cov = np.zeros((nfeatures, self.state_len, self.state_len))
next_noise_var = np.zeros((nfeatures, self.state_len))
if len(kept_indices) > 0:
next_state_vec[kept_indices] = self.state_vec
next_state_cov[kept_indices] = self.state_cov
next_noise_var[kept_indices] = self.noise_var
if len(self.state_noise_idx) > 0:
self.state_noise_idx = kept_indices[self.state_noise_idx]
if len(new_indices) > 0:
next_state_vec[new_indices] = new_state_vec
next_state_cov[new_indices] = new_state_cov
next_noise_var[new_indices] = new_noise_var
self.state_vec = next_state_vec
self.state_cov = next_state_cov
self.noise_var = next_noise_var
def deep_copy(self):
"""Return a deep copy of the state"""
c = KalmanState(self.observation_matrix, self.translation_matrix)
c.state_vec = self.state_vec.copy()
c.state_cov = self.state_cov.copy()
c.noise_var = self.noise_var.copy()
c.state_noise = self.state_noise.copy()
c.state_noise_idx = self.state_noise_idx.copy()
return c
LARGE_KALMAN_COV = 2000
SMALL_KALMAN_COV = 1
def velocity_kalman_model():
"""Return a KalmanState set up to model objects with constant velocity
The observation and measurement vectors are i,j.
The state vector is i,j,vi,vj
"""
om = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
tm = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]])
return KalmanState(om, tm)
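# Minimal sketch of the constant-velocity model matrices: multiplying the
# translation matrix by a state [i, j, vi, vj] advances the position by one step
# of velocity, and the observation matrix picks out the observable (i, j). The
# numbers below are assumptions used for illustration only.
def _example_velocity_model_matrices():
    ks = velocity_kalman_model()
    state = np.array([10.0, 20.0, 1.0, -2.0])             # i, j, vi, vj
    predicted = np.dot(ks.translation_matrix, state)       # [11., 18., 1., -2.]
    observed = np.dot(ks.observation_matrix, predicted)    # [11., 18.]
    return predicted, observed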
def reverse_velocity_kalman_model():
"""Return a KalmanState set up to model going backwards in time"""
om = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
tm = np.array([[1, 0, -1, 0], [0, 1, 0, -1], [0, 0, 1, 0], [0, 0, 0, 1]])
return KalmanState(om, tm)
def static_kalman_model():
"""Return a KalmanState set up to model objects whose motion is random
The observation, measurement and state vectors are all i,j
"""
return KalmanState(np.eye(2), np.eye(2))
def kalman_filter(kalman_state, old_indices, coordinates, q, r):
"""Return the kalman filter for the features in the new frame
kalman_state - state from last frame
old_indices - the index per feature in the last frame or -1 for new
coordinates - Coordinates of the features in the new frame.
q - the process error covariance - see equ 1.3 and 1.10 from Welch
r - measurement error covariance of features - see eqn 1.7 and 1.8 from welch.
returns a new KalmanState containing the kalman filter of
the last state by the given coordinates.
Refer to kalmanGainLinearMotion.m and
http://www.cs.unc.edu/~welch/media/pdf/kalman_intro.pdf
for info on the algorithm.
"""
assert isinstance(kalman_state, KalmanState)
old_indices = np.array(old_indices)
if len(old_indices) == 0:
return KalmanState(
kalman_state.observation_matrix, kalman_state.translation_matrix
)
#
# Cull missing features in old state and collect only matching coords
#
matching = old_indices != -1
new_indices = np.arange(len(old_indices))[~matching]
retained_indices = np.arange(len(old_indices))[matching]
new_coords = coordinates[new_indices]
observation_matrix_t = kalman_state.observation_matrix.transpose()
if len(retained_indices) > 0:
kalman_state = kalman_state.deep_copy()
coordinates = coordinates[retained_indices]
kalman_state.map_frames(old_indices[retained_indices])
#
# Time update equations
#
# From eqn 1.9 of Welch
#
state_vec = kalman_state.predicted_state_vec
#
# From eqn 1.10 of Welch
#
state_cov = (
dot_n(
dot_n(kalman_state.translation_matrix, kalman_state.state_cov),
kalman_state.translation_matrix.transpose(),
)
+ q[matching]
)
#
# From eqn 1.11 of welch
#
kalman_gain_numerator = dot_n(state_cov, observation_matrix_t)
kalman_gain_denominator = (
dot_n(
dot_n(kalman_state.observation_matrix, state_cov), observation_matrix_t
)
+ r[matching]
)
kalman_gain_denominator = inv_n(kalman_gain_denominator)
kalman_gain = dot_n(kalman_gain_numerator, kalman_gain_denominator)
#
# Eqn 1.12 of Welch
#
difference = (
coordinates
- dot_n(kalman_state.observation_matrix, state_vec[:, :, np.newaxis])[
:, :, 0
]
)
state_noise = dot_n(kalman_gain, difference[:, :, np.newaxis])[:, :, 0]
state_vec = state_vec + state_noise
#
# Eqn 1.13 of Welch (factored from (I - KH)P to P - KHP)
#
state_cov = state_cov - dot_n(
dot_n(kalman_gain, kalman_state.observation_matrix), state_cov
)
#
# Collect all of the state noise in one array. We produce an I and J
# variance. Notes in kalmanGainLinearMotion indicate that you
# might want a single variance, combining I & J. An alternate
# might be R and theta, a variance of angular consistency and one
# of absolute velocity.
#
# Add an index to the state noise in the rightmost column
#
idx = np.arange(len(state_noise))
#
# Stack the rows with the old ones
#
all_state_noise = np.vstack((kalman_state.state_noise, state_noise))
all_state_noise_idx = np.hstack((kalman_state.state_noise_idx, idx))
noise_var = np.zeros((len(idx), all_state_noise.shape[1]))
for i in range(all_state_noise.shape[1]):
noise_var[:, i] = fix(
scind.variance(all_state_noise[:, i], all_state_noise_idx, idx)
)
obs_vec = dot_n(kalman_state.observation_matrix, state_vec[:, :, np.newaxis])[
:, :, 0
]
kalman_state = KalmanState(
kalman_state.observation_matrix,
kalman_state.translation_matrix,
state_vec,
state_cov,
noise_var,
all_state_noise,
all_state_noise_idx,
)
else:
# Erase all previous features
kalman_state = KalmanState(
kalman_state.observation_matrix, kalman_state.translation_matrix
)
if len(new_coords) > 0:
#
# Fill in the initial states:
#
state_vec = dot_n(observation_matrix_t, new_coords[:, :, np.newaxis])[:, :, 0]
#
# The COV for the hidden, undetermined features should be large
# and the COV for others should be small
#
nstates = kalman_state.state_len
nnew_features = len(new_indices)
cov_vec = SMALL_KALMAN_COV / np.dot(
observation_matrix_t, np.ones(kalman_state.obs_len)
)
cov_vec[~np.isfinite(cov_vec)] = LARGE_KALMAN_COV
cov_matrix = np.diag(cov_vec)
state_cov = cov_matrix[np.newaxis, :, :][np.zeros(nnew_features, int)]
#
        # The noise variance is all ones in Jaqaman
#
noise_var = np.ones((len(new_indices), kalman_state.state_len))
#
# Map the retained indices to their new slots and new ones to empty
# slots (=-1)
#
kalman_state.add_features(
retained_indices, new_indices, state_vec, state_cov, noise_var
)
return kalman_state
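# Minimal two-frame sketch of kalman_filter() with the constant-velocity model.
# The per-feature process/measurement covariances q and r and the coordinates
# below are assumptions chosen only to show the expected array shapes and the
# call sequence, not values from any real tracking run.
def _example_kalman_filter_usage():
    ks = velocity_kalman_model()
    coords0 = np.array([[10.0, 10.0], [20.0, 5.0]])
    q = np.tile(np.eye(4)[np.newaxis] * 0.25, (2, 1, 1))   # (nfeatures, 4, 4)
    r = np.tile(np.eye(2)[np.newaxis] * 0.25, (2, 1, 1))   # (nfeatures, 2, 2)
    # Frame 0: every feature is new, so every old index is -1
    ks = kalman_filter(ks, np.array([-1, -1]), coords0, q, r)
    # Frame 1: feature n matches old feature n; both moved by (1, 2)
    coords1 = coords0 + np.array([1.0, 2.0])
    ks = kalman_filter(ks, np.array([0, 1]), coords1, q, r)
    return ks.predicted_obs_vec        # predicted (i, j) for the next frame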
def line_integration(image, angle, decay, sigma):
"""Integrate the image along the given angle
DIC images are the directional derivative of the underlying
image. This filter reconstructs the original image by integrating
along that direction.
image - a 2-dimensional array
angle - shear angle in radians. We integrate perpendicular to this angle
decay - an exponential decay applied to the integration
sigma - the standard deviation of a Gaussian which is used to
smooth the image in the direction parallel to the shear angle.
"""
#
# Normalize the image so that the mean is zero
#
normalized = image - np.mean(image)
#
# Rotate the image so the J direction is perpendicular to the shear angle.
#
rotated = scind.rotate(normalized, -angle)
#
# Smooth in only the i direction
#
smoothed = scind.gaussian_filter1d(rotated, sigma) if sigma > 0 else rotated
#
# We want img_out[:,j+1] to be img_out[:,j] * decay + img[j+1]
# Could be done by convolution with a ramp, maybe in FFT domain,
# but we just do a bunch of steps here.
#
result_fwd = smoothed.copy()
for i in range(1, result_fwd.shape[0]):
result_fwd[i] += result_fwd[i - 1] * decay
result_rev = smoothed.copy()
for i in reversed(range(result_rev.shape[0] - 1)):
result_rev[i] += result_rev[i + 1] * decay
result = (result_fwd - result_rev) / 2
#
# Rotate and chop result
#
result = scind.rotate(result, angle)
ipad = int((result.shape[0] - image.shape[0]) / 2)
jpad = int((result.shape[1] - image.shape[1]) / 2)
result = result[ipad : (ipad + image.shape[0]), jpad : (jpad + image.shape[1])]
#
# Scale the resultant image similarly to the output.
#
img_min, img_max = np.min(image), np.max(image)
result_min, result_max = np.min(result), np.max(result)
if (img_min == img_max) or (result_min == result_max):
return np.zeros(result.shape)
result = (result - result_min) / (result_max - result_min)
result = img_min + result * (img_max - img_min)
return result
def variance_transform(img, sigma, mask=None):
"""Calculate a weighted variance of the image
    This function calculates the variance of an image, weighting the
local contributions by a Gaussian.
img - image to be transformed
sigma - standard deviation of the Gaussian
mask - mask of relevant pixels in the image
"""
if mask is None:
mask = np.ones(img.shape, bool)
else:
img = img.copy()
img[~mask] = 0
#
# This is the Gaussian of the mask... so we can normalize for
# pixels near the edge of the mask
#
gmask = scind.gaussian_filter(mask.astype(float), sigma, mode="constant")
img_mean = scind.gaussian_filter(img, sigma, mode="constant") / gmask
img_squared = scind.gaussian_filter(img ** 2, sigma, mode="constant") / gmask
var = img_squared - img_mean ** 2
return var
# var = var[kernel_half_width:(kernel_half_width + img.shape[0]),
# kernel_half_width:(kernel_half_width + img.shape[0])]
# ik = ik.ravel()
# jk = jk.ravel()
# gk = np.exp(-(ik*ik + jk*jk) / (2 * sigma * sigma))
# gk = (gk / np.sum(gk)).astype(np.float32)
## We loop here in chunks of 32 x 32 because the kernel can get large.
## Remove this loop in 2025 when Numpy can grok the big object itself
## and construct the loop and run it on 1,000,000 GPU cores
##
# var = np.zeros(img.shape, np.float32)
# for ioff in range(0, img.shape[0], 32):
# for joff in range(0, img.shape[1], 32):
##
## ib and jb give addresses of the center pixel in the big image
##
# iend = min(ioff+32, img.shape[0])
# jend = min(joff+32, img.shape[1])
# ii = np.arange(ioff, iend)
# ib = ii + kernel_half_width
# jj = np.arange(joff, jend)
# jb = jj + kernel_half_width
##
## Axes 0 and 1 are the axes of the final array and rely on ib and jb
## to find the centers of the kernels in the big image.
##
## Axis 2 iterates over the elements and offsets in the kernel.
##
## We multiply each kernel contribution by the Gaussian gk to weight
## the kernel pixel's contribution. We multiply each contribution
## by its truth value in the mask to cross out border pixels.
##
# norm_chunk = (
# big_img[
# ib[:,np.newaxis,np.newaxis] + ik[np.newaxis, np.newaxis,:],
# jb[np.newaxis,:,np.newaxis] + jk[np.newaxis, np.newaxis,:]] -
# img_mean[ib[:,np.newaxis,np.newaxis],
# jb[np.newaxis,:,np.newaxis]])
# var[ii[:,np.newaxis],jj[np.newaxis,:]] = np.sum(
# norm_chunk * norm_chunk *
# gk[np.newaxis, np.newaxis,:] *
# big_mask[ib[:,np.newaxis,np.newaxis] +
# ik[np.newaxis, np.newaxis,:],
# jb[np.newaxis,:,np.newaxis] +
# jk[np.newaxis, np.newaxis,:]], 2)
##
## Finally, we divide by the Gaussian of the mask to normalize for
## pixels without contributions from masked pixels in their kernel.
##
# var /= gmask[kernel_half_width:(kernel_half_width+var.shape[0]),
# kernel_half_width:(kernel_half_width+var.shape[1])]
# return var
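# Minimal usage sketch for variance_transform(): the Gaussian-weighted variance
# is near zero inside flat regions and large near intensity changes, so it can
# serve as a simple texture/focus measure. The test image and sigma below are
# assumptions for illustration only.
def _example_variance_transform_usage():
    img = np.zeros((64, 64))
    img[:, 32:] = 1.0                        # two flat halves, one step edge
    var = variance_transform(img, sigma=3)
    return var                               # ~0 away from the edge, peaks at it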
def inv_n(x):
"""given N matrices, return N inverses"""
#
# The inverse of a small matrix (e.g. 3x3) is
#
# 1
# ----- C(j,i)
# det(A)
#
# where C(j,i) is the cofactor of matrix A at position j,i
#
assert x.ndim == 3
assert x.shape[1] == x.shape[2]
c = np.array(
[
[cofactor_n(x, j, i) * (1 - ((i + j) % 2) * 2) for j in range(x.shape[1])]
for i in range(x.shape[1])
]
).transpose(2, 0, 1)
return c / det_n(x)[:, np.newaxis, np.newaxis]
def det_n(x):
"""given N matrices, return N determinants"""
assert x.ndim == 3
assert x.shape[1] == x.shape[2]
if x.shape[1] == 1:
return x[:, 0, 0]
result = np.zeros(x.shape[0])
for permutation in permutations(np.arange(x.shape[1])):
sign = parity(permutation)
result += (
np.prod([x[:, i, permutation[i]] for i in range(x.shape[1])], 0) * sign
)
return result
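# Minimal sketch for det_n/inv_n: they operate on a stack of N small matrices at
# once and, for each matrix, agree with numpy.linalg. The random, well-conditioned
# stack below is an assumption for demonstration only.
def _example_det_inv_n():
    rs = np.random.RandomState(0)
    x = rs.uniform(size=(5, 3, 3)) + np.eye(3)   # 5 well-conditioned 3x3 matrices
    dets = det_n(x)       # ~ np.array([np.linalg.det(m) for m in x])
    invs = inv_n(x)       # ~ np.array([np.linalg.inv(m) for m in x])
    return dets, invs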
def parity(x):
"""The parity of a permutation
The parity of a permutation is even if the permutation can be
formed by an even number of transpositions and is odd otherwise.
The parity of a permutation is even if there are an even number of
compositions of even size and odd otherwise. A composition is a cycle:
for instance in (1, 2, 0, 3), there is the cycle: (0->1, 1->2, 2->0)
and the cycle, (3->3). Both cycles are odd, so the parity is even:
you can exchange 0 and 1 giving (0, 2, 1, 3) and 2 and 1 to get
(0, 1, 2, 3)
"""
order = np.lexsort((x,))
hit = np.zeros(len(x), bool)
p = 0
for j in range(len(x)):
if not hit[j]:
cycle = 1
i = order[j]
# mark every node in a cycle
while i != j:
hit[i] = True
i = order[i]
cycle += 1
p += cycle - 1
return 1 if p % 2 == 0 else -1
def cofactor_n(x, i, j):
"""Return the cofactor of n matrices x[n,i,j] at position i,j
The cofactor is the determinant of the matrix formed by removing
row i and column j.
"""
m = x.shape[1]
mr = np.arange(m)
i_idx = mr[mr != i]
j_idx = mr[mr != j]
return det_n(x[:, i_idx[:, np.newaxis], j_idx[np.newaxis, :]])
def dot_n(x, y):
"""given two tensors N x I x K and N x K x J return N dot products
If either x or y is 2-dimensional, broadcast it over all N.
Dot products are size N x I x J.
Example:
x = np.array([[[1,2], [3,4], [5,6]],[[7,8], [9,10],[11,12]]])
y = np.array([[[1,2,3], [4,5,6]],[[7,8,9],[10,11,12]]])
print dot_n(x,y)
array([[[ 9, 12, 15],
[ 19, 26, 33],
[ 29, 40, 51]],
[[129, 144, 159],
[163, 182, 201],
[197, 220, 243]]])
"""
if x.ndim == 2:
if y.ndim == 2:
return np.dot(x, y)
x3 = False
y3 = True
nlen = y.shape[0]
elif y.ndim == 2:
nlen = x.shape[0]
x3 = True
y3 = False
else:
assert x.shape[0] == y.shape[0]
nlen = x.shape[0]
x3 = True
y3 = True
assert x.shape[1 + x3] == y.shape[0 + y3]
n, i, j, k = np.mgrid[
0:nlen, 0 : x.shape[0 + x3], 0 : y.shape[1 + y3], 0 : y.shape[0 + y3]
]
return np.sum((x[n, i, k] if x3 else x[i, k]) * (y[n, k, j] if y3 else y[k, j]), 3)
def permutations(x):
"""Given a listlike, x, return all permutations of x
Returns the permutations of x in the lexical order of their indices:
e.g.
>>> x = [ 1, 2, 3, 4 ]
>>> for p in permutations(x):
>>> print p
[ 1, 2, 3, 4 ]
[ 1, 2, 4, 3 ]
[ 1, 3, 2, 4 ]
[ 1, 3, 4, 2 ]
[ 1, 4, 2, 3 ]
[ 1, 4, 3, 2 ]
[ 2, 1, 3, 4 ]
...
[ 4, 3, 2, 1 ]
"""
#
# The algorithm is attributed to Narayana Pandit from his
# Ganita Kaumundi (1356). The following is from
#
# http://en.wikipedia.org/wiki/Permutation#Systematic_generation_of_all_permutations
#
# 1. Find the largest index k such that a[k] < a[k + 1].
# If no such index exists, the permutation is the last permutation.
# 2. Find the largest index l such that a[k] < a[l].
# Since k + 1 is such an index, l is well defined and satisfies k < l.
# 3. Swap a[k] with a[l].
# 4. Reverse the sequence from a[k + 1] up to and including the final
# element a[n].
#
yield list(x) # don't forget to do the first one
x = np.array(x)
a = np.arange(len(x))
while True:
# 1 - find largest or stop
ak_lt_ak_next = np.argwhere(a[:-1] < a[1:])
if len(ak_lt_ak_next) == 0:
return
k = ak_lt_ak_next[-1, 0]
# 2 - find largest a[l] < a[k]
ak_lt_al = np.argwhere(a[k] < a)
l = ak_lt_al[-1, 0]
# 3 - swap
a[k], a[l] = (a[l], a[k])
# 4 - reverse
if k < len(x) - 1:
a[k + 1 :] = a[:k:-1].copy()
yield x[a].tolist()
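# Minimal sketch for permutations(): for a sorted input it yields the same
# sequences as itertools.permutations, in the same lexicographic order, which is
# what the Narayana Pandit steps above produce. The input list is an assumption.
def _example_permutations_usage():
    import itertools
    ours = [tuple(p) for p in permutations([1, 2, 3])]
    stdlib = list(itertools.permutations([1, 2, 3]))
    return ours == stdlib        # True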
def convex_hull_transform(
image, levels=256, mask=None, chunksize=CONVEX_HULL_CHUNKSIZE, pass_cutoff=16
):
"""Perform the convex hull transform of this image
image - image composed of integer intensity values
levels - # of levels that we separate the image into
mask - mask of points to consider or None to consider all points
chunksize - # of points processed in first pass of convex hull
for each intensity value, find the convex hull of pixels at or above
that value and color all pixels within the hull with that value.
"""
# Scale the image into the requisite number of levels
if mask is None:
img_min = np.min(image)
img_max = np.max(image)
else:
unmasked_pixels = image[mask]
if len(unmasked_pixels) == 0:
return np.zeros(image.shape, image.dtype)
img_min = np.min(unmasked_pixels)
img_max = np.max(unmasked_pixels)
img_shape = tuple(image.shape)
if img_min == img_max:
return image
scale = img_min + np.arange(levels).astype(image.dtype) * (
img_max - img_min
) / float(levels - 1)
image = (image - img_min) * (levels - 1) / (img_max - img_min)
if mask is not None:
image[~mask] = 0
#
# If there are more than 16 levels, we do the method first at a coarse
# scale. The dark objects can produce points at every level, so doing
# two passes can reduce the number of points in the second pass to
# only the difference between two levels at the coarse pass.
#
if levels > pass_cutoff:
sub_levels = int(np.sqrt(levels))
rough_image = convex_hull_transform(np.floor(image), sub_levels)
image = np.maximum(image, rough_image)
del rough_image
image = image.astype(int)
#
# Get rid of any levels that have no representatives
#
unique = np.unique(image)
new_values = np.zeros(levels, int)
new_values[unique] = np.arange(len(unique))
scale = scale[unique]
image = new_values[image]
#
# Start by constructing the list of points which are local maxima
#
min_image = grey_erosion(image, footprint=np.ones((3, 3), bool)).astype(int)
#
# Set the borders of the min_image to zero so that the border pixels
# will be in all convex hulls below their intensity
#
min_image[0, :] = 0
min_image[-1, :] = 0
min_image[:, 0] = 0
min_image[:, -1] = 0
i, j = np.mgrid[0 : image.shape[0], 0 : image.shape[1]]
mask = image > min_image
i = i[mask]
j = j[mask]
min_image = min_image[mask]
image = image[mask]
#
# Each point that is a maximum is a potential vertex in the convex hull
# for each value above the minimum. Therefore, it appears
#
# image - min_image
#
# times in the i,j,v list of points. So we can do a sum to calculate
# the number of points, then use cumsum to figure out the first index
# in that array of points for each i,j,v. We can then use cumsum
# again on the array of points to assign their levels.
#
count = image - min_image
npoints = np.sum(count)
# The index in the big array of the first point to place for each
# point
first_index_in_big = np.cumsum(count) - count
#
# The big array can be quite big, for example if there are lots of
# thin, dark objects. We do two passes of convex hull: the convex hull
# of the convex hulls of several regions is the convex hull of the whole
# so it doesn't matter too much how we break up the array.
#
first_i = np.zeros(0, int)
first_j = np.zeros(0, int)
first_levels = np.zeros(0, int)
chunkstart = 0
while chunkstart < len(count):
idx = first_index_in_big[chunkstart]
iend = idx + chunksize
if iend >= npoints:
chunkend = len(count)
iend = npoints
else:
chunkend = np.searchsorted(first_index_in_big, iend)
if chunkend < len(count):
iend = first_index_in_big[chunkend]
else:
iend = npoints
chunk_first_index_in_big = first_index_in_big[chunkstart:chunkend] - idx
chunkpoints = iend - idx
#
# For the big array, construct an array of indexes into the small array
#
index_in_small = np.zeros(chunkpoints, int)
index_in_small[0] = chunkstart
index_in_small[chunk_first_index_in_big[1:]] = 1
index_in_small = np.cumsum(index_in_small)
#
# We're going to do a cumsum to make the big array of levels. Point
# n+1 broadcasts its first value into first_index_in_big[n+1].
# The value that precedes it is image[n]. Therefore, in order to
# get the correct value in cumsum:
#
# ? + image[n] = min_image[n+1]+1
# ? = min_image[n+1] + 1 - image[n]
#
levels = np.ones(chunkpoints, int)
levels[0] = min_image[chunkstart] + 1
levels[chunk_first_index_in_big[1:]] = (
min_image[chunkstart + 1 : chunkend] - image[chunkstart : chunkend - 1] + 1
)
levels = np.cumsum(levels)
#
# Construct the ijv
#
ijv = np.column_stack((i[index_in_small], j[index_in_small], levels))
#
# Get all of the convex hulls
#
pts, counts = convex_hull_ijv(ijv, np.arange(1, len(unique)))
first_i = np.hstack((first_i, pts[:, 1]))
first_j = np.hstack((first_j, pts[:, 2]))
first_levels = np.hstack((first_levels, pts[:, 0]))
chunkstart = chunkend
#
# Now do the convex hull of the reduced list of points
#
ijv = np.column_stack((first_i, first_j, first_levels))
pts, counts = convex_hull_ijv(ijv, np.arange(1, len(unique)))
#
# Get the points along the lines described by the convex hulls
#
# There are N points for each label. Draw a line from each to
# the next, except for the last which we draw from last to first
#
labels = pts[:, 0]
i = pts[:, 1]
j = pts[:, 2]
first_index = np.cumsum(counts) - counts
last_index = first_index + counts - 1
next = np.arange(len(labels)) + 1
next[last_index] = first_index
index, count, i, j = get_line_pts(i, j, i[next], j[next])
#
# use a cumsum to get the index of each point from get_line_pts
# relative to the labels vector
#
big_index = np.zeros(len(i), int)
big_index[index[1:]] = 1
big_index = np.cumsum(big_index)
labels = labels[big_index]
#
# A given i,j might be represented more than once. Take the maximum
# label at each i,j. First sort by i,j and label. Then take only values
# that have a different i,j than the succeeding value. The last element
# is always a winner.
#
order = np.lexsort((labels, i, j))
i = i[order]
j = j[order]
labels = labels[order]
mask = np.hstack((((i[1:] != i[:-1]) | (j[1:] != j[:-1])), [True]))
i = i[mask]
j = j[mask]
labels = labels[mask]
#
# Now, we have an interesting object. It's ordered by j, then i which
# means that we have scans of interesting i at each j. The points
# that aren't represented should have the minimum of the values
# above and below.
#
# We can play a cumsum trick to do this, placing the difference
# of a point with its previous in the 2-d image, then summing along
# each i axis to set empty values to the value of the nearest occupied
# value above and a similar trick to set empty values to the nearest
# value below. We then take the minimum of the two results.
#
first = np.hstack(([True], j[1:] != j[:-1]))
top = np.zeros(img_shape, labels.dtype)
top[i[first], j[first]] = labels[first]
top[i[~first], j[~first]] = (labels[1:] - labels[:-1])[~first[1:]]
top = np.cumsum(top, 0)
# From 0 to the location of the first point, set to value of first point
bottom = np.zeros(img_shape, labels.dtype)
bottom[0, j[first]] = labels[first]
# From 1 + the location of the previous point, set to the next point
last = np.hstack((first[1:], [True]))
bottom[i[:-1][~first[1:]] + 1, j[~first]] = (labels[1:] - labels[:-1])[~first[1:]]
# Set 1 + the location of the last point to -labels so that all past
# the end will be zero (check for i at end...)
llast = last & (i < img_shape[0] - 1)
bottom[i[llast] + 1, j[llast]] = -labels[llast]
bottom = np.cumsum(bottom, 0)
image = np.minimum(top, bottom)
return scale[image]
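# Minimal usage sketch for convex_hull_transform(): it fills concavities at every
# grey level, so a bright "C"-shaped object comes back with its opening filled at
# roughly that object's intensity. The toy image and the reduced number of levels
# are assumptions for illustration only.
def _example_convex_hull_transform_usage():
    img = np.zeros((64, 64))
    img[16:48, 16:48] = 0.8            # bright square...
    img[24:40, 32:48] = 0.0            # ...with a notch cut from its right side
    filled = convex_hull_transform(img, levels=16)
    return filled                      # the notch comes back near 0.8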
def circular_hough(img, radius, nangles=None, mask=None):
"""Circular Hough transform of an image
img - image to be transformed.
radius - radius of circle
nangles - # of angles to measure, e.g. nangles = 4 means accumulate at
0, 90, 180 and 270 degrees.
Return the Hough transform of the image which is the accumulators
for the transform x + r cos t, y + r sin t.
"""
a = np.zeros(img.shape)
m = np.zeros(img.shape)
if nangles is None:
# if no angle specified, take the circumference
# Round to a multiple of 4 to make it bilaterally stable
nangles = int(np.pi * radius + 3.5) & (~3)
for i in range(nangles):
theta = 2 * np.pi * float(i) / float(nangles)
x = int(np.round(radius * np.cos(theta)))
y = int(np.round(radius * np.sin(theta)))
xmin = max(0, -x)
xmax = min(img.shape[1] - x, img.shape[1])
ymin = max(0, -y)
ymax = min(img.shape[0] - y, img.shape[0])
dest = (slice(ymin, ymax), slice(xmin, xmax))
src = (slice(ymin + y, ymax + y), slice(xmin + x, xmax + x))
if mask is not None:
a[dest][mask[src]] += img[src][mask[src]]
m[dest][mask[src]] += 1
else:
a[dest] += img[src]
m[dest] += 1
a[m > 0] /= m[m > 0]
return a
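# Minimal usage sketch for circular_hough(): every pixel on a bright ring of
# radius r votes for the ring's centre, so the accumulator peaks there. The
# synthetic ring below is an assumption used only for demonstration.
def _example_circular_hough_usage():
    i, j = np.mgrid[0:64, 0:64]
    d = np.sqrt((i - 32.0) ** 2 + (j - 32.0) ** 2)
    ring = (np.abs(d - 10) < 1).astype(float)     # ring of radius 10
    acc = circular_hough(ring, radius=10)
    return np.unravel_index(np.argmax(acc), acc.shape)   # ~ (32, 32)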
def hessian(
image, return_hessian=True, return_eigenvalues=True, return_eigenvectors=True
):
"""Calculate hessian, its eigenvalues and eigenvectors
image - n x m image. Smooth the image with a Gaussian to get derivatives
at different scales.
return_hessian - true to return an n x m x 2 x 2 matrix of the hessian
at each pixel
return_eigenvalues - true to return an n x m x 2 matrix of the eigenvalues
of the hessian at each pixel
return_eigenvectors - true to return an n x m x 2 x 2 matrix of the
eigenvectors of the hessian at each pixel
The values of the border pixels for the image are not calculated and
are zero
"""
# The Hessian, d(f(x0, x1))/dxi/dxj for i,j = [0,1] is approximated by the
# following kernels:
# d00: [[1], [-2], [1]]
# d11: [[1, -2, 1]]
# d01 and d10: [[ 1, 0,-1],
# [ 0, 0, 0],
# [ -1, 0, 1]] / 2
# The eigenvalues of the hessian:
# [[d00, d01]
# [d01, d11]]
# L1 = (d00 + d11) / 2 + ((d00 + d11)**2 / 4 - (d00 * d11 - d01**2)) ** .5
# L2 = (d00 + d11) / 2 - ((d00 + d11)**2 / 4 - (d00 * d11 - d01**2)) ** .5
# The eigenvectors of the hessian:
# if d01 != 0:
# [(L1 - d11, d01), (L2 - d11, d01)]
# else:
# [ (1, 0), (0, 1) ]
# Ideas and code borrowed from:
# http://www.math.harvard.edu/archive/21b_fall_04/exhibits/2dmatrices/index.html
# http://www.longair.net/edinburgh/imagej/tubeness/
hessian = np.zeros((image.shape[0], image.shape[1], 2, 2))
hessian[1:-1, :, 0, 0] = image[:-2, :] - (2 * image[1:-1, :]) + image[2:, :]
    hessian[1:-1, 1:-1, 0, 1] = hessian[1:-1, 1:-1, 1, 0] = (
        image[2:, 2:] + image[:-2, :-2] - image[2:, :-2] - image[:-2, 2:]
    ) / 4
hessian[:, 1:-1, 1, 1] = image[:, :-2] - (2 * image[:, 1:-1]) + image[:, 2:]
#
# Solve the eigenvalue equation:
# H x = L x
#
# Much of this from Eigensystem2x2Float.java from tubeness
#
A = hessian[:, :, 0, 0]
B = hessian[:, :, 0, 1]
C = hessian[:, :, 1, 1]
b = -(A + C)
c = A * C - B * B
discriminant = b * b - 4 * c
# pn is something that broadcasts over all points and either adds or
# subtracts the +/- part of the eigenvalues
pn = np.array([1, -1])[np.newaxis, np.newaxis, :]
L = (-b[:, :, np.newaxis] + (np.sqrt(discriminant)[:, :, np.newaxis] * pn)) / 2
#
# Report eigenvalue # 0 as the one with the highest absolute magnitude
#
L[np.abs(L[:, :, 1]) > np.abs(L[:, :, 0]), :] = L[
np.abs(L[:, :, 1]) > np.abs(L[:, :, 0]), ::-1
]
if return_eigenvectors:
#
# Calculate for d01 != 0
#
v = np.ones((image.shape[0], image.shape[1], 2, 2)) * np.nan
v[:, :, :, 0] = L - hessian[:, :, 1, 1, np.newaxis]
v[:, :, :, 1] = hessian[:, :, 0, 1, np.newaxis]
#
# Calculate for d01 = 0
default = np.array([[1, 0], [0, 1]])[np.newaxis, :, :]
v[hessian[:, :, 0, 1] == 0] = default
#
# Normalize the vectors
#
d = np.sqrt(np.sum(v * v, 3))
v /= d[:, :, :, np.newaxis]
result = []
if return_hessian:
result.append(hessian)
if return_eigenvalues:
result.append(L)
if return_eigenvectors:
result.append(v)
if len(result) == 0:
return
elif len(result) == 1:
return result[0]
return tuple(result)
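# Minimal sketch for hessian(): for the image f(i, j) = i ** 2 the second
# derivative along axis 0 is 2 everywhere, so away from the zeroed border the
# leading eigenvalue is ~2 and the other ~0. The image size is an assumption;
# this just exercises the finite-difference kernels described above.
def _example_hessian_usage():
    i = np.mgrid[0:16, 0:16][0].astype(float)
    img = i ** 2
    eigenvalues = hessian(img, return_hessian=False, return_eigenvectors=False)
    return eigenvalues[5, 5]       # approximately [2.0, 0.0]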
def poisson_equation(
image, gradient=1, max_iter=100, convergence=0.01, percentile=90.0
):
"""Estimate the solution to the Poisson Equation
The Poisson Equation is the solution to gradient(x) = h^2/4 and, in this
context, we use a boundary condition where x is zero for background
pixels. Also, we set h^2/4 = 1 to indicate that each pixel is a distance
of 1 from its neighbors.
The estimation exits after max_iter iterations or if the given percentile
of foreground pixels differ by less than the convergence fraction
from one pass to the next.
Some ideas taken from Gorelick, "Shape representation and classification
using the Poisson Equation", IEEE Transactions on Pattern Analysis and
Machine Intelligence V28, # 12, 2006
image - binary image with foreground as True
gradient - the target gradient between 4-adjacent pixels
max_iter - maximum # of iterations at a given level
convergence - target fractional difference between values from previous
and next pass
percentile - measure convergence at this percentile
"""
# Evaluate the poisson equation with zero-padded boundaries
pe = np.zeros((image.shape[0] + 2, image.shape[1] + 2))
if image.shape[0] > 64 and image.shape[1] > 64:
#
# Sub-sample to get seed values
#
sub_image = image[::2, ::2]
sub_pe = poisson_equation(
sub_image, gradient=gradient * 2, max_iter=max_iter, convergence=convergence
)
coordinates = (
np.mgrid[0 : (sub_pe.shape[0] * 2), 0 : (sub_pe.shape[1] * 2)].astype(float)
/ 2
)
pe[
1 : (sub_image.shape[0] * 2 + 1), 1 : (sub_image.shape[1] * 2 + 1)
] = scind.map_coordinates(sub_pe, coordinates, order=1)
pe[: image.shape[0], : image.shape[1]][~image] = 0
else:
pe[1:-1, 1:-1] = image
#
# evaluate only at i and j within the foreground
#
i, j = np.mgrid[0 : pe.shape[0], 0 : pe.shape[1]]
mask = (i > 0) & (i < pe.shape[0] - 1) & (j > 0) & (j < pe.shape[1] - 1)
mask[mask] = image[i[mask] - 1, j[mask] - 1]
i = i[mask]
j = j[mask]
if len(i) == 0:
return pe[1:-1, 1:-1]
if len(i) == 1:
# Just in case "percentile" can't work when unable to interpolate
# between a single value... Isolated pixels have value = 1
#
pe[mask] = 1
return pe[1:-1, 1:-1]
for itr in range(max_iter):
next_pe = (pe[i + 1, j] + pe[i - 1, j] + pe[i, j + 1] + pe[i, j - 1]) / 4 + 1
difference = np.abs((pe[mask] - next_pe) / next_pe)
pe[mask] = next_pe
if np.percentile(difference, percentile) <= convergence:
break
return pe[1:-1, 1:-1]
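# Minimal usage sketch for poisson_equation(): for a solid foreground rectangle
# the solution is zero on the background, rises from the object boundary and
# peaks near the middle, giving a rough "depth from the edge" per object. The
# rectangle size and tolerances below are assumptions for illustration.
def _example_poisson_equation_usage():
    fg = np.zeros((40, 40), bool)
    fg[10:30, 5:35] = True
    pe = poisson_equation(fg, max_iter=200, convergence=0.001)
    return pe.max(), pe[~fg].max()     # interior maximum, and 0.0 outside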
| CellProfiler/centrosome | centrosome/filter.py | Python | bsd-3-clause | 71,074 | [
"Gaussian"
] | bbf7fa52014efd858e7b03f49ad7edf3d7f4833e8ca73f89631651b32e98200d |
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gprime modules
#
#-------------------------------------------------------------------------
from .. import Rule
#-------------------------------------------------------------------------
#
# RegExpName
#
#-------------------------------------------------------------------------
class RegExpName(Rule):
"""Rule that checks for full or partial name matches"""
labels = [_('Text:')]
name = _('People with a name matching <text>')
description = _("Matches people's names containing a substring or "
"matching a regular expression")
category = _('General filters')
allow_regex = True
def apply(self,db,person):
for name in [person.get_primary_name()] + person.get_alternate_names():
for field in [name.first_name, name.get_surname(), name.suffix,
name.title, name.nick, name.famnick, name.call]:
if self.match_substring(0, field):
return True
return False
| sam-m888/gprime | gprime/filters/rules/person/_regexpname.py | Python | gpl-2.0 | 2,185 | [
"Brian"
] | b22b9ec021d4b24a308e1af95d7c1fea06f1601977912fc959b192c46fb74eea |
from behave import *
import uuid as uuid
from django.contrib.auth.models import User
from page_actions import *
from page_objects import *
username = 'user999999999'
uid = uuid.uuid4()
email_address = 'ureport@webpro.com'
password = 'password'
@given(u'I am a logged into wep-pro')
def step_impl(context):
login_to_web_pro(context, email_address, password)
@when(u'I visit the shout page')
def step_impl(context):
go_to_shout_page(context)
@then(u'I shall be able to send a message')
def step_impl(context):
send_a_message(context.browser, 'Testing Testing')
    # Asserting a tuple is always true; call the check (assumed, like
    # send_a_message above, to take the browser) so the assertion is meaningful.
    assert send_message_notification_present(context.browser), 'Thank_you_message not found'
| rapidpro/ureport-web-participation | features/steps/shout_step.py | Python | agpl-3.0 | 655 | [
"VisIt"
] | 1188b18faaee38afd69451650e764ae455fbd107190c1ecacd6bbf54cb62b381 |
import os
import shutil
import logging
import re
from functools import partial, wraps
import netCDF4
import numpy as np
from django.db import models, transaction
from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import UploadedFile
from django.template import Template, Context
from django.forms.models import formset_factory, BaseFormSet
from dominate.tags import div, legend, form, button, p, textarea, strong, input
from hs_core.hydroshare import utils
from hs_core.hydroshare.resource import delete_resource_file
from hs_core.forms import CoverageTemporalForm, CoverageSpatialForm
from hs_core.models import Creator, Contributor
from hs_app_netCDF.models import NetCDFMetaDataMixin, OriginalCoverage, Variable
from hs_app_netCDF.forms import VariableForm, VariableValidationForm, OriginalCoverageForm
from base import AbstractFileMetaData, AbstractLogicalFile
import hs_file_types.nc_functions.nc_utils as nc_utils
import hs_file_types.nc_functions.nc_dump as nc_dump
import hs_file_types.nc_functions.nc_meta as nc_meta
class NetCDFFileMetaData(NetCDFMetaDataMixin, AbstractFileMetaData):
# the metadata element models are from the netcdf resource type app
model_app_label = 'hs_app_netCDF'
def get_metadata_elements(self):
elements = super(NetCDFFileMetaData, self).get_metadata_elements()
elements += [self.original_coverage]
elements += list(self.variables.all())
return elements
@classmethod
def get_metadata_model_classes(cls):
metadata_model_classes = super(NetCDFFileMetaData, cls).get_metadata_model_classes()
metadata_model_classes['originalcoverage'] = OriginalCoverage
metadata_model_classes['variable'] = Variable
return metadata_model_classes
@property
def original_coverage(self):
# There can be at most only one instance of type OriginalCoverage associated
# with this metadata object
return self.ori_coverage.all().first()
def get_html(self):
"""overrides the base class function"""
html_string = super(NetCDFFileMetaData, self).get_html()
if self.spatial_coverage:
html_string += self.spatial_coverage.get_html()
if self.originalCoverage:
html_string += self.originalCoverage.get_html()
if self.temporal_coverage:
html_string += self.temporal_coverage.get_html()
variable_legend = legend("Variables", cls="pull-left", style="margin-top:20px;")
html_string += variable_legend.render()
for variable in self.variables.all():
html_string += variable.get_html()
# ncdump text from the txt file
html_string += self.get_ncdump_html().render()
template = Template(html_string)
context = Context({})
return template.render(context)
def get_html_forms(self, dataset_name_form=True, temporal_coverage=True, **kwargs):
"""overrides the base class function"""
root_div = div("{% load crispy_forms_tags %}")
with root_div:
self.get_update_netcdf_file_html_form()
super(NetCDFFileMetaData, self).get_html_forms()
with div(cls="row"):
with div(cls="col-lg-6 col-xs-12", id="original-coverage-filetype"):
with form(id="id-origcoverage-file-type",
action="{{ orig_coverage_form.action }}",
method="post", enctype="multipart/form-data"):
div("{% crispy orig_coverage_form %}")
with div(cls="row", style="margin-top:10px;"):
with div(cls="col-md-offset-10 col-xs-offset-6 "
"col-md-2 col-xs-6"):
button("Save changes", type="button",
cls="btn btn-primary pull-right",
style="display: none;")
with div(cls="col-lg-6 col-xs-12", id="spatial-coverage-filetype"):
with form(id="id-spatial-coverage-file-type",
action="{{ spatial_coverage_form.action }}",
method="post", enctype="multipart/form-data"):
div("{% crispy spatial_coverage_form %}")
with div(cls="row", style="margin-top:10px;"):
with div(cls="col-md-offset-10 col-xs-offset-6 "
"col-md-2 col-xs-6"):
button("Save changes", type="button",
cls="btn btn-primary pull-right",
style="display: none;")
with div(cls="pull-left col-sm-12"):
# id has to be variables to get the vertical scrollbar
with div(cls="well", id="variables"):
with div(cls="row"):
with div("{% for form in variable_formset_forms %}"):
with div(cls="col-sm-6 col-xs-12"):
with form(id="{{ form.form_id }}", action="{{ form.action }}",
method="post", enctype="multipart/form-data"):
div("{% crispy form %}")
with div(cls="row", style="margin-top:10px;"):
with div(cls="col-md-offset-10 col-xs-offset-6 "
"col-md-2 col-xs-6"):
button("Save changes", type="button",
cls="btn btn-primary pull-right",
style="display: none;")
div("{% endfor %}")
self.get_ncdump_html()
template = Template(root_div.render())
temp_cov_form = self.get_temporal_coverage_form()
update_action = "/hydroshare/hsapi/_internal/NetCDFLogicalFile/{0}/{1}/{2}/update-file-metadata/"
create_action = "/hydroshare/hsapi/_internal/NetCDFLogicalFile/{0}/{1}/add-file-metadata/"
if self.temporal_coverage:
temp_action = update_action.format(self.logical_file.id, "coverage",
self.temporal_coverage.id)
else:
temp_action = create_action.format(self.logical_file.id, "coverage")
temp_cov_form.action = temp_action
orig_cov_form = self.get_original_coverage_form()
if self.originalCoverage:
temp_action = update_action.format(self.logical_file.id, "originalcoverage",
self.originalCoverage.id)
else:
temp_action = create_action.format(self.logical_file.id, "originalcoverage")
orig_cov_form.action = temp_action
spatial_cov_form = self.get_spatial_coverage_form(allow_edit=True)
if self.spatial_coverage:
temp_action = update_action.format(self.logical_file.id, "coverage",
self.spatial_coverage.id)
else:
temp_action = create_action.format(self.logical_file.id, "coverage")
spatial_cov_form.action = temp_action
context_dict = dict()
context_dict["temp_form"] = temp_cov_form
context_dict["orig_coverage_form"] = orig_cov_form
context_dict["spatial_coverage_form"] = spatial_cov_form
context_dict["variable_formset_forms"] = self.get_variable_formset().forms
context = Context(context_dict)
rendered_html = template.render(context)
return rendered_html
def get_update_netcdf_file_html_form(self):
form_action = "/hydroshare/hsapi/_internal/{}/update-netcdf-file/".format(self.id)
style = "display:none;"
if self.is_dirty:
style = "margin-bottom:10px"
root_div = div(id="div-netcdf-file-update", cls="row", style=style)
with root_div:
with div(cls="col-sm-12"):
with div(cls="alert alert-warning alert-dismissible", role="alert"):
strong("NetCDF file needs to be synced with metadata changes.")
input(id="metadata-dirty", type="hidden", value=self.is_dirty)
with form(action=form_action, method="post", id="update-netcdf-file"):
button("Update NetCDF File", type="button", cls="btn btn-primary",
id="id-update-netcdf-file")
return root_div
def get_original_coverage_form(self):
return OriginalCoverage.get_html_form(resource=None, element=self.originalCoverage,
file_type=True)
def get_variable_formset(self):
VariableFormSetEdit = formset_factory(
wraps(VariableForm)(partial(VariableForm, allow_edit=True)),
formset=BaseFormSet, extra=0)
variable_formset = VariableFormSetEdit(
initial=self.variables.all().values(), prefix='Variable')
for frm in variable_formset.forms:
if len(frm.initial) > 0:
frm.action = "/hydroshare/hsapi/_internal/%s/%s/variable/%s/update-file-metadata/" % (
"NetCDFLogicalFile", self.logical_file.id, frm.initial['id'])
frm.number = frm.initial['id']
return variable_formset
def get_ncdump_html(self):
"""
Generates html code to display the contents of the ncdump text file. The generated html
is used for netcdf file type metadata view and edit modes.
:return:
"""
nc_dump_div = div()
nc_dump_res_file = None
for f in self.logical_file.files.all():
if f.extension == ".txt":
nc_dump_res_file = f
break
if nc_dump_res_file is not None:
nc_dump_div = div(style="clear: both", cls="col-xs-12")
with nc_dump_div:
legend("NetCDF Header Information")
p(nc_dump_res_file.full_path[33:])
header_info = nc_dump_res_file.resource_file.read()
header_info = header_info.decode('utf-8')
textarea(header_info, readonly="", rows="15",
cls="input-xlarge", style="min-width: 100%")
return nc_dump_div
@classmethod
def validate_element_data(cls, request, element_name):
"""overriding the base class method"""
if element_name.lower() not in [el_name.lower() for el_name
in cls.get_supported_element_names()]:
            err_msg = "{} is not a supported metadata element for NetCDF file type"
err_msg = err_msg.format(element_name)
return {'is_valid': False, 'element_data_dict': None, "errors": err_msg}
element_name = element_name.lower()
if element_name == 'variable':
form_data = {}
for field_name in VariableValidationForm().fields:
try:
# when the request comes from the UI, the variable attributes have a prefix of
# '-'
matching_key = [key for key in request.POST if '-' + field_name in key][0]
except IndexError:
if field_name in request.POST:
matching_key = field_name
else:
continue
form_data[field_name] = request.POST[matching_key]
element_form = VariableValidationForm(form_data)
elif element_name == 'originalcoverage':
element_form = OriginalCoverageForm(data=request.POST)
elif element_name == 'coverage' and 'start' not in request.POST:
element_form = CoverageSpatialForm(data=request.POST)
else:
# here we are assuming temporal coverage
element_form = CoverageTemporalForm(data=request.POST)
if element_form.is_valid():
return {'is_valid': True, 'element_data_dict': element_form.cleaned_data}
else:
return {'is_valid': False, 'element_data_dict': None, "errors": element_form.errors}
def add_to_xml_container(self, container):
"""Generates xml+rdf representation of all metadata elements associated with this
logical file type instance"""
container_to_add_to = super(NetCDFFileMetaData, self).add_to_xml_container(container)
if self.originalCoverage:
self.originalCoverage.add_to_xml_container(container_to_add_to)
for variable in self.variables.all():
variable.add_to_xml_container(container_to_add_to)
class NetCDFLogicalFile(AbstractLogicalFile):
metadata = models.OneToOneField(NetCDFFileMetaData, related_name="logical_file")
data_type = "Multidimensional"
@classmethod
def get_allowed_uploaded_file_types(cls):
"""only .nc file can be set to this logical file group"""
return [".nc"]
@classmethod
def get_allowed_storage_file_types(cls):
"""file types allowed in this logical file group are: .nc and .txt"""
return [".nc", ".txt"]
@classmethod
def create(cls):
"""this custom method MUST be used to create an instance of this class"""
netcdf_metadata = NetCDFFileMetaData.objects.create(keywords=[])
return cls.objects.create(metadata=netcdf_metadata)
@property
def supports_resource_file_move(self):
"""resource files that are part of this logical file can't be moved"""
return False
@property
def supports_resource_file_add(self):
"""doesn't allow a resource file to be added"""
return False
@property
def supports_resource_file_rename(self):
"""resource files that are part of this logical file can't be renamed"""
return False
@property
def supports_delete_folder_on_zip(self):
"""does not allow the original folder to be deleted upon zipping of that folder"""
return False
def update_netcdf_file(self, user):
"""
writes metadata to the netcdf file associated with this instance of the logical file
:return:
"""
log = logging.getLogger()
nc_res_file = ''
txt_res_file = ''
for f in self.files.all():
if f.extension == '.nc':
nc_res_file = f
break
for f in self.files.all():
if f.extension == '.txt':
txt_res_file = f
break
if not nc_res_file:
msg = "No netcdf file exists for this logical file."
log.exception(msg)
raise ValidationError(msg)
netcdf_file_update(self, nc_res_file, txt_res_file, user)
@classmethod
def set_file_type(cls, resource, file_id, user):
"""
        Sets a netCDF (.nc) resource file to NetCDFLogicalFile type
        :param resource: an instance of resource type CompositeResource
        :param file_id: id of the resource file to be set as NetCDFLogicalFile type
:param user: user who is setting the file type
:return:
"""
# had to import it here to avoid import loop
from hs_core.views.utils import create_folder, remove_folder
log = logging.getLogger()
# get the file from irods
res_file = utils.get_resource_file_by_id(resource, file_id)
if res_file is None:
raise ValidationError("File not found.")
if res_file.extension != '.nc':
raise ValidationError("Not a NetCDF file.")
# base file name (no path included)
file_name = res_file.file_name
# file name without the extension
nc_file_name = file_name[:-len(res_file.extension)]
resource_metadata = []
file_type_metadata = []
files_to_add_to_resource = []
upload_folder = ''
if res_file.has_generic_logical_file:
# get the file from irods to temp dir
temp_file = utils.get_file_from_irods(res_file)
temp_dir = os.path.dirname(temp_file)
files_to_add_to_resource.append(temp_file)
# file validation and metadata extraction
nc_dataset = nc_utils.get_nc_dataset(temp_file)
if isinstance(nc_dataset, netCDF4.Dataset):
# Extract the metadata from netcdf file
res_dublin_core_meta, res_type_specific_meta = nc_meta.get_nc_meta_dict(temp_file)
# populate resource_metadata and file_type_metadata lists with extracted metadata
add_metadata_to_list(resource_metadata, res_dublin_core_meta,
res_type_specific_meta, file_type_metadata, resource)
# create the ncdump text file
dump_file = create_header_info_txt_file(temp_file, nc_file_name)
files_to_add_to_resource.append(dump_file)
file_folder = res_file.file_folder
with transaction.atomic():
# create a netcdf logical file object to be associated with
# resource files
logical_file = cls.create()
# by default set the dataset_name attribute of the logical file to the
# name of the file selected to set file type unless the extracted metadata
# has a value for title
dataset_title = res_dublin_core_meta.get('title', None)
if dataset_title is not None:
logical_file.dataset_name = dataset_title
else:
logical_file.dataset_name = nc_file_name
logical_file.save()
try:
# create a folder for the netcdf file type using the base file
# name as the name for the new folder
new_folder_path = cls.compute_file_type_folder(resource, file_folder,
nc_file_name)
create_folder(resource.short_id, new_folder_path)
log.info("Folder created:{}".format(new_folder_path))
new_folder_name = new_folder_path.split('/')[-1]
if file_folder is None:
upload_folder = new_folder_name
else:
upload_folder = os.path.join(file_folder, new_folder_name)
# add all new files to the resource
for f in files_to_add_to_resource:
uploaded_file = UploadedFile(file=open(f, 'rb'),
name=os.path.basename(f))
# the added resource file will be part of a new generic logical file
# by default
new_res_file = utils.add_file_to_resource(
resource, uploaded_file, folder=upload_folder
)
# delete the generic logical file object
if new_res_file.logical_file is not None:
# deleting the file level metadata object will delete the associated
# logical file object
new_res_file.logical_file.metadata.delete()
# make each resource file we added part of the logical file
logical_file.add_resource_file(new_res_file)
log.info("NetCDF file type - new files were added to the resource.")
# use the extracted metadata to populate resource metadata
for element in resource_metadata:
# here k is the name of the element
# v is a dict of all element attributes/field names and field values
k, v = element.items()[0]
if k == 'title':
# update title element
title_element = resource.metadata.title
resource.metadata.update_element('title', title_element.id, **v)
else:
resource.metadata.create_element(k, **v)
log.info("Resource - metadata was saved to DB")
# use the extracted metadata to populate file metadata
for element in file_type_metadata:
# here k is the name of the element
# v is a dict of all element attributes/field names and field values
k, v = element.items()[0]
if k == 'subject':
logical_file.metadata.keywords = v
logical_file.metadata.save()
# update resource level keywords
resource_keywords = [subject.value.lower() for subject in
resource.metadata.subjects.all()]
for kw in logical_file.metadata.keywords:
if kw.lower() not in resource_keywords:
resource.metadata.create_element('subject', value=kw)
else:
logical_file.metadata.create_element(k, **v)
log.info("NetCDF file type - metadata was saved to DB")
# set resource to private if logical file is missing required metadata
resource.update_public_and_discoverable()
# delete the original resource file
delete_resource_file(resource.short_id, res_file.id, user)
log.info("Deleted original resource file.")
except Exception as ex:
msg = "NetCDF file type. Error when setting file type. Error:{}"
msg = msg.format(ex.message)
log.exception(msg)
if upload_folder:
# delete any new files uploaded as part of setting file type
folder_to_remove = os.path.join('data', 'contents', upload_folder)
remove_folder(user, resource.short_id, folder_to_remove)
log.info("Deleted newly created file type folder")
raise ValidationError(msg)
finally:
# remove temp dir
if os.path.isdir(temp_dir):
shutil.rmtree(temp_dir)
else:
err_msg = "Not a valid NetCDF file. File type file validation failed."
log.error(err_msg)
# remove temp dir
if os.path.isdir(temp_dir):
shutil.rmtree(temp_dir)
raise ValidationError(err_msg)
def add_metadata_to_list(res_meta_list, extracted_core_meta, extracted_specific_meta,
file_meta_list=None, resource=None):
"""
Helper function to populate metadata lists (*res_meta_list* and *file_meta_list*) with
extracted metadata from the NetCDF file. These metadata lists are then used for creating
metadata element objects by the caller.
:param res_meta_list: a list to store data to create metadata elements at the resource level
:param extracted_core_meta: a dict of extracted dublin core metadata
:param extracted_specific_meta: a dict of extracted metadata that is NetCDF specific
:param file_meta_list: a list to store data to create metadata elements at the file type level
    (must be None when this helper function is used for NetCDF resource and must not be None
    when used for NetCDF file type)
    :param resource: an instance of BaseResource (must be None when this helper function is used
    for NetCDF resource and must not be None when used for NetCDF file type)
:return:
"""
# add title
if resource is not None and file_meta_list is not None:
# file type
if resource.metadata.title.value.lower() == 'untitled resource':
add_title_metadata(res_meta_list, extracted_core_meta)
else:
# resource type
add_title_metadata(res_meta_list, extracted_core_meta)
# add abstract (Description element)
if resource is not None and file_meta_list is not None:
# file type
if resource.metadata.description is None:
add_abstract_metadata(res_meta_list, extracted_core_meta)
else:
# resource type
add_abstract_metadata(res_meta_list, extracted_core_meta)
# add keywords
if file_meta_list is not None:
# file type
add_keywords_metadata(file_meta_list, extracted_core_meta)
else:
# resource type
add_keywords_metadata(res_meta_list, extracted_core_meta, file_type=False)
# add creators:
if resource is not None:
# file type
add_creators_metadata(res_meta_list, extracted_core_meta,
resource.metadata.creators.all())
else:
# resource type
add_creators_metadata(res_meta_list, extracted_core_meta,
Creator.objects.none())
# add contributors:
if resource is not None:
# file type
add_contributors_metadata(res_meta_list, extracted_core_meta,
resource.metadata.contributors.all())
else:
# resource type
add_contributors_metadata(res_meta_list, extracted_core_meta,
Contributor.objects.none())
# add source (applies only to NetCDF resource type)
if extracted_core_meta.get('source') and file_meta_list is None:
source = {'source': {'derived_from': extracted_core_meta['source']}}
res_meta_list.append(source)
# add relation (applies only to NetCDF resource type)
if extracted_core_meta.get('references') and file_meta_list is None:
relation = {'relation': {'type': 'cites',
'value': extracted_core_meta['references']}}
res_meta_list.append(relation)
# add rights (applies only to NetCDF resource type)
if extracted_core_meta.get('rights') and file_meta_list is None:
raw_info = extracted_core_meta.get('rights')
b = re.search("(?P<url>https?://[^\s]+)", raw_info)
url = b.group('url') if b else ''
statement = raw_info.replace(url, '') if url else raw_info
rights = {'rights': {'statement': statement, 'url': url}}
res_meta_list.append(rights)
# add coverage - period
if file_meta_list is not None:
# file type
add_temporal_coverage_metadata(file_meta_list, extracted_core_meta)
else:
# resource type
add_temporal_coverage_metadata(res_meta_list, extracted_core_meta)
# add coverage - box
if file_meta_list is not None:
# file type
add_spatial_coverage_metadata(file_meta_list, extracted_core_meta)
else:
# resource type
add_spatial_coverage_metadata(res_meta_list, extracted_core_meta)
# add variables
if file_meta_list is not None:
# file type
add_variable_metadata(file_meta_list, extracted_specific_meta)
else:
# resource type
add_variable_metadata(res_meta_list, extracted_specific_meta)
# add original spatial coverage
if file_meta_list is not None:
# file type
add_original_coverage_metadata(file_meta_list, extracted_core_meta)
else:
# resource type
add_original_coverage_metadata(res_meta_list, extracted_core_meta)
def add_original_coverage_metadata(metadata_list, extracted_metadata):
"""
Adds data for the original coverage element to the *metadata_list*
:param metadata_list: list to which original coverage data needs to be added
:param extracted_metadata: a dict containing netcdf extracted metadata
:return:
"""
ori_cov = {}
if extracted_metadata.get('original-box'):
coverage_data = extracted_metadata['original-box']
projection_string_type = ""
projection_string_text = ""
datum = ""
if extracted_metadata.get('projection-info'):
projection_string_type = extracted_metadata[
'projection-info']['type']
projection_string_text = extracted_metadata[
'projection-info']['text']
datum = extracted_metadata['projection-info']['datum']
ori_cov = {'originalcoverage':
{'value': coverage_data,
'projection_string_type': projection_string_type,
'projection_string_text': projection_string_text,
'datum': datum
}
}
if ori_cov:
metadata_list.append(ori_cov)
def add_creators_metadata(metadata_list, extracted_metadata, existing_creators):
"""
Adds data for creator(s) to the *metadata_list*
:param metadata_list: list to which creator(s) data needs to be added
:param extracted_metadata: a dict containing netcdf extracted metadata
:param existing_creators: a QuerySet object for existing creators
:return:
"""
if extracted_metadata.get('creator_name'):
name = extracted_metadata['creator_name']
# add creator only if there is no creator already with the same name
if not existing_creators.filter(name=name).exists():
email = extracted_metadata.get('creator_email', '')
url = extracted_metadata.get('creator_url', '')
creator = {'creator': {'name': name, 'email': email, 'homepage': url}}
metadata_list.append(creator)
def add_contributors_metadata(metadata_list, extracted_metadata, existing_contributors):
"""
Adds data for contributor(s) to the *metadata_list*
:param metadata_list: list to which contributor(s) data needs to be added
:param extracted_metadata: a dict containing netcdf extracted metadata
:param existing_contributors: a QuerySet object for existing contributors
:return:
"""
if extracted_metadata.get('contributor_name'):
name_list = extracted_metadata['contributor_name'].split(',')
for name in name_list:
# add contributor only if there is no contributor already with the
# same name
if not existing_contributors.filter(name=name).exists():
contributor = {'contributor': {'name': name}}
metadata_list.append(contributor)
def add_title_metadata(metadata_list, extracted_metadata):
"""
Adds data for the title element to the *metadata_list*
:param metadata_list: list to which title data needs to be added
:param extracted_metadata: a dict containing netcdf extracted metadata
:return:
"""
if extracted_metadata.get('title'):
res_title = {'title': {'value': extracted_metadata['title']}}
metadata_list.append(res_title)
def add_abstract_metadata(metadata_list, extracted_metadata):
"""
Adds data for the abstract (Description) element to the *metadata_list*
:param metadata_list: list to which abstract data needs to be added
:param extracted_metadata: a dict containing netcdf extracted metadata
:return:
"""
if extracted_metadata.get('description'):
description = {'description': {'abstract': extracted_metadata['description']}}
metadata_list.append(description)
def add_variable_metadata(metadata_list, extracted_metadata):
"""
Adds variable(s) related data to the *metadata_list*
:param metadata_list: list to which variable data needs to be added
:param extracted_metadata: a dict containing netcdf extracted metadata
:return:
"""
for var_name, var_meta in extracted_metadata.items():
meta_info = {}
for element, value in var_meta.items():
if value != '':
meta_info[element] = value
metadata_list.append({'variable': meta_info})
def add_spatial_coverage_metadata(metadata_list, extracted_metadata):
"""
    Adds data for one spatial coverage metadata element to the *metadata_list*
:param metadata_list: list to which spatial coverage data needs to be added
:param extracted_metadata: a dict containing netcdf extracted metadata
:return:
"""
if extracted_metadata.get('box'):
box = {'coverage': {'type': 'box', 'value': extracted_metadata['box']}}
metadata_list.append(box)
def add_temporal_coverage_metadata(metadata_list, extracted_metadata):
"""
Adds data for one temporal metadata element to the *metadata_list*
:param metadata_list: list to which temporal coverage data needs to be added
:param extracted_metadata: a dict containing netcdf extracted metadata
:return:
"""
if extracted_metadata.get('period'):
period = {
'coverage': {'type': 'period', 'value': extracted_metadata['period']}}
metadata_list.append(period)
def add_keywords_metadata(metadata_list, extracted_metadata, file_type=True):
"""
Adds data for subject/keywords element to the *metadata_list*
:param metadata_list: list to which keyword data needs to be added
:param extracted_metadata: a dict containing netcdf extracted metadata
:param file_type: If True then this metadata extraction is for netCDF file type, otherwise
metadata extraction is for NetCDF resource
:return:
"""
if extracted_metadata.get('subject'):
keywords = extracted_metadata['subject'].split(',')
if file_type:
metadata_list.append({'subject': keywords})
else:
for keyword in keywords:
metadata_list.append({'subject': {'value': keyword}})
def create_header_info_txt_file(nc_temp_file, nc_file_name):
"""
Creates the header text file using the *nc_temp_file*
:param nc_temp_file: the netcdf file copied from irods to django
    for metadata extraction
    :param nc_file_name: name of the netcdf file (without extension) used in the header line
    and in the name of the generated text file
    :return:
"""
if nc_dump.get_nc_dump_string_by_ncdump(nc_temp_file):
dump_str = nc_dump.get_nc_dump_string_by_ncdump(nc_temp_file)
else:
dump_str = nc_dump.get_nc_dump_string(nc_temp_file)
# file name without the extension
temp_dir = os.path.dirname(nc_temp_file)
dump_file_name = nc_file_name + '_header_info.txt'
dump_file = os.path.join(temp_dir, dump_file_name)
if dump_str:
# refine dump_str first line
first_line = list('netcdf {0} '.format(nc_file_name))
first_line_index = dump_str.index('{')
dump_str_list = first_line + list(dump_str)[first_line_index:]
dump_str = "".join(dump_str_list)
with open(dump_file, 'w') as dump_file_obj:
dump_file_obj.write(dump_str)
else:
with open(dump_file, 'w') as dump_file_obj:
dump_file_obj.write("")
return dump_file
def netcdf_file_update(instance, nc_res_file, txt_res_file, user):
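    """
    Writes the current metadata (title, keywords, coverages, variable attributes, etc.)
    back into the netCDF file's global and variable attributes, regenerates the ncdump
    header text file, and replaces both files on iRODS. *instance* is either a
    NetCDFLogicalFile (file type) or a NetCDF resource object.
    """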
log = logging.getLogger()
# check the instance type
file_type = isinstance(instance, NetCDFLogicalFile)
# get the file from irods to temp dir
temp_nc_file = utils.get_file_from_irods(nc_res_file)
nc_dataset = netCDF4.Dataset(temp_nc_file, 'a')
try:
# update title
title = instance.dataset_name if file_type else instance.metadata.title.value
if title.lower() != 'untitled resource':
if hasattr(nc_dataset, 'title'):
delattr(nc_dataset, 'title')
nc_dataset.title = title
# update keywords
keywords = instance.metadata.keywords if file_type \
else [item.value for item in instance.metadata.subjects.all()]
if hasattr(nc_dataset, 'keywords'):
delattr(nc_dataset, 'keywords')
if keywords:
nc_dataset.keywords = ', '.join(keywords)
# update key/value metadata
extra_metadata_dict = instance.metadata.extra_metadata if file_type \
else instance.extra_metadata
if hasattr(nc_dataset, 'hs_extra_metadata'):
delattr(nc_dataset, 'hs_extra_metadata')
if extra_metadata_dict:
extra_metadata = []
for k, v in extra_metadata_dict.items():
extra_metadata.append("{}:{}".format(k, v))
nc_dataset.hs_extra_metadata = ', '.join(extra_metadata)
# update temporal coverage
temporal_coverage = instance.metadata.temporal_coverage if file_type \
else instance.metadata.coverages.all().filter(type='period').first()
for attr_name in ['time_coverage_start', 'time_coverage_end']:
if hasattr(nc_dataset, attr_name):
delattr(nc_dataset, attr_name)
if temporal_coverage:
nc_dataset.time_coverage_start = temporal_coverage.value['start']
nc_dataset.time_coverage_end = temporal_coverage.value['end']
# update spatial coverage
spatial_coverage = instance.metadata.spatial_coverage if file_type \
else instance.metadata.coverages.all().filter(type='box').first()
for attr_name in ['geospatial_lat_min', 'geospatial_lat_max', 'geospatial_lon_min',
'geospatial_lon_max']:
if hasattr(nc_dataset, attr_name):
delattr(nc_dataset, attr_name)
if spatial_coverage:
nc_dataset.geospatial_lat_min = spatial_coverage.value['southlimit']
nc_dataset.geospatial_lat_max = spatial_coverage.value['northlimit']
nc_dataset.geospatial_lon_min = spatial_coverage.value['westlimit']
nc_dataset.geospatial_lon_max = spatial_coverage.value['eastlimit']
# update variables
if instance.metadata.variables.all():
dataset_variables = nc_dataset.variables
for variable in instance.metadata.variables.all():
if variable.name in dataset_variables.keys():
dataset_variable = dataset_variables[variable.name]
# update units
if hasattr(dataset_variable, 'units'):
delattr(dataset_variable, 'units')
if variable.unit != 'Unknown':
dataset_variable.setncattr('units', variable.unit)
# update long_name
if hasattr(dataset_variable, 'long_name'):
delattr(dataset_variable, 'long_name')
if variable.descriptive_name:
dataset_variable.setncattr('long_name', variable.descriptive_name)
# update method
if hasattr(dataset_variable, 'comment'):
delattr(dataset_variable, 'comment')
if variable.method:
dataset_variable.setncattr('comment', variable.method)
# update missing value
if variable.missing_value:
if hasattr(dataset_variable, 'missing_value'):
missing_value = dataset_variable.missing_value
delattr(dataset_variable, 'missing_value')
else:
missing_value = ''
try:
dt = np.dtype(dataset_variable.datatype.name)
missing_value = np.fromstring(variable.missing_value + ' ',
dtype=dt.type, sep=" ")
                        except Exception:
                            # keep the previously stored missing_value if conversion fails
pass
if missing_value:
dataset_variable.setncattr('missing_value', missing_value)
# Update metadata element that only apply to netCDF resource
if not file_type:
# update summary
if hasattr(nc_dataset, 'summary'):
delattr(nc_dataset, 'summary')
if instance.metadata.description:
nc_dataset.summary = instance.metadata.description.abstract
# update contributor
if hasattr(nc_dataset, 'contributor_name'):
delattr(nc_dataset, 'contributor_name')
contributor_list = instance.metadata.contributors.all()
if contributor_list:
res_contri_name = []
for contributor in contributor_list:
res_contri_name.append(contributor.name)
nc_dataset.contributor_name = ', '.join(res_contri_name)
# update creator
for attr_name in ['creator_name', 'creator_email', 'creator_url']:
if hasattr(nc_dataset, attr_name):
delattr(nc_dataset, attr_name)
creator = instance.metadata.creators.all().filter(order=1).first()
if creator:
nc_dataset.creator_name = creator.name if creator.name else creator.organization
if creator.email:
nc_dataset.creator_email = creator.email
if creator.description or creator.homepage:
nc_dataset.creator_url = creator.homepage if creator.homepage \
else 'https://www.hydroshare.org' + creator.description
# update license
if hasattr(nc_dataset, 'license'):
delattr(nc_dataset, 'license')
if instance.metadata.rights:
nc_dataset.license = "{0} {1}".format(instance.metadata.rights.statement,
instance.metadata.rights.url)
# update reference
if hasattr(nc_dataset, 'references'):
delattr(nc_dataset, 'references')
reference_list = instance.metadata.relations.all().filter(type='cites')
if reference_list:
res_meta_ref = []
for reference in reference_list:
res_meta_ref.append(reference.value)
nc_dataset.references = ' \n'.join(res_meta_ref)
# update source
if hasattr(nc_dataset, 'source'):
delattr(nc_dataset, 'source')
source_list = instance.metadata.sources.all()
if source_list:
res_meta_source = []
for source in source_list:
res_meta_source.append(source.derived_from)
nc_dataset.source = ' \n'.join(res_meta_source)
# close nc dataset
nc_dataset.close()
except Exception as ex:
log.exception(ex.message)
if os.path.exists(temp_nc_file):
shutil.rmtree(os.path.dirname(temp_nc_file))
raise ex
# create the ncdump text file
nc_file_name = os.path.basename(temp_nc_file).split(".")[0]
temp_text_file = create_header_info_txt_file(temp_nc_file, nc_file_name)
# push the updated nc file and the txt file to iRODS
utils.replace_resource_file_on_irods(temp_nc_file, nc_res_file,
user)
utils.replace_resource_file_on_irods(temp_text_file, txt_res_file,
user)
metadata = instance.metadata
metadata.is_dirty = False
metadata.save()
# cleanup the temp dir
if os.path.exists(temp_nc_file):
shutil.rmtree(os.path.dirname(temp_nc_file))
| ResearchSoftwareInstitute/MyHPOM | hs_file_types/models/netcdf.py | Python | bsd-3-clause | 44,192 | [
"NetCDF"
] | d96ae856dbce75e1507fd53a37c95afff1f056f4579fc1f298b7dd2fd99e5a53 |
import MDAnalysis
import numpy as np
try:
from MDAnalysis.analysis import psa
except:
pass
class PSA_sqnormBench(object):
"""Benchmarks for MDAnalysis.analysis.psa.
sqnorm
"""
params = ([2,3,4],
[100,1000,10000],
[None, 0, 1, -1])
# num_cols is equivalent to dimensions
    # num_rows is equivalent to the number of atoms
param_names = ['num_cols',
'num_rows',
'axis']
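    # asv (airspeed velocity) convention: setup() and each time_* method are run once for
    # every combination of the values in 'params', with the chosen combination passed in
    # through the arguments named in 'param_names'.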
def setup(self, num_cols, num_rows, axis):
np.random.seed(170089)
self.v = np.random.rand(num_rows, num_cols)
def time_sqnorm(self, num_cols, num_rows, axis):
"""Benchmark sqnorm in psa module
"""
psa.sqnorm(v=self.v, axis=axis)
class PSA_get_msd_matrixBench(object):
"""Benchmarks for MDAnalysis.analysis.psa.
get_msd_matrix
"""
params = ([10,100,1000],
[5,25,50])
# since the function is defined to work with
# 3N dimension data sets, we will restrict
# benchmarks to that dimensionality
param_names = ['time_steps',
'n_atoms']
def setup(self, time_steps, n_atoms):
np.random.seed(170089)
self.P = np.random.rand(time_steps,
n_atoms,
3)
np.random.seed(971132)
self.Q = np.random.rand(time_steps,
n_atoms,
3)
def time_get_msd_matrix(self, time_steps, n_atoms):
"""Benchmark for get_msd_matrix in psa module
"""
# only default argument for axis is benchmarked
psa.get_msd_matrix(P=self.P,
Q=self.Q,
axis=None)
class PSA_get_coord_axesBench(object):
"""Benchmarks for MDAnalysis.analysis.psa.
get_coord_axes
"""
params = ([10,100,1000],
[5, 25, 50])
param_names = ['time_steps',
'n_atoms']
def setup(self, time_steps, n_atoms):
np.random.seed(170089)
# only using condensed path input
# data structure for now
self.path = np.random.rand(time_steps,
n_atoms * 3)
def time_get_coord_axes(self, time_steps, n_atoms):
"""Benchmark get_coord_axes in psa module
"""
psa.get_coord_axes(path=self.path)
class PSA_get_path_metric_funcBench(object):
"""Benchmark for MDAnalysis.analysis.psa.
get_path_metric_func
"""
params = (['hausdorff',
'weighted_average_hausdorff',
'average_hausdorff',
'hausdorff_neighbors',
'discrete_frechet'])
param_names = ['path_metric']
def time_get_path_metric_func(self, path_metric):
"""Benchmark for get_path_metric_func in psa
module
"""
psa.get_path_metric_func(name=path_metric)
class PSA_metricBench(object):
"""Benchmarks for the various path metric calculations
in the psa module.
"""
params = ([10,100,200],
[5,25,50])
param_names = ['time_steps',
'n_atoms']
def setup(self, time_steps, n_atoms):
np.random.seed(170089)
self.P = np.random.rand(time_steps,
n_atoms,
3)
np.random.seed(971132)
self.Q = np.random.rand(time_steps,
n_atoms,
3)
def time_hausdorff(self, time_steps, n_atoms):
"""Benchmark for hausdorff() in psa module.
"""
psa.hausdorff(P=self.P,
Q=self.Q)
def time_hausdorff_wavg(self, time_steps, n_atoms):
"""Benchmark for hausdorff_wavg() in psa module.
"""
psa.hausdorff_wavg(P=self.P,
Q=self.Q)
def time_hausdorff_avg(self, time_steps, n_atoms):
"""Benchmark for hausdorff_avg() in psa module.
"""
psa.hausdorff_avg(P=self.P,
Q=self.Q)
def time_hausdorff_neighbors(self, time_steps, n_atoms):
"""Benchmark for hausdorff_neighbors() in psa module.
"""
psa.hausdorff_neighbors(P=self.P,
Q=self.Q)
def time_discrete_frechet(self, time_steps, n_atoms):
"""Benchmark for discrete_frechet() in psa module.
"""
psa.discrete_frechet(P=self.P,
Q=self.Q)
| MDAnalysis/mdanalysis | benchmarks/benchmarks/analysis/psa.py | Python | gpl-2.0 | 4,688 | [
"MDAnalysis"
] | 9b3839f1378b3ed9c70bcdd857d6c5e7d98db4a4c4c0a32a16d9ec1c8f3d9ee8 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('visit', '0007_auto_20150429_0341'),
]
operations = [
migrations.AddField(
model_name='visit',
name='is_completed',
field=models.BooleanField(default=False),
),
]
| koebbe/homeworks | visit/migrations/0008_visit_is_completed.py | Python | mit | 406 | [
"VisIt"
] | 9524f8a9fe6194cfd5d2c4c4fa53a22bad9959d9c339d8d459736bebfc752294 |
# Copyright (C) 2015-2017,2019
# Jakub Krajniak (jkrajniak at gmail.com)
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# The file was modified to support non-standard entries that appear in the gromacs topology
# file.
import enum
from collections import namedtuple, defaultdict
import espressopp
from topology_helper import *
__doc__ = """This Python module allows one to use GROMACS data files as the
input to an ESPResSo++ simulation, set interactions for given
particle types and convert GROMACS potential tables into
ESPResSo++ tables.
It contains functions: read(), setInteractions(), convertTable()
"""
GromacsSystem = namedtuple(
'GromacsSystem', [
'defaults',
'types',
'masses',
'charges',
'res_ids',
'atomtypeparams',
'bondtypes',
'bondtypeparams',
'angletypes',
'angletypeparams',
'dihedraltypes',
'dihedraltypeparams',
'pairtypes',
'pairtypeparams',
'nonbond_params',
'exclusions'
])
class Section(enum.Enum):
Defaults = 'defaults'
AtomTypes = 'atomtypes'
NonbondParams = 'nonbond_params'
PairTypes = 'pairtypes'
BondTypes = 'bondtypes'
AngleTypes = 'angletypes'
DihedralTypes = 'dihedraltypes'
Molecules = 'molecules'
def read(top_file="", doRegularExcl=True, defines=None):
""" Read GROMACS data files.
Keyword arguments:
gro_file -- contains coordinates of all particles, the number of particles, velocities and box size.
top_file -- contains topology information. Included topology files (.itp) are also read
doRegularExcl -- if True, exclusions are generated automatically based on the nregxcl parameter (see gromacs manual)
"""
if defines is None:
defines = {}
# read top and itp files
    masses, charges, res_ids = [], [], [] # masses and charges of the whole configuration
types = [] # tuple: atomindex(int) to atomtypeid(int)
bonds = {} # dict: key bondtypeid value: tuple of bond pairs
angles = {} # dict: key angletype value: tuple of triples
dihedrals = {} # same...
pairs_1_4 = {} # dict: key pairtype value: tuple of pairs
exclusions = [] # list of atom pairs no considered in non-bonded interactions
defaults = {} # gromacs default values
atomtypeparams = {} # a dict: key atomtypeid , value : class storing actual parameters of each type e.g. c6, c12, etc..
use_atomtypeparams = {} # dict with the atomtypes that are use in the topology
nonbond_params = {}
use_nonbond_params = {}
bondtypeparams = {} # same for bonds
angletypeparams = {} # same for angles
dihedraltypeparams = {} # same for dihedrals
pairtypeparams = {}
use_pairtypeparams = {}
if top_file != "":
# f = open(top_file)
# FileBuffer: a class which behaves like a file, but all lines are in memory
# we use this for emulating a 'preprocessor' which handles the #include
# statements in the .top and .itp files
fb = FileBuffer()
defines = {}
FillFileBuffer(top_file, fb, defines=defines)
f = PostProcessFileBuffer(fb, defines)
print "Reading top file: " + top_file
line = ''
a, p = 0, 0
bondtypecount, angletypecount, dihedraltypecount = 0, 0, 0
readdefaults, readattypes, readnonbondtypes, readpairtypes, readbdtypes, readantypes, readdhtypes = (
False, False, False, False, False, False, False)
current_section = None
previous_section = None
defaults = {} # gromacs default values
atomtypes = {} # a dict: key atomtypename(str) value: atomtypeid(int)
bondtypes = {} # a dict: key atomindex(int),atomindex(int) value: bondtypeid(int)
angletypes = {} # a dict: key atomindex(int), atomindex(int),atomindex(int) value: angletypeid(int)
dihedraltypes = {} # a dict: key atomtindex(int), atomindex(int), atomindex(int),atomindex(int) value: dihedraltypeid(int)
nonbonds = {}
atnum_attype = {}
attypeid_atnum = {}
wildcard_type = None # Use for dihedrals. Special name 'X'.
# it was moved out of "if" statement
# atomtypeparams={} # a dict: key atomtypeid , value : class storing actual parameters of each type e.g. c6, c12, etc..
# bondtypeparams={} # same for bonds
# angletypeparams={} # same for angles
# dihedraltypeparams={} # same for dihedrals
# atomparams={} # key: atomindex(int) value: per atom parameters e.g. q, mass
molecules = []
# molecules = {} # key: moleculeid value: name (string)
readmolecules = False
skip_section = False
for line in f.lines:
line = line.strip()
if not line or line[0] == ";": # skip comment line
continue
if skip_section and line.startswith('#end'):
skip_section = False
continue
if skip_section:
continue
if line.startswith('#ifdef'):
define_tmp = line.split()
if len(define_tmp) > 1:
skip_section = defines.get(define_tmp[1], False)
else:
skip_section = True
continue
if line.startswith('#else'):
skip_section = True
continue
if line.startswith("#define"):
define_tmp = line.split()
defines[define_tmp[1]] = True
continue
# Gets current section
if '[' in line:
section = line.replace('[', '').replace(']', '').strip()
previous_section = current_section
print('Reading new section {} (previous: {})'.format(section, previous_section))
try:
current_section = Section(section)
except ValueError as ex:
print('Section {} not found - skipping ({})'.format(section, ex))
current_section = None
if previous_section == Section.AtomTypes:
atomtypes.update({'X': a})
wildcard_type = a
atnum_attype['X'] = 'X'
continue
if current_section == Section.Defaults:
fields = line.split()
if len(fields) == 5:
defaults = {"nbtype": fields[0], "combinationrule": int(fields[1]),
"genpairs": fields[2], "fudgeLJ": float(fields[3]), "fudgeQQ": float(fields[4])}
else:
defaults = {"nbtype": fields[0], "combinationrule": fields[1]}
if current_section == Section.AtomTypes:
fields = line.split(';')[0].split()
attypename = fields[0]
# make a map containing the properties
# sig, eps may be c6 and c12: this is specified in the defaults
# and converted later
if fields[0].startswith('opls'):
tmpprop = {
'atnum': fields[1],
'mass': float(fields[3]),
'charge': float(fields[4]),
'particletype': fields[5],
'sig': float(fields[6]),
'eps': float(fields[7])
}
atnum_attype[attypename] = fields[1]
elif len(fields) == 7:
tmpprop = {
"atnum": int(fields[1]),
"atname": fields[0],
"mass": float(fields[2]),
"charge": float(fields[3]),
"particletype": fields[4],
"sig": float(fields[5]),
"eps": float(fields[6])}
elif len(fields) == 8:
tmpprop = {
'atnum': fields[1],
'mass': float(fields[3]),
'charge': float(fields[4]),
'sig': float(fields[6]),
'eps': float(fields[7])
}
else:
print('AA other: {}'.format(fields))
tmpprop = {
"atnum": fields[0],
"mass": float(fields[1]),
"charge": float(fields[2]),
"particletype": fields[3],
"sig": float(fields[4]),
"eps": float(fields[5])
}
if attypename not in atomtypes:
atomtypes.update({attypename: a}) # atomtypes is used when reading the "atoms" section
atomtypeparams.update({a: tmpprop})
attypeid_atnum[a] = tmpprop['atnum']
a += 1
if current_section == Section.NonbondParams:
l = line.strip()
fields = l.split(';')[0].split()
if len(fields) == 5:
a1, a2, fn, c6, c12 = fields[:5]
if int(fn) != 1:
continue
at1, at2 = sorted([atomtypes.get(a1), atomtypes.get(a2)])
if (at1, at2) not in nonbond_params:
nonbond_params[(at1, at2)] = {
'sig': float(c6),
'eps': float(c12)
}
if current_section == Section.PairTypes:
fields = line.split(';')[0].split()
if len(fields) > 0:
a1, a2, fn, c6, c12 = fields[:5]
if int(fn) != 1:
continue
at1, at2 = sorted([atomtypes.get(a1), atomtypes.get(a2)])
if at1 and at2:
if (at1, at2) not in pairtypeparams:
pairtypeparams[(at1, at2)] = {
'sig': float(c6),
'eps': float(c12)
}
if current_section == Section.BondTypes:
tmp = line.split()
i, j = tmp[:2]
p = ParseBondTypeParam(line)
# check if this type has been defined before
bdtypeid = FindType(p, bondtypeparams)
if bdtypeid == None:
bdtypeid = len(bondtypeparams)
bondtypeparams.update({bdtypeid: p})
if i in bondtypes:
bondtypes[i].update({j: bdtypeid})
else:
bondtypes.update({i: {j: bdtypeid}})
if j in bondtypes:
bondtypes[j].update({i: bdtypeid})
else:
bondtypes.update({j: {i: bdtypeid}})
if current_section == Section.AngleTypes:
tmp = line.split()
i, j, k = tmp[:3]
p = ParseAngleTypeParam(line)
if p is False:
                    print('Skip angle line: {}'.format(line))
                    continue
atypeid = FindType(p, angletypeparams)
if atypeid == None:
atypeid = len(angletypeparams)
angletypeparams.update({atypeid: p})
if i in angletypes:
if j in angletypes[i]:
angletypes[i][j].update({k: atypeid})
else:
angletypes[i].update({j: {k: atypeid}})
else:
angletypes.update({i: {j: {k: atypeid}}})
if current_section == Section.DihedralTypes:
tmp = line.split()
try:
int(tmp[4])
except ValueError:
print('Invalid dihedral type {}'.format(tmp))
continue
i, j, k, l = tmp[:4]
p = ParseDihedralTypeParam(line)
if p is False:
print('Skip dihedral line: {}'.format(line))
continue
dtypeid = FindType(p, dihedraltypeparams)
if dtypeid == None:
dtypeid = len(dihedraltypeparams)
dihedraltypeparams.update({dtypeid: p})
if i in dihedraltypes:
if j in dihedraltypes[i]:
if k in dihedraltypes[i][j]:
dihedraltypes[i][j][k].update({l: dtypeid})
else:
dihedraltypes[i][j].update({k: {l: dtypeid}})
else:
dihedraltypes[i].update({j: {k: {l: dtypeid}}})
else:
dihedraltypes.update({i: {j: {k: {l: dtypeid}}}})
if current_section == Section.Molecules:
print('Reading molecules')
mol, nrmol = line.strip().split()
# we have to check if the same molecules comes multiple times in the molecules section
if len(molecules) == 0:
molecules.append({'name': mol, 'count': int(nrmol)})
elif molecules[-1]['name'] == mol: # check if mol was added earlier already
molecules[-1]['count'] = molecules[-1]['count'] + int(nrmol) # update count
else:
molecules.append({'name': mol, 'count': int(nrmol)}) # if mol newly added
molstartindex = 0 # this is the index of the first atom in the molecule being parsed
res_idx = 0 # index of molecule like single polymer chain.
f.seek(0) # Now we search for bonds, angles definitions and start from the beginning of the file buffer
for mol in molecules:
print('Preparing molecule {name}... ({count})'.format(**mol))
# find and store number of molecules
num_molecule_copies = mol['count']
        # note: despite its name, this only reads and returns nrexcl from the [moleculetype] section
nrexcl = storeMolecules(f, molecules, mol)
# Local at_types
# find and store atom types
types, masses, charges, num_atoms_molecule, res_ids, at_types = \
storeAtoms(f, defaults, types, atomtypes, atomtypeparams, use_atomtypeparams,
nonbond_params, use_nonbond_params, masses, charges, res_ids,
num_molecule_copies,
res_idx)
# find and store bonds
bonds = storeBonds(f, at_types, bondtypes, bondtypeparams, bonds,
num_atoms_molecule, num_molecule_copies,
molstartindex, attypeid_atnum)
# find and store angles
angles = storeAngles(f, at_types, angletypes, angletypeparams, angles,
num_atoms_molecule, num_molecule_copies, molstartindex,
attypeid_atnum)
# find and store dihedrals
dihedrals = storeDihedrals(f, at_types, dihedraltypes, dihedraltypeparams, dihedrals,
num_atoms_molecule, num_molecule_copies,
molstartindex, atomtypeparams,
wildcard_type, attypeid_atnum)
pairs_1_4 = storePairs(f, defaults, at_types, pairtypeparams, use_pairtypeparams,
atomtypeparams, pairs_1_4,
num_atoms_molecule, num_molecule_copies,
molstartindex)
if doRegularExcl:
storeExclusions(exclusions, nrexcl, bonds)
molstartindex += num_molecule_copies * num_atoms_molecule
res_idx += num_molecule_copies
# Update typeparams
use_keys = [s[0] for s in bonds]
bondtypeparams = {k: v for k, v in bondtypeparams.iteritems() if k in use_keys}
use_keys = [s[0] for s in angles]
angletypeparams = {k: v for k, v in angletypeparams.iteritems() if k in use_keys}
use_keys = [s[0] for s in dihedrals]
dihedraltypeparams = {k: v for k, v in dihedraltypeparams.iteritems() if k in use_keys}
    # The parsed data is packed into a GromacsSystem namedtuple so the caller can access
    # each piece of the topology by field name.
print 'Found default values', defaults
print 'Found {} types'.format(len(types))
print 'Found {} nonbonded_pairs'.format(len(use_nonbond_params))
print 'Found {} masses'.format(len(masses))
print 'Found {} charges'.format(len(charges))
print 'Found {} atom type parameters'.format(len(use_atomtypeparams))
print 'Found {} bonds'.format(len(bonds))
print 'Found {} bond type parameters'.format(len(bondtypeparams))
print 'Found {} angles'.format(len(angles))
print 'Found {} angle type parameters'.format(len(angletypeparams))
print 'Found {} dihedrals'.format(len(dihedrals))
print 'Found {} dihedral type parameters'.format(len(dihedraltypeparams))
print 'Found {} 1-4 pair type parameters'.format(len(use_pairtypeparams))
print 'Found {} 1-4 pairs'.format(len(pairs_1_4))
print 'Found {} bond exclusions'.format(len(exclusions))
gromacs_system = GromacsSystem(
defaults, types, masses, charges, res_ids, use_atomtypeparams,
bonds, bondtypeparams, angles, angletypeparams,
dihedrals, dihedraltypeparams, pairs_1_4, use_pairtypeparams,
use_nonbond_params, exclusions)
return gromacs_system
def storeMolecules(f, molecules, mol=""):
nrexcl = 0
line = ''
line = f.readlastline()
while not 'moleculetype' in line:
line = f.readline()
line = line.split(';')[0]
if not line: break # break out of while if EOF
line = f.readline()
line = line.split(';')[0]
while (not f.eof() and not '[' in line):
if line[0] == ";": # skip comment lines
# print "skipping line: "+line.strip("\n")
line = f.readline()
line = line.split(';')[0]
continue
fields = line.split()
# mol = fields[0]
nrexcl = int(fields[1])
line = f.readline()
line = line.split(';')[0]
return nrexcl
def storeAtoms(f, defaults, types, atomtypes,
atomtypeparams,
use_atomtypeparams,
nonbondedparams,
use_nonbond_params,
masses,
charges,
molecule_index,
num_molecule_copies,
res_idx):
line = ''
types_tmp = []
charge_tmp = []
mass_tmp = []
pos = f.tell()
combinationrule = defaults['combinationrule']
line = f.readlastline()
line = line.split(';')[0]
while not 'atoms' in line:
line = f.readline()
line = line.split(';')[0]
if not line: break # break out of while if EOF
line = f.readline()
line = line.split(';')[0]
while (len(line) > 1 and not '[' in line):
if line[0] == ";": # skip comment lines
line = f.readline()
line = line.split(';')[0]
continue
fields = line.split()
attypeid = atomtypes[fields[1]] # map str type to int type
types_tmp.append(attypeid)
if len(fields) > 6:
# this atom has a charge different from its atomtype
charge_tmp.append(float(fields[6]))
else:
# look up default values for this atom type
charge_tmp.append(atomtypeparams[attypeid]['charge'])
if len(fields) > 7:
# also has a special mass
mass_tmp.append(float(fields[7]))
else:
mass_tmp.append(atomtypeparams[attypeid]['mass'])
use_atomtypeparams.update({attypeid: atomtypeparams[attypeid]})
line = f.readline()
line = line.split(';')[0]
# Convert to sigma/epsilon
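    # (combination rule 1 stores C6/C12; the usual GROMACS relations are
    #  sigma = (C12/C6)**(1/6) and epsilon = C6**2 / (4*C12), which is what
    #  convertc6c12() from topology_helper is assumed to implement here)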
if combinationrule == 1:
for k, v in use_atomtypeparams.iteritems():
c6, c12 = float(v['sig']), float(v['eps'])
sig, eps = convertc6c12(c6, c12)
print '{}, Convert C6({}), C12({}) to sig({}), eps({})'.format(
k, c6, c12, sig, eps)
use_atomtypeparams[k]['sig'] = sig
use_atomtypeparams[k]['eps'] = eps
# Prepare nonbonded params to contains only that store in atomtypes
for k, v in nonbondedparams.iteritems():
if k[0] in use_atomtypeparams and k[1] in use_atomtypeparams:
use_nonbond_params.update({k: v})
if combinationrule == 1:
c6, c12 = float(v['sig']), float(v['eps'])
sig, eps = convertc6c12(c6, c12)
print '{}, Convert C6({}), C12({}) to sig({}), eps({})'.format(
k, c6, c12, sig, eps)
use_nonbond_params[k]['sig'] = sig
use_nonbond_params[k]['eps'] = eps
f.seek(pos)
# extend copies of this molecule
num_atoms_molecule = len(types_tmp)
for i in range(num_molecule_copies):
types.extend(types_tmp)
charges.extend(charge_tmp)
masses.extend(mass_tmp)
molecule_index.extend([res_idx + i] * num_atoms_molecule)
return types, masses, charges, num_atoms_molecule, molecule_index, types_tmp
def storePairs(f, defaults, types, pairtypeparams,
use_pairtypeparams,
atomtypeparams, pairs, num_atoms_molecule, num_molecule_copies, molstartindex):
pairs_tmp = []
pos = f.tell()
fudgeLJ = float(defaults.get('fudgeLJ', 1.0))
print('Using fudgeLJ: {}'.format(fudgeLJ))
combinationrule = defaults['combinationrule']
types_pairtypeid = {}
line = f.readlastline()
line = line.split(';')[0]
in_section = False
cross_pairs = False
while line and 'moleculetype' not in line:
line = line.split(';')[0]
if line.startswith('['):
if 'pairs' in line or 'cross_pairs' in line:
in_section = True
cross_pairs = 'cross_pairs' in line
else:
in_section = False
line = f.readline().strip()
line = line.split(';')[0]
continue
elif line.startswith(';'):
line = f.readline().strip()
line = line.split(';')[0]
continue
else:
if in_section:
tmp = line.split(';')[0].split()
lookup = len(tmp) <= 3
pid1, pid2 = sorted(map(int, tmp[0:2]))
t1, t2 = sorted([types[pid1 - 1], types[pid2 - 1]])
pairtypeid = max(use_pairtypeparams) + 1 if use_pairtypeparams else 0
if lookup: # Look for parameters
at1 = atomtypeparams[t1]
at2 = atomtypeparams[t2]
if (t1, t2) in pairtypeparams:
if types_pairtypeid:
pairtypeid = types_pairtypeid.setdefault((t1, t2), max(types_pairtypeid.values()) + 1)
else:
pairtypeid = 0
types_pairtypeid[(t1, t2)] = 0
use_pairtypeparams[pairtypeid] = pairtypeparams[(t1, t2)]
else:
sig_1, eps_1 = at1['sig'], at1['eps']
sig_2, eps_2 = at2['sig'], at2['eps']
eps = fudgeLJ * (eps_1 * eps_2) ** (1.0 / 2.0)
if combinationrule == 2:
sig = 0.5 * (sig_1 + sig_2)
else:
sig = (sig_1 * sig_2) ** (1.0 / 2.0)
pairtypeid = max(use_pairtypeparams) + 1 if use_pairtypeparams else 0
use_pairtypeparams[pairtypeid] = {'sig': sig, 'eps': eps}
pairtypeparams[(t1, t2)] = use_pairtypeparams[pairtypeid]
types_pairtypeid[(t1, t2)] = pairtypeid
pairs_tmp.append((pid1, pid2, pairtypeid, cross_pairs))
else: # Params provided
if int(tmp[2]) != 1:
print('Warning! Supported only pair with type 1, given: {}'.format(
tmp[2]))
line = f.readline()
line = line.split(';')[0]
continue
sig = float(tmp[3])
eps = float(tmp[4])
if combinationrule == 1:
c6, c12 = sig, eps
sig, eps = convertc6c12(c6, c12)
use_pairtypeparams.update({pairtypeid: {
'sig': sig,
'eps': eps
}})
pairs_tmp.append((pid1, pid2, pairtypeid, cross_pairs))
pairtypeid += 1
line = f.readline()
line = line.split(';')[0]
f.seek(pos)
# Extend pairs to copies of molecule
pairs_per_molecule = len(pairs_tmp)
for i in range(num_molecule_copies):
for j in range(pairs_per_molecule):
pid1, pid2, pairtypeid, cross_pairs = pairs_tmp[j]
ia = molstartindex + pid1 + (i * num_atoms_molecule)
ib = molstartindex + pid2 + (i * num_atoms_molecule)
if (pairtypeid, cross_pairs) in pairs:
pairs[(pairtypeid, cross_pairs)].append((ia, ib))
else:
pairs.update({(pairtypeid, cross_pairs): [(ia, ib)]})
return pairs
def storeBonds(f, types, bondtypes, bondtypeparams, bonds, num_atoms_molecule, \
num_molecule_copies, molstartindex, attypeid_atnum):
line = ''
bonds_tmp = []
top = False
pos = f.tell()
line = f.readlastline()
local_exclusions = [] # excluded pairs of atoms within this mol (local ids)
line = f.readline().strip()
line = line.split(';')[0]
in_section = False
cross_bond = False
while line and 'moleculetype' not in line:
line = line.split(';')[0]
if line.startswith('['):
if 'bonds' in line or 'cross_bonds' in line:
in_section = True
cross_bond = 'cross_bond' in line
else:
in_section = False
line = f.readline().strip()
line = line.split(';')[0]
continue
elif line.startswith(';'):
line = f.readline().strip()
line = line.split(';')[0]
continue
else:
if in_section:
tmp = line.split(';')[0].split()
lookup = len(tmp) <= 3
pid1, pid2 = map(int, tmp[0:2])
if lookup:
t1, t2 = types[pid1 - 1], types[pid2 - 1]
if t1 > t2:
t1, t2 = t2, t1
try:
bdtypeid = bondtypes[t1][t2]
except KeyError:
t1, t2 = attypeid_atnum[t1], attypeid_atnum[t2]
try:
bdtypeid = bondtypes[t1][t2]
except KeyError as ex:
print('Bond types for {}-{} ({}-{}) not found'.format(
pid1, pid2, t1, t2))
print('Check your force-field or topology file.')
raise ex
else:
temptype = ParseBondTypeParam(line)
bdtypeid = FindType(temptype, bondtypeparams)
if bdtypeid == None:
bdtypeid = len(bondtypeparams)
bondtypeparams.update({bdtypeid: temptype})
bonds_tmp.append((pid1, pid2, bdtypeid, cross_bond))
if bondtypeparams[bdtypeid].automaticExclusion():
local_exclusions.append((pid1, pid2))
line = f.readline().strip()
line = line.split(';')[0]
f.seek(pos)
# extend bonds to copies of this molecule
bonds_per_mol = len(bonds_tmp)
for i in range(num_molecule_copies):
for j in range(bonds_per_mol):
pid1, pid2, bdtypeid, cross_bond = bonds_tmp[j][0:4]
ia = molstartindex + pid1 + (i * num_atoms_molecule) # index of copy atom i
ib = molstartindex + pid2 + (i * num_atoms_molecule) # index of copy atom j
if (bdtypeid, cross_bond) in bonds:
bonds[(bdtypeid, cross_bond)].append((ia, ib))
else:
bonds.update({(bdtypeid, cross_bond): [(ia, ib)]})
return bonds
def storeExclusions(exclusions, nrexcl, bonds):
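    # nrexcl is the GROMACS 'nrexcl' setting: non-bonded interactions are excluded between
    # atoms that are at most nrexcl bonds apart, so the exclusion list is generated by
    # walking the bond graph up to that depth.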
print('Processing exclusion lists for nrexcl={}'.format(nrexcl))
if nrexcl > 3:
raise RuntimeError('Currently nrexcl > 3 is not supported')
bond_list = [x for p in bonds.values() for x in p]
exclusions = GenerateRegularExclusions(bond_list, nrexcl, exclusions)
return exclusions
def storeAngles(f, types, angletypes, angletypeparams, angles, num_atoms_molecule, num_molecule_copies, molstartindex,
attypeid_atnum):
line = ''
angles_tmp = []
pos = f.tell()
line = f.readlastline()
line = f.readline().strip()
line = line.split(';')[0]
in_section = False
cross_angle = False
while line and 'moleculetype' not in line:
if line.startswith('['):
if 'angles' in line or 'cross_angles' in line:
in_section = True
cross_angle = 'cross_angles' in line
else:
in_section = False
line = f.readline().strip()
line = line.split(';')[0]
continue
elif line.startswith(';'):
line = f.readline().strip()
line = line.split(';')[0]
continue
else:
if in_section:
tmp = line.split(';')[0].split()
lookup = len(tmp) <= 4
pid1, pid2, pid3 = map(int, tmp[0:3])
if lookup:
t1, t2, t3 = types[pid1 - 1], types[pid2 - 1], types[pid3 - 1]
if t1 not in angletypes and t3 not in angletypes:
t1 = attypeid_atnum[t1]
t2 = attypeid_atnum[t2]
t3 = attypeid_atnum[t3]
try:
typeid = angletypes[t1][t2][t3]
except KeyError:
t1, t3 = t3, t1
try:
typeid = angletypes[t1][t2][t3]
except KeyError as ex:
print('Cannot find params for angle {}-{}-{} (type: {}-{}-{})'.format(
pid1, pid2, pid3, t1, t2, t3))
else:
# Checks if we need to make new type.
temptype = ParseAngleTypeParam(line)
typeid = FindType(temptype, angletypeparams)
if typeid == None:
typeid = len(angletypeparams)
angletypeparams.update({typeid: temptype})
angles_tmp.append((pid1, pid2, pid3, typeid, cross_angle))
line = f.readline()
line = line.split(';')[0]
f.seek(pos)
# extend angles to copies of this molecule
angles_per_mol = len(angles_tmp)
for i in range(num_molecule_copies):
for j in range(angles_per_mol):
pid1, pid2, pid3, antypeid, cross_angle = angles_tmp[j][0:5]
ia = molstartindex + pid1 + (i * num_atoms_molecule) # index of copy atom i
ib = molstartindex + pid2 + (i * num_atoms_molecule) # index of copy atom j
ic = molstartindex + pid3 + (i * num_atoms_molecule) # index of copy atom k
if (antypeid, cross_angle) in angles:
angles[(antypeid, cross_angle)].append((ia, ib, ic))
else:
angles.update({(antypeid, cross_angle): [(ia, ib, ic)]})
return angles
def storeDihedrals(f, types, dihedraltypes, dihedraltypeparams, dihedrals,
num_atoms_molecule, num_molecule_copies, molstartindex,
atomtypeparams, wildcard_type, attypeid_atnum):
line = ''
dihedrals_tmp = []
pos = f.tell()
line = f.readlastline()
line = f.readline().strip()
line = line.split(';')[0]
in_section = False
cross_dih = False
def check_type(t1, t2, t3, t4):
wt = 'X'
combinations = [
(t1, t2, t3, t4),
(wt, t2, t3, t4),
(t1, t2, t3, wt),
(wt, t2, t3, wt),
(t4, t3, t2, t1),
(wt, t3, t2, t1),
(t4, t3, t2, wt),
(wt, t3, t2, wt)
]
for n1, n2, n3, n4 in combinations:
try:
return dihedraltypes[n1][n2][n3][n4]
except KeyError:
continue
return dihedraltypes[n1][n2][n3][n4]
while line and 'moleculetype' not in line:
if line.startswith('['):
if 'dihedrals' in line or 'cross_dihedrals' in line:
in_section = True
cross_dih = 'cross_dihedrals' in line
else:
in_section = False
line = f.readline().strip()
line = line.split(';')[0]
continue
elif line.startswith(';'):
line = f.readline().strip()
line = line.split(';')[0]
continue
else:
if in_section:
# Skip improper dihedrals, not supported yet
if 'improper' in line:
line = f.readline().strip()
line = line.split(';')[0]
continue
tmp = line.split(';')[0].split()
lookup = len(tmp) <= 5
pid1, pid2, pid3, pid4 = map(int, tmp[0:4])
if lookup:
t1, t2, t3, t4 = (types[x - 1] for x in map(int, tmp[0:4]))
try:
dihtypeid = check_type(t1, t2, t3, t4)
except KeyError:
t1, t2, t3, t4 = (
atomtypeparams[t1]['atnum'],
atomtypeparams[t2]['atnum'],
atomtypeparams[t3]['atnum'],
atomtypeparams[t4]['atnum']
)
try:
dihtypeid = check_type(t1, t2, t3, t4)
except KeyError as ex:
                            print(('Dihedral\n\t- {}\nnot found. '
                                   'Please define parameters in the topology file.').format(line))
print('{} {} {} {}'.format(t1, t2, t3, t4))
raise ex
else:
# check if we need to make new type
temptype = ParseDihedralTypeParam(line)
dihtypeid = FindType(temptype, dihedraltypeparams)
if dihtypeid == None:
dihtypeid = len(dihedraltypeparams)
dihedraltypeparams.update({dihtypeid: temptype})
dihedrals_tmp.append((pid1, pid2, pid3, pid4, dihtypeid, cross_dih))
line = f.readline()
line = line.split(';')[0]
f.seek(pos)
# extend angles to copies of this molecule
dihedrals_per_mol = len(dihedrals_tmp)
for i in range(num_molecule_copies):
for j in range(dihedrals_per_mol):
pid1, pid2, pid3, pid4, dihtypeid, cross_dih = dihedrals_tmp[j][0:6]
ia = molstartindex + pid1 + (i * num_atoms_molecule) # index of copy atom i
ib = molstartindex + pid2 + (i * num_atoms_molecule) # index of copy atom j
ic = molstartindex + pid3 + (i * num_atoms_molecule) # index of copy atom k
id = molstartindex + pid4 + (i * num_atoms_molecule) # index of copy atom l
if (dihtypeid, cross_dih) in dihedrals:
dihedrals[(dihtypeid, cross_dih)].append((ia, ib, ic, id))
else:
dihedrals.update({(dihtypeid, cross_dih): [(ia, ib, ic, id)]})
return dihedrals
def genParticleList(input_conf, use_velocity=False, use_charge=False, use_adress=False):
"""Generates particle list
Args:
input_conf: The tuple generate by read method.
use_velocity: If set to true then velocity will be read.
use_charge: If set to true then charge will be read.
Returns:
List of property names and particle list.
"""
props = ['id', 'type', 'pos', 'res_id']
use_mass = bool(input_conf.masses)
use_velocity = use_velocity and bool(input_conf.vx)
use_charge = use_charge and bool(input_conf.charges)
if use_mass:
props.append('mass')
if use_velocity:
props.append('v')
if use_charge:
props.append('q')
Particle = namedtuple('Particle', props)
particle_list = []
num_particles = len(input_conf.types)
for pid in range(num_particles):
tmp = [pid + 1,
input_conf.types[pid],
espressopp.Real3D(input_conf.x[pid], input_conf.y[pid], input_conf.z[pid]),
input_conf.res_ids[pid]
]
if use_mass:
tmp.append(input_conf.masses[pid])
if use_velocity:
tmp.append(espressopp.Real3D(input_conf.vx[pid], input_conf.vy[pid], input_conf.vz[pid]))
if use_charge:
tmp.append(input_conf.charges[pid])
particle_list.append(Particle(*tmp))
return props, particle_list
def setBondedInteractions(system, bonds, bondtypeparams, ftpl=None):
ret_list = {}
for (bid, _), bondlist in bonds.iteritems():
if ftpl:
fpl = espressopp.FixedPairListAdress(system.storage, ftpl)
else:
fpl = espressopp.FixedPairList(system.storage)
fpl.addBonds(bondlist)
bdinteraction = bondtypeparams[bid].createEspressoInteraction(system, fpl)
if bdinteraction:
system.addInteraction(bdinteraction, 'bond_{}'.format(bid))
ret_list.update({bid: bdinteraction})
return ret_list
def setPairInteractions(system, pairs, pairtypeparams, cutoff, ftpl=None):
ret_list = {}
for (pid, _), pair_list in pairs.iteritems():
if ftpl:
fpl = espressopp.FixedPairListAdress(system.storage, ftpl)
else:
fpl = espressopp.FixedPairList(system.storage)
params = pairtypeparams[pid]
if params['sig'] > 0.0 and params['eps'] > 0.0:
fpl.addBonds(pair_list)
print 'Pair interaction', params, ' num pairs:', len(pair_list)
interaction = espressopp.interaction.FixedPairListLennardJones(
system,
fpl,
espressopp.interaction.LennardJones(
sigma=params['sig'],
epsilon=params['eps'],
shift='auto',
cutoff=cutoff))
system.addInteraction(interaction, 'lj14_{}'.format(pid))
ret_list[pid] = interaction
return ret_list
def setAngleInteractions(system, angles, angletypeparams, ftpl=None):
ret_list = {}
for (aid, _), anglelist in angles.iteritems():
if ftpl:
fpl = espressopp.FixedTripleListAdress(system.storage, ftpl)
else:
fpl = espressopp.FixedTripleList(system.storage)
fpl.addTriples(anglelist)
angleinteraction = angletypeparams[aid].createEspressoInteraction(system, fpl)
if angleinteraction:
system.addInteraction(angleinteraction, 'angle_{}'.format(aid))
ret_list.update({aid: angleinteraction})
return ret_list
def setDihedralInteractions(system, dihedrals, dihedraltypeparams, ftpl=None):
ret_list = {}
for (did, _), dihedrallist in dihedrals.iteritems():
if ftpl:
fpl = espressopp.FixedQuadrupleListAdress(system.storage, ftpl)
else:
fpl = espressopp.FixedQuadrupleList(system.storage)
fpl.addQuadruples(dihedrallist)
dihedralinteraction = dihedraltypeparams[did].createEspressoInteraction(system, fpl)
if dihedralinteraction:
system.addInteraction(dihedralinteraction, 'dihedral_{}'.format(did))
ret_list.update({did: dihedralinteraction})
return ret_list
def setLennardJonesInteractions(system, defaults, atomtypeparams, verletlist, cutoff, nonbonded_params=None,
hadress=False, ftpl=None, table_groups=None):
""" Set lennard jones interactions which were read from gromacs based on the atomypes"""
if table_groups is None:
table_groups = []
if ftpl:
if hadress:
interaction = espressopp.interaction.VerletListHadressLennardJones(verletlist, ftpl)
else:
interaction = espressopp.interaction.VerletListAdressLennardJones(verletlist, ftpl)
else:
interaction = espressopp.interaction.VerletListLennardJones(verletlist)
if nonbonded_params is None:
nonbonded_params = {}
combinationrule = int(defaults['combinationrule'])
print "Setting up Lennard-Jones interactions"
type_pairs = sorted({
tuple(sorted([type_1, type_2]))
for type_1, pi in atomtypeparams.iteritems()
for type_2, pj in atomtypeparams.iteritems()
if ((pi['atnum'] not in table_groups and pj['atnum'] not in table_groups) and \
(pi.get('atname') not in table_groups and pj.get('atname') not in table_groups))
})
print('Number of pairs: {}'.format(len(type_pairs)))
for type_1, type_2 in type_pairs:
pi = atomtypeparams[type_1]
pj = atomtypeparams[type_2]
if pi['particletype'] == 'V' or pj['particletype'] == 'V':
print('Skip {}-{}'.format(type_1, type_2))
continue
param = nonbonded_params.get((type_1, type_2))
if param:
print 'Using defined non-bonded cross params', param
sig, eps = param['sig'], param['eps']
else:
sig_1, eps_1 = float(pi['sig']), float(pi['eps'])
sig_2, eps_2 = float(pj['sig']), float(pj['eps'])
if combinationrule == 2:
sig = 0.5 * (sig_1 + sig_2)
eps = (eps_1 * eps_2) ** (1.0 / 2.0)
else:
sig = (sig_1 * sig_2) ** (1.0 / 2.0)
eps = (eps_1 * eps_2) ** (1.0 / 2.0)
if sig > 0.0 and eps > 0.0:
print "Setting LJ interaction for", type_1, type_2, "to sig ", sig, "eps", eps, "cutoff", cutoff
ljpot = espressopp.interaction.LennardJones(epsilon=eps, sigma=sig, shift='auto', cutoff=cutoff)
if ftpl:
interaction.setPotentialAT(type1=type_1, type2=type_2, potential=ljpot)
else:
interaction.setPotential(type1=type_1, type2=type_2, potential=ljpot)
system.addInteraction(interaction, 'lj')
return interaction
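# Illustrative sketch (not used by setLennardJonesInteractions): the two GROMACS
# combination rules applied above, written out for a single pair of atom types.
# Any numbers passed to these helpers are made-up examples, not force-field data.
def _combine_lorentz_berthelot(sig_1, eps_1, sig_2, eps_2):
    # combinationrule == 2: arithmetic mean for sigma, geometric mean for epsilon
    return 0.5 * (sig_1 + sig_2), (eps_1 * eps_2) ** 0.5
def _combine_geometric(sig_1, eps_1, sig_2, eps_2):
    # any other combination rule above: geometric mean for both parameters
    return (sig_1 * sig_2) ** 0.5, (eps_1 * eps_2) ** 0.5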
def setCoulombInteractions(system, verletlist, rc, atomtypeparams,
epsilon1, epsilon2, kappa, hadress=False, adress=False, ftpl=None,
pot=None, interaction=None
):
    pref = 138.935485 # we want gromacs units, so this is 1/(4 pi eps_0) in units of kJ mol^-1 nm e^-2
type_pairs = sorted({
tuple(sorted([type_1, type_2]))
for type_1, pi in atomtypeparams.iteritems()
for type_2, pj in atomtypeparams.iteritems()
if (#(pi.get('charge', 0.0) != 0.0 and pj.get('charge', 0.0) != 0.0) and \
(pi['particletype'] != 'V' and pj['particletype'] != 'V'))
})
print('Number of coulombic pairs: {}'.format(len(type_pairs)))
if type_pairs:
if pot is None:
pot = espressopp.interaction.ReactionFieldGeneralized(
prefactor=pref, kappa=kappa, epsilon1=epsilon1, epsilon2=epsilon2, cutoff=rc)
if interaction is None:
if hadress and adress:
raise RuntimeError('Ambiguous option, it is only possible to use Adress or HAdress.')
if adress:
interaction = espressopp.interaction.VerletListHybridReactionFieldGeneralized(verletlist)
else:
interaction = espressopp.interaction.VerletListReactionFieldGeneralized(verletlist)
if hadress or adress:
setPotential_fn = interaction.setPotentialAT
else:
setPotential_fn = interaction.setPotential
for type_1, type_2 in type_pairs:
print('Set coulomb interaction: {}-{}'.format(type_1, type_2))
setPotential_fn(type1=type_1, type2=type_2, potential=pot)
return interaction
else:
return None
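# Illustrative helper (not called above): recompute the electrostatic prefactor
# 138.935485, i.e. 1/(4 pi eps_0) in GROMACS units of kJ mol^-1 nm e^-2.
# It assumes SciPy is available, which this module does not otherwise require.
def _coulomb_prefactor_check():
    from scipy import constants
    return (constants.e ** 2 * constants.N_A /
            (4 * constants.pi * constants.epsilon_0 * 1e-9 * 1000.0))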
def setCoulombInteractionsProtein(system, verletlist, rc, types, epsilon1, epsilonprot, epsilonwat, kappa, otype, htype,
hadress=False, adress=False, ftpl=None):
print "# Setting up Coulomb reaction field interactions"
print "# Using ", epsilonwat, " for water and wat-prot and ", epsilonprot, " for protein"
    pref = 138.935485 # we want gromacs units, so this is 1/(4 pi eps_0) in units of kJ mol^-1 nm e^-2
potwat = espressopp.interaction.ReactionFieldGeneralized(prefactor=pref, kappa=kappa, epsilon1=epsilon1,
epsilon2=epsilonwat, cutoff=rc)
potprot = espressopp.interaction.ReactionFieldGeneralized(prefactor=pref, kappa=kappa, epsilon1=epsilon1,
epsilon2=epsilonprot, cutoff=rc)
if hadress and adress:
print "Error! In gromacs.setCoulombInteractions, you cannot use adress and hadress at the same time"
return
if hadress:
interaction = espressopp.interaction.VerletListHadressReactionFieldGeneralized(verletlist, ftpl)
elif adress:
interaction = espressopp.interaction.VerletListAdressReactionFieldGeneralized(verletlist, ftpl)
else:
interaction = espressopp.interaction.VerletListReactionFieldGeneralized(verletlist)
for i in range(max(types) + 1):
for k in range(i, max(types) + 1):
if i == otype or i == htype or k == otype or k == htype:
if hadress or adress:
interaction.setPotentialAT(type1=i, type2=k, potential=potwat)
else:
interaction.setPotential(type1=i, type2=k, potential=potwat)
else:
if hadress or adress:
interaction.setPotentialAT(type1=i, type2=k, potential=potprot)
else:
interaction.setPotential(type1=i, type2=k, potential=potprot)
system.addInteraction(interaction, 'coulomb_protein')
return interaction
def setCoulomb14Interactions(system, defaults, onefourlist, rc, types):
    # in GROMACS, 1-4 interactions don't have a reaction field correction
print "# Setting up 1-4 Coulomb interactions"
if defaults:
fudge = float(defaults['fudgeQQ'])
print "# Using electrostatics 1-4 fudge factor ", fudge
    pref = 138.935485 * fudge # we want gromacs units, so this is 1/(4 pi eps_0) in units of kJ mol^-1 nm e^-2, scaled by the fudge factor
# pot = espressopp.interaction.CoulombRSpace(prefactor=pref, alpha=0.0, cutoff=rc)
pot = espressopp.interaction.CoulombTruncated(prefactor=pref, cutoff=rc)
# interaction=espressopp.interaction.FixedPairListTypesCoulombRSpace(system,onefourlist)
interaction = espressopp.interaction.FixedPairListTypesCoulombTruncated(system, onefourlist)
for i in range(max(types) + 1):
for k in range(i, max(types) + 1):
interaction.setPotential(type1=i, type2=k, potential=pot)
system.addInteraction(interaction, 'coulomb14')
return interaction
def setTabulatedInteractions(system, atomtypeparams, vl, cutoff, interaction=None, ftpl=None, table_groups=None):
"""Sets tabulated potential for types that has particletype set to 'V'."""
spline_type = 1
if table_groups is None:
table_groups = []
type_pairs = {
tuple(sorted([type_1, type_2]))
for type_1, v1 in atomtypeparams.iteritems()
for type_2, v2 in atomtypeparams.iteritems()
if (v1['atnum'] in table_groups and v2['atnum'] in table_groups)
}
if len(type_pairs) > 0:
if interaction is None:
if ftpl:
interaction = espressopp.interaction.VerletListAdressTabulated(vl, ftpl)
else:
interaction = espressopp.interaction.VerletListTabulated(vl)
else:
if not ftpl:
interaction = espressopp.interaction.VerletListTabulated(vl)
for type_1, type_2 in type_pairs:
print('Set tabulated potential {}-{}'.format(type_1, type_2))
name_1 = atomtypeparams[type_1]['atnum']
name_2 = atomtypeparams[type_2]['atnum']
name_1, name_2 = sorted([name_1, name_2])
table_name = 'table_{}_{}.pot'.format(name_1, name_2)
if not os.path.exists(table_name):
orig_table_name = 'table_{}_{}.xvg'.format(name_1, name_2)
print('Converting table_{name1}_{name2}.xvg to table_{name1}_{name2}.pot'.format(
name1=name_1, name2=name_2))
convertTable(orig_table_name, table_name)
if ftpl:
interaction.setPotentialCG(
type1=type_1,
type2=type_2,
potential=espressopp.interaction.Tabulated(
itype=spline_type,
filename=table_name,
cutoff=cutoff))
else:
interaction.setPotential(
type1=type_1,
type2=type_2,
potential=espressopp.interaction.Tabulated(
itype=spline_type,
filename=table_name,
cutoff=cutoff))
if interaction and not ftpl:
system.addInteraction(interaction, 'lj_tab')
return interaction
else:
return None
| bakery-cg2at/bakery | src/gromacs_topology.py | Python | gpl-3.0 | 51,694 | [
"ESPResSo",
"Gromacs"
] | 97c7532369c8d2fd3347f6b8701d0bd0c697f63bf8599ee5bc65a9dc345c0677 |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from bigdl.orca.test_zoo_utils import ZooTestCase
from bigdl.chronos.model.arima import ARIMAModel
import numpy as np
import os
from numpy.testing import assert_array_almost_equal
import pandas as pd
class TestARIMAModel(ZooTestCase):
def setup_method(self, method):
np.random.seed(0)
self.seq_len = 400
self.config = {
"p": np.random.randint(0, 4),
"q": np.random.randint(0, 4),
"seasonality_mode": np.random.choice([True, False]),
"P": 5,
"Q": 5,
"m": np.random.choice([4, 7]),
"metric": "mse",
}
self.model = ARIMAModel()
self.data = np.random.rand(self.seq_len)
self.horizon = np.random.randint(2, 50)
self.validation_data = np.random.rand(self.horizon)
def teardown_method(self, method):
del self.model
del self.data
del self.validation_data
def test_arima(self):
# test fit_eval
evaluate_result = self.model.fit_eval(data=self.data,
validation_data=self.validation_data,
**self.config
)
# test predict
result = self.model.predict(horizon=self.horizon)
assert len(result) == self.horizon
# test evaluate
evaluate_result = self.model.evaluate(target=self.validation_data, metrics=['mae', 'smape'])
assert len(evaluate_result) == 2
# test rolling predict
rolling_result = self.model.predict(horizon=self.horizon, rolling=True)
assert len(rolling_result) == self.horizon
def test_error(self):
with pytest.raises(ValueError, match="x should be None"):
self.model.predict(x=1)
with pytest.raises(ValueError, match="We don't support input x currently"):
self.model.evaluate(target=self.validation_data, x=1)
with pytest.raises(ValueError, match="Input invalid target of None"):
self.model.evaluate(target=None)
with pytest.raises(Exception,
match="Needs to call fit_eval or restore first before calling predict"):
self.model.predict(horizon=self.horizon)
with pytest.raises(Exception,
match="We don't support updating model "
"without rolling prediction currently"
):
self.model.predict(horizon=self.horizon, update=True, rolling=False)
with pytest.raises(Exception,
match="Needs to call fit_eval or restore first before calling evaluate"):
self.model.evaluate(target=self.validation_data, x=None)
with pytest.raises(Exception,
match="Needs to call fit_eval or restore first before calling save"):
model_file = "tmp.pkl"
self.model.save(model_file)
def test_save_restore(self):
self.model.fit_eval(data=self.data, validation_data=self.validation_data, **self.config)
result_save = self.model.predict(horizon=self.horizon, rolling=False)
model_file = "tmp.pkl"
self.model.save(model_file)
assert os.path.isfile(model_file)
new_model = ARIMAModel()
new_model.restore(model_file)
assert new_model.model
result_restore = new_model.predict(horizon=self.horizon, rolling=False)
assert_array_almost_equal(result_save, result_restore, decimal=2), \
"Prediction values are not the same after restore: " \
"predict before is {}, and predict after is {}".format(result_save, result_restore)
os.remove(model_file)
if __name__ == '__main__':
pytest.main([__file__])
| intel-analytics/BigDL | python/chronos/test/bigdl/chronos/model/test_arima.py | Python | apache-2.0 | 4,423 | [
"ORCA"
] | 2513e1270762876b668de1d1f8f52e544dadd1ebd8cb83a1f9d1e60ad39225d2 |
########################################################################
# $HeadURL$
# File : AgentModule.py
# Author : Adria Casajus
########################################################################
"""
Base class for all agent modules
"""
__RCSID__ = "$Id$"
import os
import threading
import types
import time
import DIRAC
from DIRAC import S_OK, S_ERROR, gConfig, gLogger, gMonitor, rootPath
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.FrameworkSystem.Client.MonitoringClient import MonitoringClient
from DIRAC.Core.Utilities.Shifter import setupShifterProxyInEnv
from DIRAC.Core.Utilities.ReturnValues import isReturnStructure
from DIRAC.Core.Utilities import Time, MemStat
def _checkDir( path ):
try:
os.makedirs( path )
except Exception:
pass
if not os.path.isdir( path ):
raise Exception( 'Can not create %s' % path )
class AgentModule:
""" Base class for all agent modules
This class is used by the AgentReactor Class to steer the execution of
DIRAC Agents.
For this purpose the following methods are used:
- am_initialize() just after instantiated
- am_getPollingTime() to set the execution frequency
- am_getMaxCycles() to determine the number of cycles
- am_go() for the actual execution of one cycle
Before each iteration, the following methods are used to determine
if the new cycle is to be started.
- am_getModuleParam( 'alive' )
- am_checkStopAgentFile()
- am_removeStopAgentFile()
To start new execution cycle the following methods are used
- am_getCyclesDone()
- am_setOption( 'MaxCycles', maxCycles )
At the same time it provides all Agents with common interface.
    All Agent classes must inherit from this base class and must implement
at least the following method:
- execute() main method called in the agent cycle
Additionally they may provide:
- initialize() for initial settings
- finalize() the graceful exit
- beginExecution() before each execution cycle
- endExecution() at the end of each execution cycle
The agent can be stopped either by a signal or by creating a 'stop_agent' file
in the controlDirectory defined in the agent configuration
"""
def __init__( self, agentName, loadName, baseAgentName = False, properties = {} ):
"""
Common __init__ method for all Agents.
All Agent modules must define:
__doc__
__RCSID__
They are used to populate __codeProperties
The following Options are used from the Configuration:
- /LocalSite/InstancePath
- /DIRAC/Setup
- Status
- Enabled
- PollingTime default = 120
- MaxCycles default = 500
- ControlDirectory control/SystemName/AgentName
- WorkDirectory work/SystemName/AgentName
- shifterProxy ''
- shifterProxyLocation WorkDirectory/SystemName/AgentName/.shifterCred
It defines the following default Options that can be set via Configuration (above):
- MonitoringEnabled True
- Enabled True if Status == Active
- PollingTime 120
- MaxCycles 500
- ControlDirectory control/SystemName/AgentName
- WorkDirectory work/SystemName/AgentName
- shifterProxy False
- shifterProxyLocation work/SystemName/AgentName/.shifterCred
different defaults can be set in the initialize() method of the Agent using am_setOption()
In order to get a shifter proxy in the environment during the execute()
the configuration Option 'shifterProxy' must be set, a default may be given
in the initialize() method.
"""
if baseAgentName and agentName == baseAgentName:
self.log = gLogger
standaloneModule = True
else:
self.log = gLogger.getSubLogger( agentName, child = False )
standaloneModule = False
self.__basePath = gConfig.getValue( '/LocalSite/InstancePath', rootPath )
self.__agentModule = None
self.__codeProperties = {}
self.__getCodeInfo()
self.__moduleProperties = { 'fullName' : agentName,
'loadName' : loadName,
'section' : PathFinder.getAgentSection( agentName ),
'loadSection' : PathFinder.getAgentSection( loadName ),
'standalone' : standaloneModule,
'cyclesDone' : 0,
'totalElapsedTime' : 0,
'setup' : gConfig.getValue( "/DIRAC/Setup", "Unknown" ),
'alive' : True }
self.__moduleProperties[ 'system' ], self.__moduleProperties[ 'agentName' ] = agentName.split( "/" )
self.__configDefaults = {}
self.__configDefaults[ 'MonitoringEnabled'] = True
    self.__configDefaults[ 'Enabled'] = self.am_getOption( "Status", "Active" ).lower() in ( 'active', )
self.__configDefaults[ 'PollingTime'] = self.am_getOption( "PollingTime", 120 )
self.__configDefaults[ 'MaxCycles'] = self.am_getOption( "MaxCycles", 500 )
self.__configDefaults[ 'ControlDirectory' ] = os.path.join( self.__basePath,
'control',
*agentName.split( "/" ) )
self.__configDefaults[ 'WorkDirectory' ] = os.path.join( self.__basePath,
'work',
*agentName.split( "/" ) )
self.__configDefaults[ 'shifterProxy' ] = ''
self.__configDefaults[ 'shifterProxyLocation' ] = os.path.join( self.__configDefaults[ 'WorkDirectory' ],
'.shifterCred' )
if type( properties ) == types.DictType:
for key in properties:
self.__moduleProperties[ key ] = properties[ key ]
self.__moduleProperties[ 'executors' ] = [ ( self.execute, () ) ]
self.__moduleProperties[ 'shifterProxy' ] = False
self.__monitorLastStatsUpdate = -1
self.monitor = None
self.__initializeMonitor()
self.__initialized = False
def __getCodeInfo( self ):
versionVar = "__RCSID__"
docVar = "__doc__"
try:
self.__agentModule = __import__( self.__class__.__module__,
globals(),
locals(),
versionVar )
except Exception:
self.log.exception( "Cannot load agent module" )
for prop in ( ( versionVar, "version" ), ( docVar, "description" ) ):
try:
self.__codeProperties[ prop[1] ] = getattr( self.__agentModule, prop[0] )
except Exception:
self.log.error( "Missing property", prop[0] )
self.__codeProperties[ prop[1] ] = 'unset'
self.__codeProperties[ 'DIRACVersion' ] = DIRAC.version
self.__codeProperties[ 'platform' ] = DIRAC.platform
def am_initialize( self, *initArgs ):
agentName = self.am_getModuleParam( 'fullName' )
result = self.initialize( *initArgs )
if not isReturnStructure( result ):
return S_ERROR( "initialize must return S_OK/S_ERROR" )
if not result[ 'OK' ]:
return S_ERROR( "Error while initializing %s: %s" % ( agentName, result[ 'Message' ] ) )
_checkDir( self.am_getControlDirectory() )
workDirectory = self.am_getWorkDirectory()
_checkDir( workDirectory )
# Set the work directory in an environment variable available to subprocesses if needed
os.environ['AGENT_WORKDIRECTORY'] = workDirectory
self.__moduleProperties[ 'shifterProxy' ] = self.am_getOption( 'shifterProxy' )
if self.am_monitoringEnabled():
self.monitor.enable()
if len( self.__moduleProperties[ 'executors' ] ) < 1:
return S_ERROR( "At least one executor method has to be defined" )
if not self.am_Enabled():
return S_ERROR( "Agent is disabled via the configuration" )
self.log.notice( "="*40 )
self.log.notice( "Loaded agent module %s" % self.__moduleProperties[ 'fullName' ] )
self.log.notice( " Site: %s" % DIRAC.siteName() )
self.log.notice( " Setup: %s" % gConfig.getValue( "/DIRAC/Setup" ) )
self.log.notice( " Base Module version: %s " % __RCSID__ )
self.log.notice( " Agent version: %s" % self.__codeProperties[ 'version' ] )
self.log.notice( " DIRAC version: %s" % DIRAC.version )
self.log.notice( " DIRAC platform: %s" % DIRAC.platform )
pollingTime = int( self.am_getOption( 'PollingTime' ) )
if pollingTime > 3600:
self.log.notice( " Polling time: %s hours" % ( pollingTime / 3600. ) )
else:
self.log.notice( " Polling time: %s seconds" % self.am_getOption( 'PollingTime' ) )
self.log.notice( " Control dir: %s" % self.am_getControlDirectory() )
self.log.notice( " Work dir: %s" % self.am_getWorkDirectory() )
if self.am_getOption( 'MaxCycles' ) > 0:
self.log.notice( " Cycles: %s" % self.am_getMaxCycles() )
else:
self.log.notice( " Cycles: unlimited" )
self.log.notice( "="*40 )
self.__initialized = True
return S_OK()
def am_getControlDirectory( self ):
return os.path.join( self.__basePath, str( self.am_getOption( 'ControlDirectory' ) ) )
def am_getStopAgentFile( self ):
return os.path.join( self.am_getControlDirectory(), 'stop_agent' )
def am_checkStopAgentFile( self ):
return os.path.isfile( self.am_getStopAgentFile() )
def am_createStopAgentFile( self ):
try:
fd = open( self.am_getStopAgentFile(), 'w' )
fd.write( 'Dirac site agent Stopped at %s' % Time.toString() )
fd.close()
except Exception:
pass
def am_removeStopAgentFile( self ):
try:
os.unlink( self.am_getStopAgentFile() )
except Exception:
pass
def am_getBasePath( self ):
return self.__basePath
def am_getWorkDirectory( self ):
return os.path.join( self.__basePath, str( self.am_getOption( 'WorkDirectory' ) ) )
def am_getShifterProxyLocation( self ):
return os.path.join( self.__basePath, str( self.am_getOption( 'shifterProxyLocation' ) ) )
def am_getOption( self, optionName, defaultValue = None ):
    if defaultValue is None:
if optionName in self.__configDefaults:
defaultValue = self.__configDefaults[ optionName ]
if optionName and optionName[0] == "/":
return gConfig.getValue( optionName, defaultValue )
for section in ( self.__moduleProperties[ 'section' ], self.__moduleProperties[ 'loadSection' ] ):
result = gConfig.getOption( "%s/%s" % ( section, optionName ), defaultValue )
if result[ 'OK' ]:
return result[ 'Value' ]
return defaultValue
def am_setOption( self, optionName, value ):
self.__configDefaults[ optionName ] = value
def am_getModuleParam( self, optionName ):
return self.__moduleProperties[ optionName ]
def am_setModuleParam( self, optionName, value ):
self.__moduleProperties[ optionName ] = value
def am_getPollingTime( self ):
return self.am_getOption( "PollingTime" )
def am_getMaxCycles( self ):
return self.am_getOption( "MaxCycles" )
def am_getCyclesDone( self ):
return self.am_getModuleParam( 'cyclesDone' )
def am_Enabled( self ):
return self.am_getOption( "Enabled" )
def am_disableMonitoring( self ):
self.am_setOption( 'MonitoringEnabled' , False )
def am_monitoringEnabled( self ):
return self.am_getOption( "MonitoringEnabled" )
def am_stopExecution( self ):
self.am_setModuleParam( 'alive', False )
def __initializeMonitor( self ):
"""
Initialize the system monitor client
"""
if self.__moduleProperties[ 'standalone' ]:
self.monitor = gMonitor
else:
self.monitor = MonitoringClient()
self.monitor.setComponentType( self.monitor.COMPONENT_AGENT )
self.monitor.setComponentName( self.__moduleProperties[ 'fullName' ] )
self.monitor.initialize()
self.monitor.registerActivity( 'CPU', "CPU Usage", 'Framework', "CPU,%", self.monitor.OP_MEAN, 600 )
self.monitor.registerActivity( 'MEM', "Memory Usage", 'Framework', 'Memory,MB', self.monitor.OP_MEAN, 600 )
# Component monitor
for field in ( 'version', 'DIRACVersion', 'description', 'platform' ):
self.monitor.setComponentExtraParam( field, self.__codeProperties[ field ] )
self.monitor.setComponentExtraParam( 'startTime', Time.dateTime() )
self.monitor.setComponentExtraParam( 'cycles', 0 )
self.monitor.disable()
self.__monitorLastStatsUpdate = time.time()
def am_secureCall( self, functor, args = (), name = False ):
if not name:
name = str( functor )
try:
result = functor( *args )
if not isReturnStructure( result ):
raise Exception( "%s method for %s module has to return S_OK/S_ERROR" % ( name, self.__moduleProperties[ 'fullName' ] ) )
return result
except Exception, e:
self.log.exception( "Agent exception while calling method", name )
return S_ERROR( "Exception while calling %s method: %s" % ( name, str( e ) ) )
def _setShifterProxy( self ):
if self.__moduleProperties[ "shifterProxy" ]:
result = setupShifterProxyInEnv( self.__moduleProperties[ "shifterProxy" ],
self.am_getShifterProxyLocation() )
if not result[ 'OK' ]:
self.log.error( "Failed to set shifter proxy", result['Message'] )
return result
return S_OK()
def am_go( self ):
# Set the shifter proxy if required
result = self._setShifterProxy()
if not result[ 'OK' ]:
return result
self.log.notice( "-"*40 )
self.log.notice( "Starting cycle for module %s" % self.__moduleProperties[ 'fullName' ] )
mD = self.am_getMaxCycles()
if mD > 0:
cD = self.__moduleProperties[ 'cyclesDone' ]
self.log.notice( "Remaining %s of %s cycles" % ( mD - cD, mD ) )
self.log.notice( "-"*40 )
elapsedTime = time.time()
cpuStats = self._startReportToMonitoring()
cycleResult = self.__executeModuleCycle()
if cpuStats:
self._endReportToMonitoring( *cpuStats )
# Increment counters
self.__moduleProperties[ 'cyclesDone' ] += 1
# Show status
elapsedTime = time.time() - elapsedTime
self.__moduleProperties[ 'totalElapsedTime' ] += elapsedTime
self.log.notice( "-"*40 )
self.log.notice( "Agent module %s run summary" % self.__moduleProperties[ 'fullName' ] )
self.log.notice( " Executed %s times previously" % self.__moduleProperties[ 'cyclesDone' ] )
self.log.notice( " Cycle took %.2f seconds" % elapsedTime )
averageElapsedTime = self.__moduleProperties[ 'totalElapsedTime' ] / self.__moduleProperties[ 'cyclesDone' ]
self.log.notice( " Average execution time: %.2f seconds" % ( averageElapsedTime ) )
elapsedPollingRate = averageElapsedTime * 100 / self.am_getOption( 'PollingTime' )
self.log.notice( " Polling time: %s seconds" % self.am_getOption( 'PollingTime' ) )
self.log.notice( " Average execution/polling time: %.2f%%" % elapsedPollingRate )
if cycleResult[ 'OK' ]:
self.log.notice( " Cycle was successful" )
else:
self.log.warn( " Cycle had an error:", cycleResult[ 'Message' ] )
self.log.notice( "-"*40 )
# Update number of cycles
self.monitor.setComponentExtraParam( 'cycles', self.__moduleProperties[ 'cyclesDone' ] )
return cycleResult
def _startReportToMonitoring( self ):
try:
now = time.time()
stats = os.times()
cpuTime = stats[0] + stats[2]
if now - self.__monitorLastStatsUpdate < 10:
return ( now, cpuTime )
# Send CPU consumption mark
self.__monitorLastStatsUpdate = now
# Send Memory consumption mark
membytes = MemStat.VmB( 'VmRSS:' )
if membytes:
mem = membytes / ( 1024. * 1024. )
gMonitor.addMark( 'MEM', mem )
return( now, cpuTime )
except Exception:
return False
def _endReportToMonitoring( self, initialWallTime, initialCPUTime ):
wallTime = time.time() - initialWallTime
stats = os.times()
cpuTime = stats[0] + stats[2] - initialCPUTime
percentage = cpuTime / wallTime * 100.
if percentage > 0:
gMonitor.addMark( 'CPU', percentage )
def __executeModuleCycle( self ):
# Execute the beginExecution function
result = self.am_secureCall( self.beginExecution, name = "beginExecution" )
if not result[ 'OK' ]:
return result
# Launch executor functions
executors = self.__moduleProperties[ 'executors' ]
if len( executors ) == 1:
result = self.am_secureCall( executors[0][0], executors[0][1] )
if not result[ 'OK' ]:
return result
else:
exeThreads = [ threading.Thread( target = executor[0], args = executor[1] ) for executor in executors ]
for thread in exeThreads:
thread.setDaemon( 1 )
thread.start()
for thread in exeThreads:
thread.join()
# Execute the endExecution function
return self.am_secureCall( self.endExecution, name = "endExecution" )
def initialize( self, *args, **kwargs ):
return S_OK()
def beginExecution( self ):
return S_OK()
def endExecution( self ):
return S_OK()
def finalize( self ):
return S_OK()
def execute( self ):
return S_ERROR( "Execute method has to be overwritten by agent module" )
| Sbalbp/DIRAC | Core/Base/AgentModule.py | Python | gpl-3.0 | 17,594 | [
"DIRAC"
] | 443b378dc07d95046facc08ad411b8a99b0dda926d90d89c0e6e1f56c9d5cbbd |
#!/usr/bin/python
import os
import sys
import time
import glob
import trace
import tempfile
tmpdir = tempfile.mkdtemp(prefix='ase-')
os.chdir(tmpdir)
def build():
if os.system('svn checkout ' +
'https://svn.fysik.dtu.dk/projects/ase/trunk ase') != 0:
raise RuntimeError('Checkout of ASE failed!')
os.chdir('ase')
if os.system('python setup.py install --home=.') != 0:
raise RuntimeError('Installation failed!')
sys.path.insert(0, 'lib/python')
from ase.test import test
from ase.version import version
# Run test-suite:
stream = open('test-results.txt', 'w')
results = test(verbosity=2, dir='ase/test', display=False, stream=stream)
stream.close()
if len(results.failures) > 0 or len(results.errors) > 0:
address = 'ase-developers@listserv.fysik.dtu.dk'
subject = 'ASE test-suite failed!'
os.system('mail -s "%s" %s < %s' %
(subject, address, 'test-results.txt'))
raise RuntimeError('Testsuite failed!')
# Generate tar-file:
assert os.system('python setup.py sdist') == 0
if os.system('epydoc --docformat restructuredtext --parse-only ' +
'--name ASE ' +
'--url http://wiki.fysik.dtu.dk/ase ' +
'--show-imports --no-frames -v ase >& epydoc.out') != 0:
raise RuntimeError('Epydoc failed!')
epydoc_errors = open('epydoc.out').read()
if ' Warning:' in epydoc_errors:
sys.stderr.write(epydoc_errors)
os.chdir('doc')
os.mkdir('_build')
if os.system('PYTHONPATH=%s/ase sphinx-build . _build' % tmpdir) != 0:
raise RuntimeError('Sphinx failed!')
os.system('cd _build; cp _static/searchtools.js .; ' +
'sed -i s/snapshot.tar/%s.tar/ download.html' % version)
if 1:
if os.system('PYTHONPATH=%s/ase ' % tmpdir +
'sphinx-build -b latex . _build 2> error') != 0:
raise RuntimeError('Sphinx failed!')
os.system(
'grep -v "WARNING: unusable reference target found" error 1>&2')
os.chdir('_build')
#os.system('cd ../..; ln -s doc/_static')
if os.system('make ase-manual.pdf 2>&1') != 0:
raise RuntimeError('pdflatex failed!')
else:
os.chdir('_build')
assert os.system('mv ../../html epydoc;' +
'mv ../../dist/python-ase-%s.tar.gz .' % version) == 0
tarfiledir = None
if len(sys.argv) == 2:
tarfiledir = sys.argv[1]
try:
os.remove(tarfiledir + '/ase-webpages.tar.gz')
except OSError:
pass
build()
if tarfiledir is not None:
os.system('cd ..; tar czf %s/ase-webpages.tar.gz _build' % tarfiledir)
os.system('cd; rm -r ' + tmpdir)
| slabanja/ase | tools/sphinx.py | Python | gpl-2.0 | 2,767 | [
"ASE"
] | 9dfe0de966c3b4367f6cd5894b493c97953d126d04610a3ae6eb8e2a37b55ad4 |
# -*- coding: utf-8 -*-
#
# test_synaptic_elements.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
__author__ = 'naveau'
import nest
import unittest
class TestSynapticElements(unittest.TestCase):
def setUp(self):
nest.ResetKernel()
def test_set_status(self):
synaptic_element_dict = {u'SE': {u'z': 15.0, u'growth_curve': u'linear'}}
neuron = nest.Create('iaf_neuron', 1)
nest.SetStatus(neuron, {'synaptic_elements': synaptic_element_dict})
neuron_synaptic_elements = nest.GetStatus(neuron, 'synaptic_elements')[0]
self.assertIn('SE', neuron_synaptic_elements)
self.assertDictContainsSubset(synaptic_element_dict[u'SE'], neuron_synaptic_elements[u'SE'])
def test_set_status_overwrite(self):
synaptic_element_dict1 = {u'SE1': {u'z': 15.0, u'growth_curve': u'linear'}}
synaptic_element_dict2 = {u'SE2': {u'z': 10.0, u'growth_curve': u'gaussian'}}
neuron = nest.Create('iaf_neuron', 1)
nest.SetStatus(neuron, {'synaptic_elements': synaptic_element_dict1})
nest.SetStatus(neuron, {'synaptic_elements': synaptic_element_dict2})
neuron_synaptic_elements = nest.GetStatus(neuron, 'synaptic_elements')[0]
self.assertNotIn('SE1', neuron_synaptic_elements)
self.assertIn('SE2', neuron_synaptic_elements)
self.assertDictContainsSubset(synaptic_element_dict2[u'SE2'], neuron_synaptic_elements[u'SE2'])
def test_set_defaults(self):
synaptic_element_dict = {u'SE': {u'z': 15.0, u'growth_curve': u'linear'}}
nest.SetDefaults('iaf_neuron', {'synaptic_elements': synaptic_element_dict})
neuron = nest.Create('iaf_neuron', 1)
neuron_synaptic_elements = nest.GetStatus(neuron, 'synaptic_elements')[0]
self.assertIn('SE', neuron_synaptic_elements)
self.assertDictContainsSubset(synaptic_element_dict[u'SE'], neuron_synaptic_elements[u'SE'])
def test_set_defaults_overwrite(self):
synaptic_element_dict1 = {u'SE1': {u'z': 15.0, u'growth_curve': u'linear'}}
synaptic_element_dict2 = {u'SE2': {u'z': 10.0, u'growth_curve': u'gaussian'}}
nest.SetDefaults('iaf_neuron', {'synaptic_elements': synaptic_element_dict1})
nest.SetDefaults('iaf_neuron', {'synaptic_elements': synaptic_element_dict2})
neuron = nest.Create('iaf_neuron', 1)
neuron_synaptic_elements = nest.GetStatus(neuron, 'synaptic_elements')[0]
self.assertNotIn('SE1', neuron_synaptic_elements)
self.assertIn('SE2', neuron_synaptic_elements)
self.assertDictContainsSubset(synaptic_element_dict2[u'SE2'], neuron_synaptic_elements[u'SE2'])
def suite():
test_suite = unittest.makeSuite(TestSynapticElements, 'test')
return test_suite
if __name__ == '__main__':
unittest.main()
| magnastrazh/NEUCOGAR | nest/serotonin/research/C/nest-2.10.0/pynest/nest/tests/test_sp/test_synaptic_elements.py | Python | gpl-2.0 | 3,448 | [
"Gaussian",
"NEURON"
] | 80bace34e2d71d3556c87cc61a10ed56695c9f91916c866b5939a5ee28f5fec5 |
"""
Finding a signal in a background
--------------------------------
Figure 5.26
Fitting a model of a signal in an unknown background. The histogram in the
top-right panel visualizes a sample drawn from a Gaussian signal plus a
uniform background model given by eq. 5.83 and shown by the line. The remaining
panels show projections of the three-dimensional posterior pdf, based on a
20,000 point MCMC chain.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from scipy import stats
# Hack to fix import issue in older versions of pymc
import scipy
import scipy.misc
scipy.derivative = scipy.misc.derivative
import pymc
from astroML.plotting import plot_mcmc
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#----------------------------------------------------------------------
# Set up dataset: gaussian signal in a uniform background
np.random.seed(0)
N = 100
A_true = 0.3
W_true = 10
x0_true = 6
sigma_true = 0.3
signal = stats.norm(x0_true, sigma_true)
background = stats.uniform(0, W_true)
x = np.random.random(N)
i_sig = x < A_true
i_bg = ~i_sig
x[i_sig] = signal.rvs(np.sum(i_sig))
x[i_bg] = background.rvs(np.sum(i_bg))
#----------------------------------------------------------------------
# Set up MCMC sampling
A = pymc.Uniform('A', 0, 1, value=0.5)
x0 = pymc.Uniform('x0', 0, 10, value=5)
log_sigma = pymc.Uniform('log_sigma', -5, 5, value=0)
@pymc.deterministic
def sigma(log_sigma=log_sigma):
return np.exp(log_sigma)
def sigbg_like(x, A, x0, sigma):
"""signal + background likelihood"""
return np.sum(np.log(A * np.exp(-0.5 * ((x - x0) / sigma) ** 2)
/ np.sqrt(2 * np.pi) / sigma
+ (1 - A) / W_true))
SigBG = pymc.stochastic_from_dist('sigbg',
logp=sigbg_like,
dtype=np.float, mv=True)
M = SigBG('M', A, x0, sigma, observed=True, value=x)
model = dict(M=M, A=A, x0=x0, log_sigma=log_sigma, sigma=sigma)
#----------------------------------------------------------------------
# Run the MCMC sampling
S = pymc.MCMC(model)
S.sample(iter=25000, burn=5000)
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 5))
ax_list = plot_mcmc([S.trace(s)[:] for s in ['A', 'x0', 'sigma']],
limits=[(0.05, 0.65), (5.75, 6.65), (0.05, 0.85)],
labels=['$A$', '$\mu$', r'$\sigma$'],
bounds=(0.1, 0.1, 0.95, 0.95),
true_values=[A_true, x0_true, sigma_true],
fig=fig, colors='k')
ax = plt.axes([0.62, 0.62, 0.33, 0.33])
x_pdf = np.linspace(0, 10, 100)
y_pdf = A_true * signal.pdf(x_pdf) + (1 - A_true) * background.pdf(x_pdf)
ax.hist(x, 15, normed=True, histtype='stepfilled', alpha=0.5)
ax.plot(x_pdf, y_pdf, '-k')
ax.set_xlim(0, 10)
ax.set_ylim(0, 0.5)
ax.set_xlabel('$x$')
ax.set_ylabel(r'$y_{\rm obs}$')
plt.show()
| kcavagnolo/astroML | book_figures/chapter5/fig_signal_background.py | Python | bsd-2-clause | 3,660 | [
"Gaussian"
] | d53b8b4be667606a4c568fd9c375996416a929c3452ad57d51ee83951e305edf |
from __future__ import print_function
import time
from bokeh.browserlib import view
from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid, GlyphRenderer, Circle, HoverTool, BoxSelectTool
from bokeh.models.widgets import Select, HBox, VBox, DataTable, TableColumn, StringFormatter, NumberFormatter, StringEditor, IntEditor, NumberEditor, SelectEditor
from bokeh.document import Document
from bokeh.session import Session
from bokeh.sampledata.autompg2 import autompg2 as mpg
class DataTables(object):
def __init__(self):
self.document = Document()
self.session = Session()
self.session.use_doc('data_tables_server')
self.session.load_document(self.document)
self.manufacturer_filter = None
self.model_filter = None
self.transmission_filter = None
self.drive_filter = None
self.class_filter = None
self.source = ColumnDataSource()
self.update_data()
self.document.add(self.create())
self.session.store_document(self.document)
def create(self):
manufacturers = sorted(mpg["manufacturer"].unique())
models = sorted(mpg["model"].unique())
transmissions = sorted(mpg["trans"].unique())
drives = sorted(mpg["drv"].unique())
classes = sorted(mpg["class"].unique())
manufacturer_select = Select(title="Manufacturer:", value="All", options=["All"] + manufacturers)
manufacturer_select.on_change('value', self.on_manufacturer_change)
model_select = Select(title="Model:", value="All", options=["All"] + models)
model_select.on_change('value', self.on_model_change)
transmission_select = Select(title="Transmission:", value="All", options=["All"] + transmissions)
transmission_select.on_change('value', self.on_transmission_change)
drive_select = Select(title="Drive:", value="All", options=["All"] + drives)
drive_select.on_change('value', self.on_drive_change)
class_select = Select(title="Class:", value="All", options=["All"] + classes)
class_select.on_change('value', self.on_class_change)
columns = [
TableColumn(field="manufacturer", title="Manufacturer", editor=SelectEditor(options=manufacturers), formatter=StringFormatter(font_style="bold")),
TableColumn(field="model", title="Model", editor=StringEditor(completions=models)),
TableColumn(field="displ", title="Displacement", editor=NumberEditor(step=0.1), formatter=NumberFormatter(format="0.0")),
TableColumn(field="year", title="Year", editor=IntEditor()),
TableColumn(field="cyl", title="Cylinders", editor=IntEditor()),
TableColumn(field="trans", title="Transmission", editor=SelectEditor(options=transmissions)),
TableColumn(field="drv", title="Drive", editor=SelectEditor(options=drives)),
TableColumn(field="class", title="Class", editor=SelectEditor(options=classes)),
TableColumn(field="cty", title="City MPG", editor=IntEditor()),
TableColumn(field="hwy", title="Highway MPG", editor=IntEditor()),
]
data_table = DataTable(source=self.source, columns=columns, editable=True)
xdr = DataRange1d(sources=[self.source.columns("index")])
ydr = DataRange1d(sources=[self.source.columns("cty"), self.source.columns("hwy")])
plot = Plot(title=None, x_range=xdr, y_range=ydr, plot_width=800, plot_height=300)
xaxis = LinearAxis(plot=plot)
plot.below.append(xaxis)
yaxis = LinearAxis(plot=plot)
ygrid = Grid(plot=plot, dimension=1, ticker=yaxis.ticker)
plot.left.append(yaxis)
cty_glyph = Circle(x="index", y="cty", fill_color="#396285", size=8, fill_alpha=0.5, line_alpha=0.5)
hwy_glyph = Circle(x="index", y="hwy", fill_color="#CE603D", size=8, fill_alpha=0.5, line_alpha=0.5)
cty = GlyphRenderer(data_source=self.source, glyph=cty_glyph)
hwy = GlyphRenderer(data_source=self.source, glyph=hwy_glyph)
tooltips = [
("Manufacturer", "@manufacturer"),
("Model", "@model"),
("Displacement", "@displ"),
("Year", "@year"),
("Cylinders", "@cyl"),
("Transmission", "@trans"),
("Drive", "@drv"),
("Class", "@class"),
]
cty_hover_tool = HoverTool(plot=plot, renderers=[cty], tooltips=tooltips + [("City MPG", "@cty")])
hwy_hover_tool = HoverTool(plot=plot, renderers=[hwy], tooltips=tooltips + [("Highway MPG", "@hwy")])
select_tool = BoxSelectTool(plot=plot, renderers=[cty, hwy], dimensions=['width'])
plot.tools.extend([cty_hover_tool, hwy_hover_tool, select_tool])
plot.renderers.extend([cty, hwy, ygrid])
controls = VBox(children=[manufacturer_select, model_select, transmission_select, drive_select, class_select], width=200)
top_panel = HBox(children=[controls, plot])
layout = VBox(children=[top_panel, data_table])
return layout
def on_manufacturer_change(self, obj, attr, _, value):
self.manufacturer_filter = None if value == "All" else value
self.update_data()
def on_model_change(self, obj, attr, _, value):
self.model_filter = None if value == "All" else value
self.update_data()
def on_transmission_change(self, obj, attr, _, value):
self.transmission_filter = None if value == "All" else value
self.update_data()
def on_drive_change(self, obj, attr, _, value):
self.drive_filter = None if value == "All" else value
self.update_data()
def on_class_change(self, obj, attr, _, value):
self.class_filter = None if value == "All" else value
self.update_data()
def update_data(self):
df = mpg
if self.manufacturer_filter:
df = df[df["manufacturer"] == self.manufacturer_filter]
if self.model_filter:
df = df[df["model"] == self.model_filter]
if self.transmission_filter:
df = df[df["trans"] == self.transmission_filter]
if self.drive_filter:
df = df[df["drv"] == self.drive_filter]
if self.class_filter:
df = df[df["class"] == self.class_filter]
self.source.data = ColumnDataSource.from_df(df)
self.session.store_document(self.document)
def run(self, do_view=False, poll_interval=0.5):
link = self.session.object_link(self.document.context)
print("Please visit %s to see the plots" % link)
if do_view: view(link)
print("\npress ctrl-C to exit")
self.session.poll_document(self.document)
if __name__ == "__main__":
data_tables = DataTables()
data_tables.run(True)
| zrhans/python | exemplos/Examples.lnk/bokeh/glyphs/data_tables_server.py | Python | gpl-2.0 | 6,919 | [
"VisIt"
] | 6811f80ee3cda8987473cc25bf0cfb6d81ed1ce853f4b5c7628da4c0af530610 |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
import numpy as np
import bob.ip.gabor
import skimage.measure
from braincode.util import configParser
from braincode.math import make_2d_gaussian
#from braincode.math import img_resize
def get_gabor_kernels(feat_dir):
"""gabor bank generation"""
gwt = bob.ip.gabor.Transform(number_of_scales=9)
gwt.generate_wavelets(500, 500)
gabor_real = np.zeros((500, 500, 72))
gabor_imag = np.zeros((500, 500, 72))
for i in range(72):
w = bob.ip.gabor.Wavelet(resolution=(500, 500),
frequency=gwt.wavelet_frequencies[i])
sw = bob.sp.ifft(w.wavelet.astype(np.complex128))
gabor_real[..., i] = np.roll(np.roll(np.real(sw), 250, 0), 250, 1)
gabor_imag[..., i] = np.roll(np.roll(np.imag(sw), 250, 0), 250, 1)
outfile = os.path.join(feat_dir, 'gabor_kernels.npz')
np.savez(outfile, gabor_real=gabor_real, gabor_imag = gabor_imag)
def get_model_zparas(feat_dir):
"""Get mean and std of time courses for each model."""
# load candidate models
models = np.load(os.path.join(feat_dir, 'train_candidate_model.npy'),
mmap_mode='r')
model_mean = np.zeros((42500, 72))
model_std = np.zeros((42500, 72))
for i in range(42500):
print 'Model %s'%(i)
x = np.array(models[i, ...]).astype(np.float64)
model_mean[i] = np.mean(x, axis=0)
model_std[i] = np.std(x, axis=0)
outfile = os.path.join(feat_dir, 'model_norm_paras.npz')
np.savez(outfile, model_mean=model_mean, model_std=model_std)
def get_vxl_coding_wts(feat_dir, prf_dir, roi):
"""Generate voxel-wise encoding model of specific roi."""
roi_dir = os.path.join(prf_dir, roi)
# load model parameters
sel_models = np.load(os.path.join(roi_dir, 'reg_sel_model.npy'))
sel_paras = np.load(os.path.join(roi_dir, 'reg_sel_paras.npy'))
sel_model_corr = np.load(os.path.join(roi_dir, 'reg_sel_model_corr.npy'))
# load model norm paras
norm_paras = np.load(os.path.join(feat_dir, 'model_norm_paras.npz'))
# select voxels
thr = 0.3
#sel_vxl_idx = np.array([4, 6, 40, 160])
sel_vxl_idx = np.nonzero(sel_model_corr>thr)[0]
print 'Selecte %s voxels'%(sel_vxl_idx.shape[0])
parts = np.ceil(sel_vxl_idx.shape[0]*1.0/10)
for p in range(int(parts)):
if (p+1)*10 > sel_vxl_idx.shape[0]:
tmp_idx = sel_vxl_idx[(p*10):]
else:
tmp_idx = sel_vxl_idx[(p*10):(p*10+10)]
wt = np.zeros((500, 500, 72, tmp_idx.shape[0]), dtype=np.float32)
bias = np.zeros(tmp_idx.shape[0])
for i in range(tmp_idx.shape[0]):
print 'Voxel %s'%(tmp_idx[i])
model_idx = int(sel_models[tmp_idx[i]])
# get gaussian pooling field parameters
si = model_idx / 2500
xi = (model_idx % 2500) / 50
yi = (model_idx % 2500) % 50
x0 = np.arange(5, 500, 10)[xi]
y0 = np.arange(5, 500, 10)[yi]
sigma = [1] + [n*5 for n in range(1, 13)] + [70, 80, 90, 100]
s = sigma[si]
print 'center: %s, %s, sigma: %s'%(y0, x0, s)
kernel = make_2d_gaussian(500, s, center=(x0, y0))
kernel = np.expand_dims(kernel, 0)
kernel = np.repeat(kernel, 72, 0)
coding_wts = sel_paras[tmp_idx[i]]
norm_mean = norm_paras['model_mean'][model_idx]
norm_std = norm_paras['model_std'][model_idx]
for c in range(72):
kernel[c, ...] = kernel[c, ...] * coding_wts[c] / norm_std[c]
kernel = np.swapaxes(kernel, 0, 1)
kernel = np.swapaxes(kernel, 1, 2)
wt[..., i] = kernel
bias[i] = np.sum(coding_wts * norm_mean / norm_std)
outdir = os.path.join(roi_dir, 'tfrecon')
if not os.path.exists(outdir):
os.makedirs(outdir, 0755)
outfile = os.path.join(outdir, 'vxl_coding_wts_%s.npz'%(p+1))
np.savez(outfile, vxl_idx=tmp_idx, wt=wt, bias=bias)
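def _decode_model_idx(model_idx):
    """Illustrative helper (not called above): unpack a candidate-model index.
    It assumes the layout used in get_vxl_coding_wts: 17 sigma values times a
    50 x 50 grid of centers, i.e. model_idx = si * 2500 + xi * 50 + yi."""
    si = model_idx // 2500
    xi = (model_idx % 2500) // 50
    yi = (model_idx % 2500) % 50
    return si, xi, yi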
def get_vxl_coding_resp(feat_dir, prf_dir, roi):
"""Generate voxel-wise encoding model of specific roi."""
roi_dir = os.path.join(prf_dir, roi)
# load model parameters
sel_models = np.load(os.path.join(roi_dir, 'reg_sel_model.npy'))
sel_paras = np.load(os.path.join(roi_dir, 'reg_sel_paras.npy'))
sel_model_corr = np.load(os.path.join(roi_dir, 'reg_sel_model_corr.npy'))
# load candidate model
tmodels = np.load(os.path.join(feat_dir, 'train_candidate_model.npy'),
mmap_mode='r')
# select voxels
thr = 0.3
sel_vxl_idx = np.array([4, 5, 6])
#sel_vxl_idx = np.nonzero(sel_model_corr>thr)[0]
for i in range(sel_vxl_idx.shape[0]):
print 'Voxel %s'%(sel_vxl_idx[i])
model_idx = int(sel_models[sel_vxl_idx[i]])
tx = np.array(tmodels[model_idx, ...]).astype(np.float64)
m = np.mean(tx, axis=0, keepdims=True)
s = np.std(tx, axis=0, keepdims=True)
tx = (tx - m) / (s + 1e-5)
wts = sel_paras[sel_vxl_idx[i]]
pred = np.dot(tx, wts)
print pred[:10]
if __name__ == '__main__':
"""Main function."""
# config parser
cf = configParser.Config('config')
# database directory config
db_dir = os.path.join(cf.get('database', 'path'), 'vim1')
# directory config for analysis
root_dir = cf.get('base', 'path')
feat_dir = os.path.join(root_dir, 'sfeatures', 'vim1')
res_dir = os.path.join(root_dir, 'subjects')
#-- general config
subj_id = 1
roi = 'v1'
# directory config
subj_dir = os.path.join(res_dir, 'vim1_S%s'%(subj_id))
prf_dir = os.path.join(subj_dir, 'prf')
#get_gabor_kernels(feat_dir)
#get_model_zparas(feat_dir)
get_vxl_coding_wts(feat_dir, prf_dir, roi)
#get_vxl_coding_resp(feat_dir, prf_dir, roi)
| sealhuang/brainCodingToolbox | braincode/prf/tfrecon_util.py | Python | bsd-3-clause | 5,942 | [
"Gaussian"
] | 2b4f76528e54b26715af2948283b8da703c0c08aca0f9dab9e38d29929b53155 |
#!/usr/bin/env python
# general imports
from numpy import *
from random import *
# imp general
import IMP
# our project
from IMP.isd import *
# unit testing framework
import IMP.test
class MockFunc:
def __init__(self, setval, evaluate, evalargs=1, update=None):
self.__set = setval
self.__eval = evaluate
self.__update = update
self.__evalargs = evalargs
def set_evalargs(self, evalargs):
self.__evalargs = evalargs
def __call__(self, value):
self.__set(value)
if self.__update:
self.__update()
return self.__eval(self.__evalargs)
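# Illustrative note (not used by the tests): MockFunc packages "set a nuisance,
# then re-evaluate the scoring function" into a single callable of one variable,
# so IMP.test.numerical_derivative() can produce a finite-difference estimate to
# compare against the analytic value from get_nuisance_derivative(). The helper
# below spells out the same central-difference idea in plain Python.
def _central_difference(func, x, step):
    return (func(x + step) - func(x - step)) / (2.0 * step)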
class Tests(IMP.test.TestCase):
"""test of the GPI restraint with two data points, linear prior mean and
gaussian prior covariances. Mean function is not optimized.
"""
def setUp(self):
IMP.test.TestCase.setUp(self)
# IMP.set_log_level(IMP.TERSE)
IMP.set_log_level(0)
self.m = IMP.Model()
data = open(self.get_input_file_name('lyzexp_gpir.dat')).readlines()
data = [list(map(float, d.split())) for d in data]
self.q = [[i[0]] for i in data]
self.I = [i[1] for i in data]
self.err = [i[2] for i in data]
self.N = 10
self.G = Scale.setup_particle(IMP.Particle(self.m), 3.0)
self.G.set_nuisance_is_optimized(False)
self.Rg = Scale.setup_particle(IMP.Particle(self.m), 10.0)
self.Rg.set_nuisance_is_optimized(False)
# put d=15 so we don't use the porod region
self.d = Scale.setup_particle(IMP.Particle(self.m), 15.0)
self.d.set_nuisance_is_optimized(False)
self.s = Scale.setup_particle(IMP.Particle(self.m), 0.0)
self.s.set_nuisance_is_optimized(False)
self.A = Scale.setup_particle(IMP.Particle(self.m), 0.0)
self.A.set_nuisance_is_optimized(False)
self.mean = GeneralizedGuinierPorodFunction(
self.G, self.Rg, self.d, self.s, self.A)
self.tau = Switching.setup_particle(IMP.Particle(self.m), 1.0)
self.tau.set_nuisance_is_optimized(True)
self.lam = Scale.setup_particle(IMP.Particle(self.m), 1.0)
self.lam.set_nuisance_is_optimized(True)
self.sig = Scale.setup_particle(IMP.Particle(self.m), 1.0)
self.sig.set_nuisance_is_optimized(True)
self.cov = Covariance1DFunction(self.tau, self.lam, 2.0)
self.gpi = IMP.isd.GaussianProcessInterpolation(self.q, self.I,
self.err, self.N, self.mean, self.cov, self.sig)
self.gpr = IMP.isd.GaussianProcessInterpolationRestraint(
self.m, self.gpi)
self.sf = IMP.core.RestraintsScoringFunction([self.gpr])
self.particles = [
self.G,
self.Rg,
self.d,
self.s,
self.sig,
self.tau,
self.lam]
def shuffle_particle_values(self):
particles = [(self.alpha, -10, 10),
(self.beta, -10, 10),
(self.tau, 0.001, 10),
(self.lam, 0.1, 10),
(self.sig, 0.1, 10)]
# number of shuffled values
for i in range(randint(0, 5)):
# which particle
p, imin, imax = particles.pop(randint(0, len(particles) - 1))
p.set_nuisance(uniform(imin, imax))
def testDerivNumericSigma(self):
"""
test the derivatives of the gpi numerically for Sigma
"""
pnum = 4
values = range(1, 10)
particle = self.particles[pnum]
PFunc = MockFunc(particle.set_nuisance, self.sf.evaluate, False)
for val in values:
particle.set_nuisance(val)
ene = self.sf.evaluate(True)
observed = particle.get_nuisance_derivative()
expected = IMP.test.numerical_derivative(PFunc, val, .1)
self.assertAlmostEqual(expected, observed, delta=1e-2)
def testDerivNumericTau(self):
"""
test the derivatives of the gpi numerically for Tau
"""
pnum = 5
values = linspace(.1, .9)
particle = self.particles[pnum]
PFunc = MockFunc(particle.set_nuisance, self.sf.evaluate, False)
for val in values:
particle.set_nuisance(val)
ene = self.sf.evaluate(True)
observed = particle.get_nuisance_derivative()
expected = IMP.test.numerical_derivative(PFunc, val, .01)
self.assertAlmostEqual(expected, observed, delta=5e-2)
def testDerivNumericLambda(self):
"""
test the derivatives of the gpi numerically for Lambda
"""
pnum = 6
values = linspace(.3, 2)
particle = self.particles[pnum]
PFunc = MockFunc(particle.set_nuisance, self.sf.evaluate, False)
for val in values:
particle.set_nuisance(val)
ene = self.sf.evaluate(True)
observed = particle.get_nuisance_derivative()
expected = IMP.test.numerical_derivative(PFunc, val, .02)
self.assertAlmostEqual(expected, observed, delta=1e-2)
def testHessianNumericSigmaSigma(self):
"""
test the Hessian of the function numerically wrt Sigma and Sigma
"""
pa = 4
pb = 4
values = range(1, 3)
ppa = self.particles[pa]
ppb = self.particles[pb]
PFunc = MockFunc(ppb.set_nuisance,
lambda a: ppa.get_nuisance_derivative(), None,
update=lambda: self.sf.evaluate(True))
for val in values:
ppb.set_nuisance(val)
# IMP.set_log_level(IMP.TERSE)
# s and d not opt
observed = self.gpr.get_hessian(False)[pa - 4][pb - 4]
# IMP.set_log_level(0)
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(expected, observed, delta=1e-2)
def testHessianNumericSigmaTau(self):
"""
test the Hessian of the function numerically wrt Sigma and Tau
"""
pa = 4
pb = 5
values = linspace(.1, .9)
ppa = self.particles[pa]
ppb = self.particles[pb]
PFunc = MockFunc(ppb.set_nuisance,
lambda a: ppa.get_nuisance_derivative(), None,
update=lambda: self.sf.evaluate(True))
for val in values:
ppb.set_nuisance(val)
# IMP.set_log_level(IMP.TERSE)
# s and d not opt
observed = self.gpr.get_hessian(False)[pa - 4][pb - 4]
# IMP.set_log_level(0)
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(expected, observed, delta=1e-2)
def testHessianNumericSigmaLambda(self):
"""
test the Hessian of the function numerically wrt Sigma and Lambda
"""
pa = 4
pb = 6
values = linspace(.1, .9)
ppa = self.particles[pa]
ppb = self.particles[pb]
PFunc = MockFunc(ppb.set_nuisance,
lambda a: ppa.get_nuisance_derivative(), None,
update=lambda: self.sf.evaluate(True))
for val in values:
ppb.set_nuisance(val)
# IMP.set_log_level(IMP.TERSE)
# s and d not opt
observed = self.gpr.get_hessian(False)[pa - 4][pb - 4]
# IMP.set_log_level(0)
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(expected, observed, delta=1e-2)
def testHessianNumericTauTau(self):
"""
test the Hessian of the function numerically wrt Tau and Tau
"""
pa = 5
pb = 5
values = linspace(.1, .9)
ppa = self.particles[pa]
ppb = self.particles[pb]
PFunc = MockFunc(ppb.set_nuisance,
lambda a: ppa.get_nuisance_derivative(), None,
update=lambda: self.sf.evaluate(True))
for val in values:
ppb.set_nuisance(val)
# IMP.set_log_level(IMP.TERSE)
# s and d not opt
observed = self.gpr.get_hessian(False)[pa - 4][pb - 4]
# IMP.set_log_level(0)
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(expected, observed, delta=1e-2)
def testHessianNumericTauLambda(self):
"""
test the Hessian of the function numerically wrt Tau and Lambda
"""
pa = 5
pb = 6
values = linspace(.1, .9)
ppa = self.particles[pa]
ppb = self.particles[pb]
PFunc = MockFunc(ppb.set_nuisance,
lambda a: ppa.get_nuisance_derivative(), None,
update=lambda: self.sf.evaluate(True))
for val in values:
ppb.set_nuisance(val)
# IMP.set_log_level(IMP.TERSE)
# s and d not opt
observed = self.gpr.get_hessian(False)[pa - 4][pb - 4]
# IMP.set_log_level(0)
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(expected, observed, delta=1e-2)
def testHessianNumericLambdaLambda(self):
"""
test the Hessian of the function numerically wrt Lambda and Lambda
"""
pa = 6
pb = 6
values = linspace(1, 10)
ppa = self.particles[pa]
ppb = self.particles[pb]
PFunc = MockFunc(ppb.set_nuisance,
lambda a: ppa.get_nuisance_derivative(), None,
update=lambda: self.sf.evaluate(True))
for val in values:
ppb.set_nuisance(val)
# IMP.set_log_level(IMP.TERSE)
# s and d not opt
observed = self.gpr.get_hessian(False)[pa - 4][pb - 4]
# IMP.set_log_level(0)
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(expected, observed, delta=1e-3)
if __name__ == '__main__':
IMP.test.main()
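# Illustrative sketch (not part of the original test file): the tests above
# compare an analytical Hessian entry against a numerical derivative of the
# analytical gradient. IMP.test.numerical_derivative is assumed here to behave
# roughly like the central-difference helper below.
def _central_difference(f, x, h=0.01):
    """Approximate df/dx at x with a symmetric two-point stencil."""
    return (f(x + h) - f(x - h)) / (2.0 * h)
# Example (illustrative): _central_difference(math.sin, 0.3) is close to
# math.cos(0.3), with an O(h**2) error.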
| shanot/imp | modules/isd/test/medium_test_GaussianProcessInterpolationRestraintNumericallyNoMean.py | Python | gpl-3.0 | 10,128 | [
"Gaussian"
] | 4b426ffa94ff2b011b60055cc0642737230b82141c5daeabf84d7a163a84f4e1 |
# LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE
from __future__ import division ## removes integer division
import os
import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_raises
from scipy.spatial.distance import cdist, pdist, squareform
from scipy.sparse import csr_matrix
from scipy import io
from megaman.geometry import (compute_adjacency_matrix,
compute_affinity_matrix, Affinity,
affinity_methods)
random_state = np.random.RandomState(36)
n_sample = 10
d = 2
X = random_state.randn(n_sample, d)
D = squareform(pdist(X))
D[D > 1/d] = 0
TEST_DATA = os.path.join(os.path.dirname(__file__),
'testmegaman_laplacian_rad0_2_lam1_5_n200.mat')
def test_affinity_methods():
assert_equal(set(affinity_methods()), {'auto', 'gaussian'})
def test_affinity_input_validation():
X = np.random.rand(20, 3)
D = compute_adjacency_matrix(X, radius=1)
assert_raises(ValueError, compute_affinity_matrix, X)
def test_affinity_sparse_vs_dense():
"""
Test that A_sparse is the same as A_dense for a small A matrix
"""
rad = 2.
n_samples = 6
X = np.arange(n_samples)
X = X[:, np.newaxis]
X = np.concatenate((X, np.zeros((n_samples, 1), dtype=float)), axis=1)
X = np.asarray(X, order="C")
test_dist_matrix = compute_adjacency_matrix(X, method='auto', radius=rad)
A_dense = compute_affinity_matrix(test_dist_matrix.toarray(), method='auto',
radius=rad, symmetrize=False)
A_sparse = compute_affinity_matrix(csr_matrix(test_dist_matrix),
method='auto', radius=rad, symmetrize=False)
A_spdense = A_sparse.toarray()
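# The zeros in the densified sparse affinity correspond to entries that the
# dense path maps to exp(0) = 1 (distances stored as 0 after toarray());
# setting them to 1 makes the two results comparable entry-wise.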
A_spdense[ A_spdense == 0 ] = 1.
assert_allclose(A_dense, A_spdense)
def test_affinity_vs_matlab():
"""Test that the affinity calculation matches the matlab result"""
matlab = io.loadmat(TEST_DATA)
D = np.sqrt(matlab['S']) # matlab outputs squared distances
A_matlab = matlab['A']
radius = matlab['rad'][0]
# check dense affinity computation
A_dense = compute_affinity_matrix(D, radius=radius)
assert_allclose(A_dense, A_matlab)
# check sparse affinity computation
A_sparse = compute_affinity_matrix(csr_matrix(D), radius=radius)
assert_allclose(A_sparse.toarray(), A_matlab)
def test_affinity():
rand = np.random.RandomState(42)
X = rand.rand(20, 3)  # use the seeded generator so the test is reproducible
D = cdist(X, X)
def check_affinity(adjacency_radius, affinity_radius, symmetrize):
adj = compute_adjacency_matrix(X, radius=adjacency_radius)
aff = compute_affinity_matrix(adj, radius=affinity_radius,
symmetrize=symmetrize)
A = np.exp(-(D / affinity_radius) ** 2)
A[D > adjacency_radius] = 0
assert_allclose(aff.toarray(), A)
for adjacency_radius in [0.5, 1.0, 5.0]:
for affinity_radius in [0.1, 0.5, 1.0]:
for symmetrize in [True, False]:
yield (check_affinity, adjacency_radius,
affinity_radius, symmetrize)
def test_custom_affinity():
class CustomAffinity(Affinity):
name = "custom"
def affinity_matrix(self, adjacency_matrix):
return np.exp(-abs(adjacency_matrix.toarray()))
rand = np.random.RandomState(42)
X = rand.rand(10, 2)
D = compute_adjacency_matrix(X, radius=10)
A = compute_affinity_matrix(D, method='custom', radius=1)
assert_allclose(A, np.exp(-abs(D.toarray())))
Affinity._remove_from_registry("custom")
| jakevdp/megaman | megaman/geometry/tests/test_affinity.py | Python | bsd-2-clause | 3,661 | [
"Gaussian"
] | 6c833e71417dd05061fd6454bed4165925aa61eb14a50becf4a0c94dfa67c16b |
"""
Copyright (C) 2009-2015 Jussi Leinonen, Finnish Meteorological Institute,
California Institute of Technology
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import numpy as np
from scipy.integrate import quad, dblquad
def gaussian_pdf(std=10.0, mean=0.0):
"""Gaussian PDF for orientation averaging.
Args:
std: The standard deviation in degrees of the Gaussian PDF
mean: The mean in degrees of the Gaussian PDF. This should be a number
in the interval [0, 180)
Returns:
pdf(x), a function that returns the value of the spherical Jacobian-
normalized Gaussian PDF with the given STD at x (degrees). It is
normalized for the interval [0, 180].
"""
norm_const = 1.0
def pdf(x):
return norm_const*np.exp(-0.5 * ((x-mean)/std)**2) * \
np.sin(np.pi/180.0 * x)
norm_dev = quad(pdf, 0.0, 180.0)[0]
# ensure that the integral over the distribution equals 1
norm_const /= norm_dev
return pdf
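# Illustrative check (not part of the library): the returned pdf integrates to
# approximately 1 over [0, 180] degrees, e.g.
def _check_gaussian_pdf_normalization(std=20.0):
    """Return the integral of gaussian_pdf(std) over [0, 180]; should be ~1."""
    p = gaussian_pdf(std=std)
    return quad(p, 0.0, 180.0)[0]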
def uniform_pdf():
"""Uniform PDF for orientation averaging.
Returns:
pdf(x), a function that returns the value of the spherical Jacobian-
normalized uniform PDF. It is normalized for the interval [0, 180].
"""
norm_const = 1.0
def pdf(x):
return norm_const * np.sin(np.pi/180.0 * x)
norm_dev = quad(pdf, 0.0, 180.0)[0]
# ensure that the integral over the distribution equals 1
norm_const /= norm_dev
return pdf
def orient_single(tm):
"""Compute the T-matrix using a single orientation scatterer.
Args:
tm: TMatrix (or descendant) instance
Returns:
The amplitude (S) and phase (Z) matrices.
"""
return tm.get_SZ_single()
def orient_averaged_adaptive(tm):
"""Compute the T-matrix using variable orientation scatterers.
This method uses a very slow adaptive routine and should mainly be used
for reference purposes. Uses the set particle orientation PDF, ignoring
the alpha and beta attributes.
Args:
tm: TMatrix (or descendant) instance
Returns:
The amplitude (S) and phase (Z) matrices.
"""
S = np.zeros((2,2), dtype=complex)
Z = np.zeros((4,4))
def Sfunc(beta, alpha, i, j, real):
(S_ang, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)
s = S_ang[i,j].real if real else S_ang[i,j].imag
return s * tm.or_pdf(beta)
ind = range(2)
for i in ind:
for j in ind:
S.real[i,j] = dblquad(Sfunc, 0.0, 360.0,
lambda x: 0.0, lambda x: 180.0, (i,j,True))[0]/360.0
S.imag[i,j] = dblquad(Sfunc, 0.0, 360.0,
lambda x: 0.0, lambda x: 180.0, (i,j,False))[0]/360.0
def Zfunc(beta, alpha, i, j):
(S_ang, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)
return Z_ang[i,j] * tm.or_pdf(beta)
ind = range(4)
for i in ind:
for j in ind:
Z[i,j] = dblquad(Zfunc, 0.0, 360.0,
lambda x: 0.0, lambda x: 180.0, (i,j))[0]/360.0
return (S, Z)
def orient_averaged_fixed(tm):
"""Compute the T-matrix using variable orientation scatterers.
This method uses a fast Gaussian quadrature and is suitable
for most use. Uses the set particle orientation PDF, ignoring
the alpha and beta attributes.
Args:
tm: TMatrix (or descendant) instance.
Returns:
The amplitude (S) and phase (Z) matrices.
"""
S = np.zeros((2,2), dtype=complex)
Z = np.zeros((4,4))
ap = np.linspace(0, 360, tm.n_alpha+1)[:-1]
aw = 1.0/tm.n_alpha
for alpha in ap:
for (beta, w) in zip(tm.beta_p, tm.beta_w):
(S_ang, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)
S += w * S_ang
Z += w * Z_ang
sw = tm.beta_w.sum()
#normalize to get a proper average
S *= aw/sw
Z *= aw/sw
return (S, Z)
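# Illustrative sketch (not part of the library): the fixed-orientation average
# above is a quadrature-weighted sum over beta, normalized by the total weight.
# tm.beta_p / tm.beta_w are assumed here to be quadrature nodes and weights on
# [0, 180] degrees, e.g. Gauss-Legendre points mapped from [-1, 1]:
def _example_beta_average(f, n_beta=8):
    """Average f(beta) over beta in [0, 180] deg using Gauss-Legendre nodes."""
    x, w = np.polynomial.legendre.leggauss(n_beta)  # nodes/weights on [-1, 1]
    beta_p = 90.0 * (x + 1.0)                       # map nodes to [0, 180]
    beta_w = 90.0 * w                               # rescale the weights
    return sum(wi * f(bi) for bi, wi in zip(beta_p, beta_w)) / beta_w.sum()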
| jleinonen/pytmatrix | pytmatrix/orientation.py | Python | mit | 4,905 | [
"Gaussian"
] | 8e8255533245de0ec6948dbc719c78be22f6f51b6a12763d9ffeee0592e482b2 |
# -*- coding: utf-8 -*-
__docformat__='restructuredtext'
from popserver.tests.nodb_model import *
from popserver.tests import *
from fixture import DataTestCase
from popserver.tests import popfixtures
from popserver.agents.lastfm_agent import LastFmAgent
from popserver.agents.lastfm_client import LastFMClient
import popserver.agents
import types
import unittest
class TestLastFmClient(unittest.TestCase):
def setUp(self):
self._restoreMethod = popserver.agents.lastfm_client.LastFMClient._getFeed
LastFMClient._getFeed = types.MethodType(mock_lastfm_getFeed, None, LastFMClient)
self.client = LastFMClient()
def tearDown(self):
LastFMClient._getFeed = types.MethodType(self._restoreMethod, None, LastFMClient)
def testRecentTracks(self):
t = self.client.getRecentTracksForUser('maristaran')
assert type(t) == type([])
assert len(t) == 1
assert type(t[0]) == type({})
assert t[0]['artist'] == 'Willie Bobo'
def testTopTracks(self):
t = self.client.getTopTracksForUser('maristaran')
assert type(t) == type([])
assert len(t) == 1
assert type(t[0]) == type({})
assert t[0]['artist'] == 'Brian Wilson'
assert t[0]['name'] == 'Our Prayer Gee'
def testTopArtists(self):
t = self.client.getTopArtistsForUser('maristaran')
assert type(t) == type([])
assert len(t) == 1
assert type(t[0]) == type({})
assert t[0]['name'] == 'The Beatles'
def testUserTagsForTrack(self):
t = self.client.getUserTagsForTrack('maristaran', 'Brian Wilson', 'Our Prayer Gee')
assert type(t) == type([])
assert len(t) == 1
assert t == ['bombastic']
def testTopArtistsForUser(self):
t = self.client.getTopArtistsForUser('maristaran')
assert type(t) == type([])
assert len(t) == 1
assert t[0]['name'] == 'The Beatles'
def testTopTagsForTrack(self):
t = self.client.getTopTagsForTrack('Willie Bobo', 'Funky Sneakers')
assert type(t) == type([])
assert len(t) == 0
def testGetArtistData(self):
t = self.client.getArtistData('Brian Wilson')
assert type(t) == type({})
assert t['name'] == 'Brian Wilson'
# TODO: tests for the agent
# class TestLastFmAgent(TestModel, DataTestCase):
# fixture = dbfixture
# datasets = [popfixtures.UserData, popfixtures.ServiceTypeData, popfixtures.ServiceData, popfixtures.AccountData]
# def setUp(self):
# TestModel.setUp(self)
# DataTestCase.setUp(self)
# self._restoreMethod = popserver.agents.lastfm_client.LastFMClient._getFeed
# LastFMClient._getFeed = types.MethodType(mock_lastfm_getFeed, None, LastFMClient)
# self.agent = LastFmAgent()
# self.user = self.data.UserData.dartagnan
# self.lastfm_svc = self.data.ServiceData.lastfm
# self.account = Account.get_by(user_id=self.user.id, service_id=self.lastfm_svc.id)
# def tearDown(self):
# dbsession.clear()
# DataTestCase.tearDown(self)
# TestModel.tearDown(self)
# LastFMClient._getFeed = types.MethodType(self._restoreMethod, None, LastFMClient)
# def test_getUserGraph(self):
# r = self.agent.getUserGraph(self.account)
# assert len(r) == 3 # groups: top artists, top tracks and recently_listened
# assert map(type, r) == [popserver.model.ItemGroup, popserver.model.ItemGroup, popserver.model.ItemGroup]
# assert map(lambda g: type(g.items[0]), r) == [popserver.model.UserItem, popserver.model.UserItem,popserver.model.UserItem]
# assert map(lambda g: len(g.items), r) == [1, 1, 1]
# top_artists = r[0]
# assert type(top_artists.items[0].item) == popserver.model.Artist
# assert top_artists.items[0].item.title == 'The Beatles'
# top_tracks = r[1]
# assert type(top_tracks.items[0].item) == popserver.model.Song
# assert top_tracks.items[0].item.title == 'Our Prayer Gee'
# assert top_tracks.items[0].item.artist.title == 'Brian Wilson'
# recently_listened = r[2]
# assert type(recently_listened.items[0].item) == popserver.model.Song
# assert recently_listened.items[0].item.title == 'Funky Sneakers'
# assert recently_listened.items[0].item.artist.title == 'Willie Bobo'
# assert True
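# Stand-in for LastFMClient._getFeed (installed in setUp above): instead of
# hitting audioscrobbler.com, it parses and returns a local sample XML file
# matching the requested URL.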
def mock_lastfm_getFeed(self, url):
samples = {
'http://ws.audioscrobbler.com/1.0/user/maristaran/recenttracks.xml' : 'recenttracks.xml',
'http://ws.audioscrobbler.com/1.0/artist/Willie%2BBobo/similar.xml' : 'willie-bobo-similar.xml',
'http://ws.audioscrobbler.com/1.0/track/Willie%2BBobo/Funky%2BSneakers/toptags.xml' : 'funky-sneakers-toptags.xml',
'http://ws.audioscrobbler.com/1.0/user/maristaran/tracktags.xml?artist=Willie+Bobo&track=Funky+Sneakers' : 'funky-sneakers-tracktags.xml',
'http://ws.audioscrobbler.com/1.0/user/maristaran/toptracks.xml' : 'toptracks.xml',
'http://ws.audioscrobbler.com/1.0/artist/Brian%2BWilson/similar.xml' : 'brian-wilson-similar.xml',
'http://ws.audioscrobbler.com/1.0/track/Brian%2BWilson/Our%2BPrayer%2BGee/toptags.xml' : 'our-prayer-gee-toptags.xml',
'http://ws.audioscrobbler.com/1.0/user/maristaran/tracktags.xml?artist=Brian+Wilson&track=Our+Prayer+Gee' : 'maristaran-our-prayer-gee-toptags.xml',
'http://ws.audioscrobbler.com/1.0/user/maristaran/topartists.xml' : 'topartists.xml',
'http://ws.audioscrobbler.com/1.0/artist/The%2BBeatles/similar.xml' : 'beatles-similar.xml',
'http://ws.audioscrobbler.com/1.0/user/maristaran/artisttags.xml?artist=The+Beatles' : 'maristaran-beatles-tags.xml'
}
import xml.dom.minidom
if samples[url] == 404:
import urllib2
raise urllib2.HTTPError(url, 404, 'Not Found', None, None)
else:
return xml.dom.minidom.parse(popserver.tests.__path__[0] + '/samples/lastfm/' + samples[url])
# class TestLastfmAgent(DataTestCase, TestModel):
# fixture = dbfixture
# datasets = [popfixtures.UserData, popfixtures.ServiceTypeData, popfixtures.ServiceData, popfixtures.AccountData]
# def setUp(self):
# TestModel.setUp(self)
# DataTestCase.setUp(self)
# self.user = User.get_by(username='darty')
# self.lastfm_svc = Service.get_by(name='Last.FM')
# self.account = Account.get_by(user=self.user, service=self.lastfm_svc)
# self.agent = self.lastfm_svc.getAgent()
# def tearDown(self):
# DataTestCase.tearDown(self)
# TestModel.tearDown(self)
# LastFmAgent._getFeed = orig_getFeed
| santisiri/popego | popego/popserver/popserver/tests/agents/test_lastfm.py | Python | bsd-3-clause | 6,723 | [
"Brian"
] | 1658015ab7d6f363748a34c74137e4ee1e7c1f564e9f6a6d02ac77794fcc0d06 |
import datetime
import hashlib
import itertools
import logging
import os
import time
from collections import defaultdict
from dataclasses import asdict, dataclass, field
from operator import itemgetter
from typing import (
IO,
AbstractSet,
Any,
Callable,
Collection,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Union,
)
import django.db.utils
import orjson
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.db import IntegrityError, connection, transaction
from django.db.models import Count, Exists, F, OuterRef, Q, Sum
from django.db.models.query import QuerySet
from django.utils.html import escape
from django.utils.timezone import now as timezone_now
from django.utils.translation import gettext as _
from django.utils.translation import override as override_language
from psycopg2.extras import execute_values
from psycopg2.sql import SQL
from typing_extensions import TypedDict
from analytics.lib.counts import COUNT_STATS, do_increment_logging_stat
from analytics.models import RealmCount, StreamCount
from confirmation import settings as confirmation_settings
from confirmation.models import (
Confirmation,
confirmation_url,
create_confirmation_link,
generate_key,
)
from zerver.decorator import statsd_increment
from zerver.lib import retention as retention
from zerver.lib.addressee import Addressee
from zerver.lib.alert_words import (
add_user_alert_words,
get_alert_word_automaton,
remove_user_alert_words,
)
from zerver.lib.avatar import avatar_url, avatar_url_from_dict
from zerver.lib.bot_config import ConfigError, get_bot_config, get_bot_configs, set_bot_config
from zerver.lib.bulk_create import bulk_create_users
from zerver.lib.cache import (
bot_dict_fields,
cache_delete,
cache_delete_many,
cache_set,
cache_set_many,
cache_with_key,
delete_user_profile_caches,
display_recipient_cache_key,
flush_user_profile,
get_stream_cache_key,
to_dict_cache_key_id,
user_profile_by_api_key_cache_key,
user_profile_delivery_email_cache_key,
)
from zerver.lib.create_user import create_user, get_display_email_address
from zerver.lib.email_mirror_helpers import encode_email_address, encode_email_address_helper
from zerver.lib.email_notifications import enqueue_welcome_emails
from zerver.lib.email_validation import (
email_reserved_for_system_bots_error,
get_existing_user_errors,
get_realm_email_validator,
validate_email_is_valid,
)
from zerver.lib.emoji import check_emoji_request, emoji_name_to_emoji_code, get_emoji_file_name
from zerver.lib.exceptions import (
InvitationError,
JsonableError,
MarkdownRenderingException,
StreamDoesNotExistError,
StreamWithIDDoesNotExistError,
ZephyrMessageAlreadySentException,
)
from zerver.lib.export import get_realm_exports_serialized
from zerver.lib.external_accounts import DEFAULT_EXTERNAL_ACCOUNTS
from zerver.lib.hotspots import get_next_hotspots
from zerver.lib.i18n import get_language_name
from zerver.lib.markdown import MessageRenderingResult, topic_links
from zerver.lib.markdown import version as markdown_version
from zerver.lib.mention import MentionData
from zerver.lib.message import (
MessageDict,
SendMessageRequest,
access_message,
get_last_message_id,
normalize_body,
render_markdown,
truncate_topic,
update_first_visible_message_id,
wildcard_mention_allowed,
)
from zerver.lib.notification_data import UserMessageNotificationsData, get_user_group_mentions_data
from zerver.lib.pysa import mark_sanitized
from zerver.lib.queue import queue_json_publish
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.realm_logo import get_realm_logo_data
from zerver.lib.retention import move_messages_to_archive
from zerver.lib.send_email import (
FromAddress,
clear_scheduled_emails,
clear_scheduled_invitation_emails,
send_email,
send_email_to_admins,
)
from zerver.lib.server_initialization import create_internal_realm, server_initialized
from zerver.lib.sessions import delete_user_sessions
from zerver.lib.storage import static_path
from zerver.lib.stream_subscription import (
SubInfo,
bulk_get_private_peers,
bulk_get_subscriber_peer_info,
get_active_subscriptions_for_stream_id,
get_bulk_stream_subscriber_info,
get_stream_subscriptions_for_user,
get_stream_subscriptions_for_users,
get_subscribed_stream_ids_for_user,
get_subscriptions_for_send_message,
get_user_ids_for_streams,
num_subscribers_for_stream_id,
subscriber_ids_with_stream_history_access,
)
from zerver.lib.stream_topic import StreamTopicTarget
from zerver.lib.streams import (
access_stream_by_id,
access_stream_for_send_message,
can_access_stream_user_ids,
check_stream_access_based_on_stream_post_policy,
check_stream_name,
create_stream_if_needed,
get_default_value_for_history_public_to_subscribers,
render_stream_description,
send_stream_creation_event,
subscribed_to_stream,
)
from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime
from zerver.lib.timezone import canonicalize_timezone
from zerver.lib.topic import (
LEGACY_PREV_TOPIC,
ORIG_TOPIC,
RESOLVED_TOPIC_PREFIX,
TOPIC_LINKS,
TOPIC_NAME,
filter_by_exact_message_topic,
filter_by_topic_name_via_message,
save_message_for_edit_use_case,
update_edit_history,
update_messages_for_topic_edit,
)
from zerver.lib.topic_mutes import add_topic_mute, get_topic_mutes, remove_topic_mute
from zerver.lib.types import ProfileDataElementValue, ProfileFieldData
from zerver.lib.upload import (
claim_attachment,
delete_avatar_image,
delete_export_tarball,
delete_message_image,
upload_emoji_image,
)
from zerver.lib.user_groups import access_user_group_by_id, create_user_group
from zerver.lib.user_mutes import add_user_mute, get_muting_users, get_user_mutes
from zerver.lib.user_status import update_user_status
from zerver.lib.users import (
check_bot_name_available,
check_full_name,
format_user_row,
get_api_key,
user_profile_to_user_row,
)
from zerver.lib.utils import generate_api_key, log_statsd_event
from zerver.lib.validator import check_widget_content
from zerver.lib.widget import do_widget_post_save_actions, is_widget_message
from zerver.models import (
Attachment,
Client,
CustomProfileField,
CustomProfileFieldValue,
DefaultStream,
DefaultStreamGroup,
Draft,
EmailChangeStatus,
Message,
MultiuseInvite,
MutedUser,
PreregistrationUser,
Reaction,
Realm,
RealmAuditLog,
RealmDomain,
RealmEmoji,
RealmFilter,
RealmPlayground,
RealmUserDefault,
Recipient,
ScheduledEmail,
ScheduledMessage,
ScheduledMessageNotificationEmail,
Service,
Stream,
SubMessage,
Subscription,
UserActivity,
UserActivityInterval,
UserGroup,
UserGroupMembership,
UserHotspot,
UserMessage,
UserPresence,
UserProfile,
UserStatus,
active_non_guest_user_ids,
active_user_ids,
custom_profile_fields_for_realm,
filter_to_valid_prereg_users,
get_active_streams,
get_bot_dicts_in_realm,
get_bot_services,
get_client,
get_default_stream_groups,
get_huddle_recipient,
get_huddle_user_ids,
get_old_unclaimed_attachments,
get_realm,
get_realm_playgrounds,
get_stream,
get_stream_by_id_in_realm,
get_system_bot,
get_user_by_delivery_email,
get_user_by_id_in_realm_including_cross_realm,
get_user_profile_by_id,
is_cross_realm_bot_email,
linkifiers_for_realm,
query_for_ids,
realm_filters_for_realm,
validate_attachment_request,
)
from zerver.tornado.django_api import send_event
if settings.BILLING_ENABLED:
from corporate.lib.stripe import (
downgrade_now_without_creating_additional_invoices,
update_license_ledger_if_needed,
)
@dataclass
class SubscriptionInfo:
subscriptions: List[Dict[str, Any]]
unsubscribed: List[Dict[str, Any]]
never_subscribed: List[Dict[str, Any]]
# These are hard to type-check because of the API_FIELDS loops.
RawStreamDict = Dict[str, Any]
RawSubscriptionDict = Dict[str, Any]
ONBOARDING_TOTAL_MESSAGES = 1000
ONBOARDING_UNREAD_MESSAGES = 20
STREAM_ASSIGNMENT_COLORS = [
"#76ce90",
"#fae589",
"#a6c7e5",
"#e79ab5",
"#bfd56f",
"#f4ae55",
"#b0a5fd",
"#addfe5",
"#f5ce6e",
"#c2726a",
"#94c849",
"#bd86e5",
"#ee7e4a",
"#a6dcbf",
"#95a5fd",
"#53a063",
"#9987e1",
"#e4523d",
"#c2c2c2",
"#4f8de4",
"#c6a8ad",
"#e7cc4d",
"#c8bebf",
"#a47462",
]
def subscriber_info(user_id: int) -> Dict[str, Any]:
return {"id": user_id, "flags": ["read"]}
def bot_owner_user_ids(user_profile: UserProfile) -> Set[int]:
is_private_bot = (
user_profile.default_sending_stream
and user_profile.default_sending_stream.invite_only
or user_profile.default_events_register_stream
and user_profile.default_events_register_stream.invite_only
)
if is_private_bot:
return {user_profile.bot_owner_id}
else:
users = {user.id for user in user_profile.realm.get_human_admin_users()}
users.add(user_profile.bot_owner_id)
return users
def realm_user_count(realm: Realm) -> int:
return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False).count()
def realm_user_count_by_role(realm: Realm) -> Dict[str, Any]:
human_counts = {
str(UserProfile.ROLE_REALM_ADMINISTRATOR): 0,
str(UserProfile.ROLE_REALM_OWNER): 0,
str(UserProfile.ROLE_MODERATOR): 0,
str(UserProfile.ROLE_MEMBER): 0,
str(UserProfile.ROLE_GUEST): 0,
}
for value_dict in list(
UserProfile.objects.filter(realm=realm, is_bot=False, is_active=True)
.values("role")
.annotate(Count("role"))
):
human_counts[str(value_dict["role"])] = value_dict["role__count"]
bot_count = UserProfile.objects.filter(realm=realm, is_bot=True, is_active=True).count()
return {
RealmAuditLog.ROLE_COUNT_HUMANS: human_counts,
RealmAuditLog.ROLE_COUNT_BOTS: bot_count,
}
def get_signups_stream(realm: Realm) -> Stream:
# This one-liner helps us work around a lint rule.
return get_stream("signups", realm)
def send_message_to_signup_notification_stream(
sender: UserProfile, realm: Realm, message: str, topic_name: str = _("signups")
) -> None:
signup_notifications_stream = realm.get_signup_notifications_stream()
if signup_notifications_stream is None:
return
with override_language(realm.default_language):
internal_send_stream_message(sender, signup_notifications_stream, topic_name, message)
def notify_new_user(user_profile: UserProfile) -> None:
user_count = realm_user_count(user_profile.realm)
sender_email = settings.NOTIFICATION_BOT
sender = get_system_bot(sender_email, user_profile.realm_id)
is_first_user = user_count == 1
if not is_first_user:
message = _("{user} just signed up for Zulip. (total: {user_count})").format(
user=f"@_**{user_profile.full_name}|{user_profile.id}**", user_count=user_count
)
if settings.BILLING_ENABLED:
from corporate.lib.registration import generate_licenses_low_warning_message_if_required
licenses_low_warning_message = generate_licenses_low_warning_message_if_required(
user_profile.realm
)
if licenses_low_warning_message is not None:
message += "\n"
message += licenses_low_warning_message
send_message_to_signup_notification_stream(sender, user_profile.realm, message)
# We also send a notification to the Zulip administrative realm
admin_realm = get_realm(settings.SYSTEM_BOT_REALM)
admin_realm_sender = get_system_bot(sender_email, admin_realm.id)
try:
# Check whether the stream exists
signups_stream = get_signups_stream(admin_realm)
# We intentionally use the same strings as above to avoid translation burden.
message = _("{user} just signed up for Zulip. (total: {user_count})").format(
user=f"{user_profile.full_name} <`{user_profile.email}`>", user_count=user_count
)
internal_send_stream_message(
admin_realm_sender, signups_stream, user_profile.realm.display_subdomain, message
)
except Stream.DoesNotExist:
# If the signups stream hasn't been created in the admin
# realm, don't auto-create it to send to it; just do nothing.
pass
def notify_invites_changed(user_profile: UserProfile) -> None:
event = dict(type="invites_changed")
admin_ids = [user.id for user in user_profile.realm.get_admin_users_and_bots()]
send_event(user_profile.realm, event, admin_ids)
def add_new_user_history(user_profile: UserProfile, streams: Iterable[Stream]) -> None:
"""Give you the last ONBOARDING_TOTAL_MESSAGES messages on your public
streams, so you have something to look at in your home view once
you finish the tutorial. The most recent ONBOARDING_UNREAD_MESSAGES
are marked unread.
"""
one_week_ago = timezone_now() - datetime.timedelta(weeks=1)
recipient_ids = [stream.recipient_id for stream in streams if not stream.invite_only]
recent_messages = Message.objects.filter(
recipient_id__in=recipient_ids, date_sent__gt=one_week_ago
).order_by("-id")
message_ids_to_use = list(
reversed(recent_messages.values_list("id", flat=True)[0:ONBOARDING_TOTAL_MESSAGES])
)
if len(message_ids_to_use) == 0:
return
# Handle the race condition where a message arrives between
# bulk_add_subscriptions above and the Message query just above
already_ids = set(
UserMessage.objects.filter(
message_id__in=message_ids_to_use, user_profile=user_profile
).values_list("message_id", flat=True)
)
# Mark the newest ONBOARDING_UNREAD_MESSAGES as unread.
marked_unread = 0
ums_to_create = []
for message_id in reversed(message_ids_to_use):
if message_id in already_ids:
continue
um = UserMessage(user_profile=user_profile, message_id=message_id)
if marked_unread < ONBOARDING_UNREAD_MESSAGES:
marked_unread += 1
else:
um.flags = UserMessage.flags.read
ums_to_create.append(um)
UserMessage.objects.bulk_create(reversed(ums_to_create))
# Does the processing for a new user account:
# * Subscribes to default/invitation streams
# * Fills in some recent historical messages
# * Notifies other users in realm and Zulip about the signup
# * Deactivates PreregistrationUser objects
def process_new_human_user(
user_profile: UserProfile,
prereg_user: Optional[PreregistrationUser] = None,
default_stream_groups: Sequence[DefaultStreamGroup] = [],
realm_creation: bool = False,
) -> None:
realm = user_profile.realm
mit_beta_user = realm.is_zephyr_mirror_realm
if prereg_user is not None:
streams: List[Stream] = list(prereg_user.streams.all())
acting_user: Optional[UserProfile] = prereg_user.referred_by
else:
streams = []
acting_user = None
# If the user's invitation didn't explicitly list some streams, we
# add the default streams
if len(streams) == 0:
streams = get_default_subs(user_profile)
for default_stream_group in default_stream_groups:
default_stream_group_streams = default_stream_group.streams.all()
for stream in default_stream_group_streams:
if stream not in streams:
streams.append(stream)
bulk_add_subscriptions(
realm,
streams,
[user_profile],
from_user_creation=True,
acting_user=acting_user,
)
add_new_user_history(user_profile, streams)
# mit_beta_users don't have a referred_by field
if not mit_beta_user and prereg_user is not None and prereg_user.referred_by is not None:
# This is a cross-realm private message.
with override_language(prereg_user.referred_by.default_language):
internal_send_private_message(
get_system_bot(settings.NOTIFICATION_BOT, prereg_user.referred_by.realm_id),
prereg_user.referred_by,
_("{user} accepted your invitation to join Zulip!").format(
user=f"{user_profile.full_name} <`{user_profile.email}`>"
),
)
revoke_preregistration_users(user_profile, prereg_user, realm_creation)
if not realm_creation and prereg_user is not None and prereg_user.referred_by is not None:
notify_invites_changed(user_profile)
notify_new_user(user_profile)
# Clear any scheduled invitation emails to prevent them
# from being sent after the user is created.
clear_scheduled_invitation_emails(user_profile.delivery_email)
if realm.send_welcome_emails:
enqueue_welcome_emails(user_profile, realm_creation)
# We have an import loop here; it's intentional, because we want
# to keep all the onboarding code in zerver/lib/onboarding.py.
from zerver.lib.onboarding import send_initial_pms
send_initial_pms(user_profile)
def revoke_preregistration_users(
created_user_profile: UserProfile,
used_preregistration_user: Optional[PreregistrationUser],
realm_creation: bool,
) -> None:
if used_preregistration_user is None:
assert not realm_creation, "realm_creation should only happen with a PreregistrationUser"
if used_preregistration_user is not None:
used_preregistration_user.status = confirmation_settings.STATUS_ACTIVE
used_preregistration_user.save(update_fields=["status"])
# In the special case of realm creation, there can be no additional PreregistrationUser
# for us to want to modify - because other realm_creation PreregistrationUsers should be
# left usable for creating different realms.
if realm_creation:
return
# Mark any other PreregistrationUsers in the realm that are STATUS_ACTIVE as
# inactive so we can keep track of the PreregistrationUser we
# actually used for analytics.
if used_preregistration_user is not None:
PreregistrationUser.objects.filter(
email__iexact=created_user_profile.delivery_email, realm=created_user_profile.realm
).exclude(id=used_preregistration_user.id).update(
status=confirmation_settings.STATUS_REVOKED
)
else:
PreregistrationUser.objects.filter(
email__iexact=created_user_profile.delivery_email, realm=created_user_profile.realm
).update(status=confirmation_settings.STATUS_REVOKED)
def notify_created_user(user_profile: UserProfile) -> None:
user_row = user_profile_to_user_row(user_profile)
person = format_user_row(
user_profile.realm,
user_profile,
user_row,
# Since we don't know what the client
# supports at this point in the code, we
# just assume client_gravatar and
# user_avatar_url_field_optional = False :(
client_gravatar=False,
user_avatar_url_field_optional=False,
# We assume there's no custom profile
# field data for a new user; initial
# values are expected to be added in a
# later event.
custom_profile_field_data={},
)
event: Dict[str, Any] = dict(type="realm_user", op="add", person=person)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def created_bot_event(user_profile: UserProfile) -> Dict[str, Any]:
def stream_name(stream: Optional[Stream]) -> Optional[str]:
if not stream:
return None
return stream.name
default_sending_stream_name = stream_name(user_profile.default_sending_stream)
default_events_register_stream_name = stream_name(user_profile.default_events_register_stream)
bot = dict(
email=user_profile.email,
user_id=user_profile.id,
full_name=user_profile.full_name,
bot_type=user_profile.bot_type,
is_active=user_profile.is_active,
api_key=get_api_key(user_profile),
default_sending_stream=default_sending_stream_name,
default_events_register_stream=default_events_register_stream_name,
default_all_public_streams=user_profile.default_all_public_streams,
avatar_url=avatar_url(user_profile),
services=get_service_dicts_for_bot(user_profile.id),
)
# Set the owner key only when the bot has an owner.
# The default bots don't have an owner. So don't
# set the owner key while reactivating them.
if user_profile.bot_owner is not None:
bot["owner_id"] = user_profile.bot_owner.id
return dict(type="realm_bot", op="add", bot=bot)
def notify_created_bot(user_profile: UserProfile) -> None:
event = created_bot_event(user_profile)
send_event(user_profile.realm, event, bot_owner_user_ids(user_profile))
def create_users(
realm: Realm, name_list: Iterable[Tuple[str, str]], bot_type: Optional[int] = None
) -> None:
user_set = set()
for full_name, email in name_list:
user_set.add((email, full_name, True))
bulk_create_users(realm, user_set, bot_type)
def do_create_user(
email: str,
password: Optional[str],
realm: Realm,
full_name: str,
bot_type: Optional[int] = None,
role: Optional[int] = None,
bot_owner: Optional[UserProfile] = None,
tos_version: Optional[str] = None,
timezone: str = "",
avatar_source: str = UserProfile.AVATAR_FROM_GRAVATAR,
default_sending_stream: Optional[Stream] = None,
default_events_register_stream: Optional[Stream] = None,
default_all_public_streams: Optional[bool] = None,
prereg_user: Optional[PreregistrationUser] = None,
default_stream_groups: Sequence[DefaultStreamGroup] = [],
source_profile: Optional[UserProfile] = None,
realm_creation: bool = False,
*,
acting_user: Optional[UserProfile],
enable_marketing_emails: bool = True,
) -> UserProfile:
user_profile = create_user(
email=email,
password=password,
realm=realm,
full_name=full_name,
role=role,
bot_type=bot_type,
bot_owner=bot_owner,
tos_version=tos_version,
timezone=timezone,
avatar_source=avatar_source,
default_sending_stream=default_sending_stream,
default_events_register_stream=default_events_register_stream,
default_all_public_streams=default_all_public_streams,
source_profile=source_profile,
enable_marketing_emails=enable_marketing_emails,
)
event_time = user_profile.date_joined
if not acting_user:
acting_user = user_profile
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=acting_user,
modified_user=user_profile,
event_type=RealmAuditLog.USER_CREATED,
event_time=event_time,
extra_data=orjson.dumps(
{
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}
).decode(),
)
if realm_creation:
# If this user just created a realm, make sure they are
# properly tagged as the creator of the realm.
realm_creation_audit_log = (
RealmAuditLog.objects.filter(event_type=RealmAuditLog.REALM_CREATED, realm=realm)
.order_by("id")
.last()
)
assert realm_creation_audit_log is not None
realm_creation_audit_log.acting_user = user_profile
realm_creation_audit_log.save(update_fields=["acting_user"])
do_increment_logging_stat(
user_profile.realm,
COUNT_STATS["active_users_log:is_bot:day"],
user_profile.is_bot,
event_time,
)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
# Note that for bots, the caller will send an additional event
# with bot-specific info like services.
notify_created_user(user_profile)
if bot_type is None:
process_new_human_user(
user_profile,
prereg_user=prereg_user,
default_stream_groups=default_stream_groups,
realm_creation=realm_creation,
)
return user_profile
def do_activate_mirror_dummy_user(
user_profile: UserProfile, *, acting_user: Optional[UserProfile]
) -> None:
"""Called to have a user "take over" a "mirror dummy" user
(i.e. is_mirror_dummy=True) account when they sign up with the
same email address.
Essentially, the result should be as though we had created the
UserProfile just now with do_create_user, except that the mirror
dummy user may appear as the recipient or sender of messages from
before their account was fully created.
TODO: This function likely has bugs resulting from this being a
parallel code path to do_create_user; e.g. it likely does not
handle preferences or default streams properly.
"""
with transaction.atomic():
change_user_is_active(user_profile, True)
user_profile.is_mirror_dummy = False
user_profile.set_unusable_password()
user_profile.date_joined = timezone_now()
user_profile.tos_version = settings.TOS_VERSION
user_profile.save(
update_fields=["date_joined", "password", "is_mirror_dummy", "tos_version"]
)
event_time = user_profile.date_joined
RealmAuditLog.objects.create(
realm=user_profile.realm,
modified_user=user_profile,
acting_user=acting_user,
event_type=RealmAuditLog.USER_ACTIVATED,
event_time=event_time,
extra_data=orjson.dumps(
{
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}
).decode(),
)
do_increment_logging_stat(
user_profile.realm,
COUNT_STATS["active_users_log:is_bot:day"],
user_profile.is_bot,
event_time,
)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
notify_created_user(user_profile)
def do_reactivate_user(user_profile: UserProfile, *, acting_user: Optional[UserProfile]) -> None:
"""Reactivate a user that had previously been deactivated"""
with transaction.atomic():
change_user_is_active(user_profile, True)
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
modified_user=user_profile,
acting_user=acting_user,
event_type=RealmAuditLog.USER_REACTIVATED,
event_time=event_time,
extra_data=orjson.dumps(
{
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}
).decode(),
)
do_increment_logging_stat(
user_profile.realm,
COUNT_STATS["active_users_log:is_bot:day"],
user_profile.is_bot,
event_time,
)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
notify_created_user(user_profile)
if user_profile.is_bot:
notify_created_bot(user_profile)
def active_humans_in_realm(realm: Realm) -> Sequence[UserProfile]:
return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False)
def do_set_realm_property(
realm: Realm, name: str, value: Any, *, acting_user: Optional[UserProfile]
) -> None:
"""Takes in a realm object, the name of an attribute to update, the
value to update it to, and the user who initiated the update.
"""
property_type = Realm.property_types[name]
assert isinstance(
value, property_type
), f"Cannot update {name}: {value} is not an instance of {property_type}"
old_value = getattr(realm, name)
setattr(realm, name, value)
realm.save(update_fields=[name])
event = dict(
type="realm",
op="update",
property=name,
value=value,
)
send_event(realm, event, active_user_ids(realm.id))
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
event_time=event_time,
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: value,
"property": name,
}
).decode(),
)
if name == "email_address_visibility":
if Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE not in [old_value, value]:
# We use real email addresses on UserProfile.email only if
# EMAIL_ADDRESS_VISIBILITY_EVERYONE is configured, so changes
# between the other values don't require updating that field;
# we can save the work and return here.
return
user_profiles = UserProfile.objects.filter(realm=realm, is_bot=False)
for user_profile in user_profiles:
user_profile.email = get_display_email_address(user_profile)
# TODO: Design a bulk event for this or force-reload all clients
send_user_email_update_event(user_profile)
UserProfile.objects.bulk_update(user_profiles, ["email"])
for user_profile in user_profiles:
flush_user_profile(sender=UserProfile, instance=user_profile)
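# Illustrative sketch (not part of the original module): how a caller might use
# do_set_realm_property to change a boolean realm setting. The property name
# "invite_required" and the acting admin are assumptions for the example.
def _example_require_invitations(realm: Realm, admin: UserProfile) -> None:
    # Type-checks the value against Realm.property_types, saves the field,
    # records a RealmAuditLog entry, and sends a realm/update event to clients.
    do_set_realm_property(realm, "invite_required", True, acting_user=admin)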
def do_set_realm_authentication_methods(
realm: Realm, authentication_methods: Dict[str, bool], *, acting_user: Optional[UserProfile]
) -> None:
old_value = realm.authentication_methods_dict()
for key, value in list(authentication_methods.items()):
index = getattr(realm.authentication_methods, key).number
realm.authentication_methods.set_bit(index, int(value))
realm.save(update_fields=["authentication_methods"])
updated_value = realm.authentication_methods_dict()
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
event_time=timezone_now(),
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: updated_value,
"property": "authentication_methods",
}
).decode(),
)
event = dict(
type="realm",
op="update_dict",
property="default",
data=dict(authentication_methods=updated_value),
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_message_editing(
realm: Realm,
allow_message_editing: bool,
message_content_edit_limit_seconds: int,
edit_topic_policy: int,
*,
acting_user: Optional[UserProfile],
) -> None:
old_values = dict(
allow_message_editing=realm.allow_message_editing,
message_content_edit_limit_seconds=realm.message_content_edit_limit_seconds,
edit_topic_policy=realm.edit_topic_policy,
)
realm.allow_message_editing = allow_message_editing
realm.message_content_edit_limit_seconds = message_content_edit_limit_seconds
realm.edit_topic_policy = edit_topic_policy
event_time = timezone_now()
updated_properties = dict(
allow_message_editing=allow_message_editing,
message_content_edit_limit_seconds=message_content_edit_limit_seconds,
edit_topic_policy=edit_topic_policy,
)
for updated_property, updated_value in updated_properties.items():
if updated_value == old_values[updated_property]:
continue
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
event_time=event_time,
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_values[updated_property],
RealmAuditLog.NEW_VALUE: updated_value,
"property": updated_property,
}
).decode(),
)
realm.save(update_fields=list(updated_properties.keys()))
event = dict(
type="realm",
op="update_dict",
property="default",
data=updated_properties,
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_notifications_stream(
realm: Realm, stream: Optional[Stream], stream_id: int, *, acting_user: Optional[UserProfile]
) -> None:
old_value = realm.notifications_stream_id
realm.notifications_stream = stream
realm.save(update_fields=["notifications_stream"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
event_time=event_time,
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: stream_id,
"property": "notifications_stream",
}
).decode(),
)
event = dict(
type="realm",
op="update",
property="notifications_stream_id",
value=stream_id,
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_signup_notifications_stream(
realm: Realm, stream: Optional[Stream], stream_id: int, *, acting_user: Optional[UserProfile]
) -> None:
old_value = realm.signup_notifications_stream_id
realm.signup_notifications_stream = stream
realm.save(update_fields=["signup_notifications_stream"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
event_time=event_time,
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: stream_id,
"property": "signup_notifications_stream",
}
).decode(),
)
event = dict(
type="realm",
op="update",
property="signup_notifications_stream_id",
value=stream_id,
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_user_default_setting(
realm_user_default: RealmUserDefault,
name: str,
value: Any,
*,
acting_user: Optional[UserProfile],
) -> None:
old_value = getattr(realm_user_default, name)
realm = realm_user_default.realm
event_time = timezone_now()
with transaction.atomic(savepoint=False):
setattr(realm_user_default, name, value)
realm_user_default.save(update_fields=[name])
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_DEFAULT_USER_SETTINGS_CHANGED,
event_time=event_time,
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: value,
"property": name,
}
).decode(),
)
event = dict(
type="realm_user_settings_defaults",
op="update",
property=name,
value=value,
)
send_event(realm, event, active_user_ids(realm.id))
def do_deactivate_realm(realm: Realm, *, acting_user: Optional[UserProfile]) -> None:
"""
Deactivate this realm. Do NOT deactivate the users -- we need to be able to
tell the difference between users that were intentionally deactivated,
e.g. by a realm admin, and users who can't currently use Zulip because their
realm has been deactivated.
"""
if realm.deactivated:
return
realm.deactivated = True
realm.save(update_fields=["deactivated"])
if settings.BILLING_ENABLED:
downgrade_now_without_creating_additional_invoices(realm)
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_DEACTIVATED,
event_time=event_time,
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm),
}
).decode(),
)
ScheduledEmail.objects.filter(realm=realm).delete()
for user in active_humans_in_realm(realm):
# Don't deactivate the users, but do delete their sessions so they get
# bumped to the login screen, where they'll get a realm deactivation
# notice when they try to log in.
delete_user_sessions(user)
# This event will only ever be received by clients with an active
# longpoll connection, because by this point clients will be
# unable to authenticate again to their event queue (triggering an
# immediate reload into the page explaining the realm was
# deactivated). So the purpose of sending this is to flush all
# active longpoll connections for the realm.
event = dict(type="realm", op="deactivated", realm_id=realm.id)
send_event(realm, event, active_user_ids(realm.id))
def do_reactivate_realm(realm: Realm) -> None:
realm.deactivated = False
realm.save(update_fields=["deactivated"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_REACTIVATED,
event_time=event_time,
extra_data=orjson.dumps(
{
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm),
}
).decode(),
)
def do_change_realm_subdomain(
realm: Realm, new_subdomain: str, *, acting_user: Optional[UserProfile]
) -> None:
old_subdomain = realm.subdomain
old_uri = realm.uri
realm.string_id = new_subdomain
realm.save(update_fields=["string_id"])
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_SUBDOMAIN_CHANGED,
event_time=timezone_now(),
acting_user=acting_user,
extra_data={"old_subdomain": old_subdomain, "new_subdomain": new_subdomain},
)
# If a realm is being renamed multiple times, we should find all the placeholder
# realms and reset their deactivated_redirect field to point to the new realm uri
placeholder_realms = Realm.objects.filter(deactivated_redirect=old_uri, deactivated=True)
for placeholder_realm in placeholder_realms:
do_add_deactivated_redirect(placeholder_realm, realm.uri)
# When we change a realm's subdomain, the realm with the old subdomain is
# effectively deactivated. We create a deactivated realm at the old subdomain
# and set its deactivated_redirect to the new URI so that users can be told
# that the realm has moved to a new subdomain.
placeholder_realm = do_create_realm(old_subdomain, realm.name)
do_deactivate_realm(placeholder_realm, acting_user=None)
do_add_deactivated_redirect(placeholder_realm, realm.uri)
def do_add_deactivated_redirect(realm: Realm, redirect_url: str) -> None:
realm.deactivated_redirect = redirect_url
realm.save(update_fields=["deactivated_redirect"])
def do_scrub_realm(realm: Realm, *, acting_user: Optional[UserProfile]) -> None:
if settings.BILLING_ENABLED:
downgrade_now_without_creating_additional_invoices(realm)
users = UserProfile.objects.filter(realm=realm)
for user in users:
do_delete_messages_by_sender(user)
do_delete_avatar_image(user, acting_user=acting_user)
user.full_name = f"Scrubbed {generate_key()[:15]}"
scrubbed_email = f"scrubbed-{generate_key()[:15]}@{realm.host}"
user.email = scrubbed_email
user.delivery_email = scrubbed_email
user.save(update_fields=["full_name", "email", "delivery_email"])
do_remove_realm_custom_profile_fields(realm)
Attachment.objects.filter(realm=realm).delete()
RealmAuditLog.objects.create(
realm=realm,
event_time=timezone_now(),
acting_user=acting_user,
event_type=RealmAuditLog.REALM_SCRUBBED,
)
def do_delete_user(user_profile: UserProfile) -> None:
if user_profile.realm.is_zephyr_mirror_realm:
raise AssertionError("Deleting zephyr mirror users is not supported")
do_deactivate_user(user_profile, acting_user=None)
subscribed_huddle_recipient_ids = set(
Subscription.objects.filter(
user_profile=user_profile, recipient__type=Recipient.HUDDLE
).values_list("recipient_id", flat=True)
)
user_id = user_profile.id
realm = user_profile.realm
personal_recipient = user_profile.recipient
user_profile.delete()
# Recipient objects don't get deleted through CASCADE, so we need to handle
# the user's personal recipient manually. This will also delete all Messages pointing
# to this recipient (all private messages sent to the user).
assert personal_recipient is not None
personal_recipient.delete()
replacement_user = create_user(
force_id=user_id,
email=f"deleteduser{user_id}@{realm.uri}",
password=None,
realm=realm,
full_name=f"Deleted User {user_id}",
is_mirror_dummy=True,
)
subs_to_recreate = [
Subscription(
user_profile=replacement_user,
recipient=recipient,
is_user_active=replacement_user.is_active,
)
for recipient in Recipient.objects.filter(id__in=subscribed_huddle_recipient_ids)
]
Subscription.objects.bulk_create(subs_to_recreate)
def change_user_is_active(user_profile: UserProfile, value: bool) -> None:
"""
Helper function for changing the .is_active field. Not meant as a standalone function
in production code as properly activating/deactivating users requires more steps.
This changes the is_active value and saves it, while ensuring
Subscription.is_user_active values are updated in the same db transaction.
"""
with transaction.atomic(savepoint=False):
user_profile.is_active = value
user_profile.save(update_fields=["is_active"])
Subscription.objects.filter(user_profile=user_profile).update(is_user_active=value)
def get_active_bots_owned_by_user(user_profile: UserProfile) -> QuerySet:
return UserProfile.objects.filter(is_bot=True, is_active=True, bot_owner=user_profile)
def do_deactivate_user(
user_profile: UserProfile, _cascade: bool = True, *, acting_user: Optional[UserProfile]
) -> None:
if not user_profile.is_active:
return
if _cascade:
# We need to deactivate bots before the target user, to ensure
# that a failure partway through this function cannot result
# in only the user being deactivated.
bot_profiles = get_active_bots_owned_by_user(user_profile)
for profile in bot_profiles:
do_deactivate_user(profile, _cascade=False, acting_user=acting_user)
with transaction.atomic():
if user_profile.realm.is_zephyr_mirror_realm: # nocoverage
# For zephyr mirror users, we need to make them a mirror dummy
# again; otherwise, other users won't get the correct behavior
# when trying to send messages to this person inside Zulip.
#
# Ideally, we need to also ensure their zephyr mirroring bot
# isn't running, but that's a separate issue.
user_profile.is_mirror_dummy = True
user_profile.save(update_fields=["is_mirror_dummy"])
change_user_is_active(user_profile, False)
delete_user_sessions(user_profile)
clear_scheduled_emails(user_profile.id)
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
modified_user=user_profile,
acting_user=acting_user,
event_type=RealmAuditLog.USER_DEACTIVATED,
event_time=event_time,
extra_data=orjson.dumps(
{
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}
).decode(),
)
do_increment_logging_stat(
user_profile.realm,
COUNT_STATS["active_users_log:is_bot:day"],
user_profile.is_bot,
event_time,
increment=-1,
)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
event = dict(
type="realm_user",
op="remove",
person=dict(user_id=user_profile.id, full_name=user_profile.full_name),
)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
if user_profile.is_bot:
event = dict(
type="realm_bot",
op="remove",
bot=dict(user_id=user_profile.id, full_name=user_profile.full_name),
)
send_event(user_profile.realm, event, bot_owner_user_ids(user_profile))
def do_deactivate_stream(
stream: Stream, log: bool = True, *, acting_user: Optional[UserProfile]
) -> None:
# We want to mark all messages in the to-be-deactivated stream as
# read for all users; otherwise they will pollute queries like
# "Get the user's first unread message". Since this can be an
# expensive operation, we do it via the deferred_work queue
# processor.
deferred_work_event = {
"type": "mark_stream_messages_as_read_for_everyone",
"stream_recipient_id": stream.recipient_id,
}
queue_json_publish("deferred_work", deferred_work_event)
# Get the affected user ids *before* we deactivate everybody.
affected_user_ids = can_access_stream_user_ids(stream)
get_active_subscriptions_for_stream_id(stream.id, include_deactivated_users=True).update(
active=False
)
was_invite_only = stream.invite_only
stream.deactivated = True
stream.invite_only = True
# Preserve as much as possible the original stream name while giving it a
# special prefix that both indicates that the stream is deactivated and
# frees up the original name for reuse.
old_name = stream.name
# Prepend a substring of the hashed stream ID to the new stream name
streamID = str(stream.id)
stream_id_hash_object = hashlib.sha512(streamID.encode())
hashed_stream_id = stream_id_hash_object.hexdigest()[0:7]
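# e.g. a stream named "general" might end up as "1a79a4d!DEACTIVATED:general"
# (the seven-character hex prefix shown here is only illustrative).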
new_name = (hashed_stream_id + "!DEACTIVATED:" + old_name)[: Stream.MAX_NAME_LENGTH]
stream.name = new_name[: Stream.MAX_NAME_LENGTH]
stream.save(update_fields=["name", "deactivated", "invite_only"])
# If this is a default stream, remove it, properly sending a
# notification to browser clients.
if DefaultStream.objects.filter(realm_id=stream.realm_id, stream_id=stream.id).exists():
do_remove_default_stream(stream)
default_stream_groups_for_stream = DefaultStreamGroup.objects.filter(streams__id=stream.id)
for group in default_stream_groups_for_stream:
do_remove_streams_from_default_stream_group(stream.realm, group, [stream])
# Remove the old stream information from remote cache.
old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
cache_delete(old_cache_key)
stream_dict = stream.to_dict()
stream_dict.update(dict(name=old_name, invite_only=was_invite_only))
event = dict(type="stream", op="delete", streams=[stream_dict])
send_event(stream.realm, event, affected_user_ids)
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=stream.realm,
acting_user=acting_user,
modified_stream=stream,
event_type=RealmAuditLog.STREAM_DEACTIVATED,
event_time=event_time,
)
def send_user_email_update_event(user_profile: UserProfile) -> None:
payload = dict(user_id=user_profile.id, new_email=user_profile.email)
send_event(
user_profile.realm,
dict(type="realm_user", op="update", person=payload),
active_user_ids(user_profile.realm_id),
)
def do_change_user_delivery_email(user_profile: UserProfile, new_email: str) -> None:
delete_user_profile_caches([user_profile])
user_profile.delivery_email = new_email
if user_profile.email_address_is_realm_public():
user_profile.email = new_email
user_profile.save(update_fields=["email", "delivery_email"])
else:
user_profile.save(update_fields=["delivery_email"])
# We notify just the target user (and eventually org admins, only
# when email_address_visibility=EMAIL_ADDRESS_VISIBILITY_ADMINS)
# about their new delivery email, since that field is private.
payload = dict(user_id=user_profile.id, delivery_email=new_email)
event = dict(type="realm_user", op="update", person=payload)
send_event(user_profile.realm, event, [user_profile.id])
if user_profile.avatar_source == UserProfile.AVATAR_FROM_GRAVATAR:
# If the user is using Gravatar to manage their email address,
# their Gravatar just changed, and we need to notify other
# clients.
notify_avatar_url_change(user_profile)
if user_profile.email_address_is_realm_public():
# Additionally, if we're also changing the publicly visible
# email, we send a new_email event as well.
send_user_email_update_event(user_profile)
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=user_profile,
modified_user=user_profile,
event_type=RealmAuditLog.USER_EMAIL_CHANGED,
event_time=event_time,
)
def do_start_email_change_process(user_profile: UserProfile, new_email: str) -> None:
old_email = user_profile.delivery_email
obj = EmailChangeStatus.objects.create(
new_email=new_email,
old_email=old_email,
user_profile=user_profile,
realm=user_profile.realm,
)
activation_url = create_confirmation_link(obj, Confirmation.EMAIL_CHANGE)
from zerver.context_processors import common_context
context = common_context(user_profile)
context.update(
old_email=old_email,
new_email=new_email,
activate_url=activation_url,
)
language = user_profile.default_language
send_email(
"zerver/emails/confirm_new_email",
to_emails=[new_email],
from_name=FromAddress.security_email_from_name(language=language),
from_address=FromAddress.tokenized_no_reply_address(),
language=language,
context=context,
realm=user_profile.realm,
)
def compute_irc_user_fullname(email: str) -> str:
return email.split("@")[0] + " (IRC)"
def compute_jabber_user_fullname(email: str) -> str:
return email.split("@")[0] + " (XMPP)"
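# Illustrative examples (not exercised here), derived from the helpers above:
#   compute_irc_user_fullname("alice@irc.example.com") == "alice (IRC)"
#   compute_jabber_user_fullname("bob@jabber.example.com") == "bob (XMPP)"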
@cache_with_key(
lambda realm, email, f: user_profile_delivery_email_cache_key(email, realm),
timeout=3600 * 24 * 7,
)
def create_mirror_user_if_needed(
realm: Realm, email: str, email_to_fullname: Callable[[str], str]
) -> UserProfile:
try:
return get_user_by_delivery_email(email, realm)
except UserProfile.DoesNotExist:
try:
# Forge a user for this person
return create_user(
email=email,
password=None,
realm=realm,
full_name=email_to_fullname(email),
active=False,
is_mirror_dummy=True,
)
except IntegrityError:
return get_user_by_delivery_email(email, realm)
def render_incoming_message(
message: Message,
content: str,
user_ids: Set[int],
realm: Realm,
mention_data: Optional[MentionData] = None,
email_gateway: bool = False,
) -> MessageRenderingResult:
realm_alert_words_automaton = get_alert_word_automaton(realm)
try:
rendering_result = render_markdown(
message=message,
content=content,
realm=realm,
realm_alert_words_automaton=realm_alert_words_automaton,
mention_data=mention_data,
email_gateway=email_gateway,
)
except MarkdownRenderingException:
raise JsonableError(_("Unable to render message"))
return rendering_result
class RecipientInfoResult(TypedDict):
active_user_ids: Set[int]
online_push_user_ids: Set[int]
pm_mention_email_disabled_user_ids: Set[int]
pm_mention_push_disabled_user_ids: Set[int]
stream_email_user_ids: Set[int]
stream_push_user_ids: Set[int]
wildcard_mention_user_ids: Set[int]
muted_sender_user_ids: Set[int]
um_eligible_user_ids: Set[int]
long_term_idle_user_ids: Set[int]
default_bot_user_ids: Set[int]
service_bot_tuples: List[Tuple[int, int]]
def get_recipient_info(
*,
realm_id: int,
recipient: Recipient,
sender_id: int,
stream_topic: Optional[StreamTopicTarget],
possibly_mentioned_user_ids: AbstractSet[int] = set(),
possible_wildcard_mention: bool = True,
) -> RecipientInfoResult:
stream_push_user_ids: Set[int] = set()
stream_email_user_ids: Set[int] = set()
wildcard_mention_user_ids: Set[int] = set()
muted_sender_user_ids: Set[int] = get_muting_users(sender_id)
if recipient.type == Recipient.PERSONAL:
# The sender and recipient may be the same id, so
# de-duplicate using a set.
message_to_user_ids = list({recipient.type_id, sender_id})
assert len(message_to_user_ids) in [1, 2]
elif recipient.type == Recipient.STREAM:
# Anybody calling us w/r/t a stream message needs to supply
# stream_topic. We may eventually want to have different versions
# of this function for different message types.
assert stream_topic is not None
user_ids_muting_topic = stream_topic.user_ids_muting_topic()
subscription_rows = (
get_subscriptions_for_send_message(
realm_id=realm_id,
stream_id=stream_topic.stream_id,
possible_wildcard_mention=possible_wildcard_mention,
possibly_mentioned_user_ids=possibly_mentioned_user_ids,
)
.annotate(
user_profile_email_notifications=F(
"user_profile__enable_stream_email_notifications"
),
user_profile_push_notifications=F("user_profile__enable_stream_push_notifications"),
user_profile_wildcard_mentions_notify=F("user_profile__wildcard_mentions_notify"),
)
.values(
"user_profile_id",
"push_notifications",
"email_notifications",
"wildcard_mentions_notify",
"user_profile_email_notifications",
"user_profile_push_notifications",
"user_profile_wildcard_mentions_notify",
"is_muted",
)
.order_by("user_profile_id")
)
message_to_user_ids = [row["user_profile_id"] for row in subscription_rows]
def should_send(setting: str, row: Dict[str, Any]) -> bool:
            # The per-user (UserProfile) stream notification settings act as
            # defaults, which can be overridden by the per-subscription
            # settings (when those values are not null).
if row["is_muted"]:
return False
if row["user_profile_id"] in user_ids_muting_topic:
return False
if row[setting] is not None:
return row[setting]
return row["user_profile_" + setting]
stream_push_user_ids = {
row["user_profile_id"]
for row in subscription_rows
# Note: muting a stream overrides stream_push_notify
if should_send("push_notifications", row)
}
stream_email_user_ids = {
row["user_profile_id"]
for row in subscription_rows
# Note: muting a stream overrides stream_email_notify
if should_send("email_notifications", row)
}
if possible_wildcard_mention:
# If there's a possible wildcard mention, we need to
# determine which users would receive a wildcard mention
# notification for this message should the message indeed
# contain a wildcard mention.
#
# We don't have separate values for push/email
# notifications here; at this stage, we're just
# determining whether this wildcard mention should be
# treated as a mention (and follow the user's mention
# notification preferences) or a normal message.
wildcard_mention_user_ids = {
row["user_profile_id"]
for row in subscription_rows
if should_send("wildcard_mentions_notify", row)
}
elif recipient.type == Recipient.HUDDLE:
message_to_user_ids = get_huddle_user_ids(recipient)
else:
raise ValueError("Bad recipient type")
message_to_user_id_set = set(message_to_user_ids)
user_ids = set(message_to_user_id_set)
# Important note: Because we haven't rendered Markdown yet, we
# don't yet know which of these possibly-mentioned users was
# actually mentioned in the message (in other words, the
# mention syntax might have been in a code block or otherwise
# escaped). `get_ids_for` will filter these extra user rows
# for our data structures not related to bots
user_ids |= possibly_mentioned_user_ids
if user_ids:
query = UserProfile.objects.filter(is_active=True).values(
"id",
"enable_online_push_notifications",
"enable_offline_email_notifications",
"enable_offline_push_notifications",
"is_bot",
"bot_type",
"long_term_idle",
)
        # query_for_ids is highly optimized for large queries, and we
        # need this codepath to be fast (it's part of sending messages).
query = query_for_ids(
query=query,
user_ids=sorted(user_ids),
field="id",
)
rows = list(query)
else:
# TODO: We should always have at least one user_id as a recipient
# of any message we send. Right now the exception to this
# rule is `notify_new_user`, which, at least in a possibly
# contrived test scenario, can attempt to send messages
# to an inactive bot. When we plug that hole, we can avoid
# this `else` clause and just `assert(user_ids)`.
#
# UPDATE: It's February 2020 (and a couple years after the above
# comment was written). We have simplified notify_new_user
# so that it should be a little easier to reason about.
# There is currently some cleanup to how we handle cross
# realm bots that is still under development. Once that
# effort is complete, we should be able to address this
# to-do.
rows = []
def get_ids_for(f: Callable[[Dict[str, Any]], bool]) -> Set[int]:
"""Only includes users on the explicit message to line"""
return {row["id"] for row in rows if f(row)} & message_to_user_id_set
def is_service_bot(row: Dict[str, Any]) -> bool:
return row["is_bot"] and (row["bot_type"] in UserProfile.SERVICE_BOT_TYPES)
active_user_ids = get_ids_for(lambda r: True)
online_push_user_ids = get_ids_for(
lambda r: r["enable_online_push_notifications"],
)
    # We only track the users who have disabled this setting, since that
    # will usually be a much smaller set than those who have enabled it
    # (which is the default).
pm_mention_email_disabled_user_ids = get_ids_for(
lambda r: not r["enable_offline_email_notifications"]
)
pm_mention_push_disabled_user_ids = get_ids_for(
lambda r: not r["enable_offline_push_notifications"]
)
# Service bots don't get UserMessage rows.
um_eligible_user_ids = get_ids_for(
lambda r: not is_service_bot(r),
)
long_term_idle_user_ids = get_ids_for(
lambda r: r["long_term_idle"],
)
# These two bot data structures need to filter from the full set
# of users who either are receiving the message or might have been
# mentioned in it, and so can't use get_ids_for.
#
# Further in the do_send_messages code path, once
# `mentioned_user_ids` has been computed via Markdown, we'll filter
# these data structures for just those users who are either a
# direct recipient or were mentioned; for now, we're just making
# sure we have the data we need for that without extra database
# queries.
default_bot_user_ids = {
row["id"] for row in rows if row["is_bot"] and row["bot_type"] == UserProfile.DEFAULT_BOT
}
service_bot_tuples = [(row["id"], row["bot_type"]) for row in rows if is_service_bot(row)]
info: RecipientInfoResult = dict(
active_user_ids=active_user_ids,
online_push_user_ids=online_push_user_ids,
pm_mention_email_disabled_user_ids=pm_mention_email_disabled_user_ids,
pm_mention_push_disabled_user_ids=pm_mention_push_disabled_user_ids,
stream_push_user_ids=stream_push_user_ids,
stream_email_user_ids=stream_email_user_ids,
wildcard_mention_user_ids=wildcard_mention_user_ids,
muted_sender_user_ids=muted_sender_user_ids,
um_eligible_user_ids=um_eligible_user_ids,
long_term_idle_user_ids=long_term_idle_user_ids,
default_bot_user_ids=default_bot_user_ids,
service_bot_tuples=service_bot_tuples,
)
return info
def get_service_bot_events(
sender: UserProfile,
service_bot_tuples: List[Tuple[int, int]],
mentioned_user_ids: Set[int],
active_user_ids: Set[int],
recipient_type: int,
) -> Dict[str, List[Dict[str, Any]]]:
event_dict: Dict[str, List[Dict[str, Any]]] = defaultdict(list)
# Avoid infinite loops by preventing messages sent by bots from generating
# Service events.
if sender.is_bot:
return event_dict
def maybe_add_event(user_profile_id: int, bot_type: int) -> None:
if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
queue_name = "outgoing_webhooks"
elif bot_type == UserProfile.EMBEDDED_BOT:
queue_name = "embedded_bots"
else:
logging.error(
"Unexpected bot_type for Service bot id=%s: %s",
user_profile_id,
bot_type,
)
return
is_stream = recipient_type == Recipient.STREAM
# Important note: service_bot_tuples may contain service bots
# who were not actually mentioned in the message (e.g. if
# mention syntax for that bot appeared in a code block).
# Thus, it is important to filter any users who aren't part of
# either mentioned_user_ids (the actual mentioned users) or
# active_user_ids (the actual recipients).
#
# So even though this is implied by the logic below, we filter
# these not-actually-mentioned users here, to help keep this
# function future-proof.
if user_profile_id not in mentioned_user_ids and user_profile_id not in active_user_ids:
return
# Mention triggers, for stream messages
if is_stream and user_profile_id in mentioned_user_ids:
trigger = "mention"
# PM triggers for personal and huddle messages
elif (not is_stream) and (user_profile_id in active_user_ids):
trigger = "private_message"
else:
return
event_dict[queue_name].append(
{
"trigger": trigger,
"user_profile_id": user_profile_id,
}
)
for user_profile_id, bot_type in service_bot_tuples:
maybe_add_event(
user_profile_id=user_profile_id,
bot_type=bot_type,
)
return event_dict
def do_schedule_messages(send_message_requests: Sequence[SendMessageRequest]) -> List[int]:
scheduled_messages: List[ScheduledMessage] = []
for send_request in send_message_requests:
scheduled_message = ScheduledMessage()
scheduled_message.sender = send_request.message.sender
scheduled_message.recipient = send_request.message.recipient
topic_name = send_request.message.topic_name()
scheduled_message.set_topic_name(topic_name=topic_name)
scheduled_message.content = send_request.message.content
scheduled_message.sending_client = send_request.message.sending_client
scheduled_message.stream = send_request.stream
scheduled_message.realm = send_request.realm
assert send_request.deliver_at is not None
scheduled_message.scheduled_timestamp = send_request.deliver_at
if send_request.delivery_type == "send_later":
scheduled_message.delivery_type = ScheduledMessage.SEND_LATER
elif send_request.delivery_type == "remind":
scheduled_message.delivery_type = ScheduledMessage.REMIND
scheduled_messages.append(scheduled_message)
ScheduledMessage.objects.bulk_create(scheduled_messages)
return [scheduled_message.id for scheduled_message in scheduled_messages]
def build_message_send_dict(
message: Message,
stream: Optional[Stream] = None,
local_id: Optional[str] = None,
sender_queue_id: Optional[str] = None,
realm: Optional[Realm] = None,
widget_content_dict: Optional[Dict[str, Any]] = None,
email_gateway: bool = False,
) -> SendMessageRequest:
"""Returns a dictionary that can be passed into do_send_messages. In
production, this is always called by check_message, but some
testing code paths call it directly.
"""
if realm is None:
realm = message.sender.realm
mention_data = MentionData(
realm_id=realm.id,
content=message.content,
)
if message.is_stream_message():
stream_id = message.recipient.type_id
stream_topic: Optional[StreamTopicTarget] = StreamTopicTarget(
stream_id=stream_id,
topic_name=message.topic_name(),
)
else:
stream_topic = None
info = get_recipient_info(
realm_id=realm.id,
recipient=message.recipient,
sender_id=message.sender_id,
stream_topic=stream_topic,
possibly_mentioned_user_ids=mention_data.get_user_ids(),
possible_wildcard_mention=mention_data.message_has_wildcards(),
)
# Render our message_dicts.
assert message.rendered_content is None
rendering_result = render_incoming_message(
message,
message.content,
info["active_user_ids"],
realm,
mention_data=mention_data,
email_gateway=email_gateway,
)
message.rendered_content = rendering_result.rendered_content
message.rendered_content_version = markdown_version
links_for_embed = rendering_result.links_for_preview
mentioned_user_groups_map = get_user_group_mentions_data(
mentioned_user_ids=rendering_result.mentions_user_ids,
mentioned_user_group_ids=list(rendering_result.mentions_user_group_ids),
mention_data=mention_data,
)
# For single user as well as user group mentions, we set the `mentioned`
# flag on `UserMessage`
for group_id in rendering_result.mentions_user_group_ids:
members = mention_data.get_group_members(group_id)
rendering_result.mentions_user_ids.update(members)
# Only send data to Tornado about wildcard mentions if message
# rendering determined the message had an actual wildcard
# mention in it (and not e.g. wildcard mention syntax inside a
# code block).
if rendering_result.mentions_wildcard:
wildcard_mention_user_ids = info["wildcard_mention_user_ids"]
else:
wildcard_mention_user_ids = set()
"""
Once we have the actual list of mentioned ids from message
rendering, we can patch in "default bots" (aka normal bots)
who were directly mentioned in this message as eligible to
get UserMessage rows.
"""
mentioned_user_ids = rendering_result.mentions_user_ids
default_bot_user_ids = info["default_bot_user_ids"]
mentioned_bot_user_ids = default_bot_user_ids & mentioned_user_ids
info["um_eligible_user_ids"] |= mentioned_bot_user_ids
message_send_dict = SendMessageRequest(
stream=stream,
local_id=local_id,
sender_queue_id=sender_queue_id,
realm=realm,
mention_data=mention_data,
mentioned_user_groups_map=mentioned_user_groups_map,
message=message,
rendering_result=rendering_result,
active_user_ids=info["active_user_ids"],
online_push_user_ids=info["online_push_user_ids"],
pm_mention_email_disabled_user_ids=info["pm_mention_email_disabled_user_ids"],
pm_mention_push_disabled_user_ids=info["pm_mention_push_disabled_user_ids"],
stream_push_user_ids=info["stream_push_user_ids"],
stream_email_user_ids=info["stream_email_user_ids"],
muted_sender_user_ids=info["muted_sender_user_ids"],
um_eligible_user_ids=info["um_eligible_user_ids"],
long_term_idle_user_ids=info["long_term_idle_user_ids"],
default_bot_user_ids=info["default_bot_user_ids"],
service_bot_tuples=info["service_bot_tuples"],
wildcard_mention_user_ids=wildcard_mention_user_ids,
links_for_embed=links_for_embed,
widget_content=widget_content_dict,
)
return message_send_dict
def do_send_messages(
send_message_requests_maybe_none: Sequence[Optional[SendMessageRequest]],
email_gateway: bool = False,
mark_as_read: Sequence[int] = [],
) -> List[int]:
"""See
https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
for high-level documentation on this subsystem.
"""
# Filter out messages which didn't pass internal_prep_message properly
send_message_requests = [
send_request
for send_request in send_message_requests_maybe_none
if send_request is not None
]
# Save the message receipts in the database
user_message_flags: Dict[int, Dict[int, List[str]]] = defaultdict(dict)
with transaction.atomic():
Message.objects.bulk_create(send_request.message for send_request in send_message_requests)
# Claim attachments in message
for send_request in send_message_requests:
if do_claim_attachments(
send_request.message, send_request.rendering_result.potential_attachment_path_ids
):
send_request.message.has_attachment = True
send_request.message.save(update_fields=["has_attachment"])
ums: List[UserMessageLite] = []
for send_request in send_message_requests:
# Service bots (outgoing webhook bots and embedded bots) don't store UserMessage rows;
# they will be processed later.
mentioned_user_ids = send_request.rendering_result.mentions_user_ids
# Extend the set with users who have muted the sender.
mark_as_read_for_users = send_request.muted_sender_user_ids
mark_as_read_for_users.update(mark_as_read)
user_messages = create_user_messages(
message=send_request.message,
rendering_result=send_request.rendering_result,
um_eligible_user_ids=send_request.um_eligible_user_ids,
long_term_idle_user_ids=send_request.long_term_idle_user_ids,
stream_push_user_ids=send_request.stream_push_user_ids,
stream_email_user_ids=send_request.stream_email_user_ids,
mentioned_user_ids=mentioned_user_ids,
mark_as_read_for_users=mark_as_read_for_users,
)
for um in user_messages:
user_message_flags[send_request.message.id][um.user_profile_id] = um.flags_list()
ums.extend(user_messages)
send_request.message.service_queue_events = get_service_bot_events(
sender=send_request.message.sender,
service_bot_tuples=send_request.service_bot_tuples,
mentioned_user_ids=mentioned_user_ids,
active_user_ids=send_request.active_user_ids,
recipient_type=send_request.message.recipient.type,
)
bulk_insert_ums(ums)
for send_request in send_message_requests:
do_widget_post_save_actions(send_request)
# This next loop is responsible for notifying other parts of the
# Zulip system about the messages we just committed to the database:
# * Notifying clients via send_event
# * Triggering outgoing webhooks via the service event queue.
# * Updating the `first_message_id` field for streams without any message history.
# * Implementing the Welcome Bot reply hack
# * Adding links to the embed_links queue for open graph processing.
for send_request in send_message_requests:
realm_id: Optional[int] = None
if send_request.message.is_stream_message():
if send_request.stream is None:
stream_id = send_request.message.recipient.type_id
send_request.stream = Stream.objects.select_related().get(id=stream_id)
# assert needed because stubs for django are missing
assert send_request.stream is not None
realm_id = send_request.stream.realm_id
# Deliver events to the real-time push system, as well as
# enqueuing any additional processing triggered by the message.
wide_message_dict = MessageDict.wide_dict(send_request.message, realm_id)
user_flags = user_message_flags.get(send_request.message.id, {})
"""
TODO: We may want to limit user_ids to only those users who have
UserMessage rows, if only for minor performance reasons.
For now we queue events for all subscribers/sendees of the
message, since downstream code may still do notifications
that don't require UserMessage rows.
Our automated tests have gotten better on this codepath,
but we may have coverage gaps, so we should be careful
about changing the next line.
"""
user_ids = send_request.active_user_ids | set(user_flags.keys())
sender_id = send_request.message.sender_id
# We make sure the sender is listed first in the `users` list;
# this results in the sender receiving the message first if
# there are thousands of recipients, decreasing perceived latency.
if sender_id in user_ids:
user_list = [sender_id] + list(user_ids - {sender_id})
else:
user_list = list(user_ids)
class UserData(TypedDict):
id: int
flags: List[str]
mentioned_user_group_id: Optional[int]
users: List[UserData] = []
for user_id in user_list:
flags = user_flags.get(user_id, [])
user_data: UserData = dict(id=user_id, flags=flags, mentioned_user_group_id=None)
if user_id in send_request.mentioned_user_groups_map:
user_data["mentioned_user_group_id"] = send_request.mentioned_user_groups_map[
user_id
]
users.append(user_data)
sender = send_request.message.sender
message_type = wide_message_dict["type"]
active_users_data = [
ActivePresenceIdleUserData(
alerted="has_alert_word" in user_flags.get(user_id, []),
notifications_data=UserMessageNotificationsData.from_user_id_sets(
user_id=user_id,
flags=user_flags.get(user_id, []),
private_message=(message_type == "private"),
online_push_user_ids=send_request.online_push_user_ids,
pm_mention_push_disabled_user_ids=send_request.pm_mention_push_disabled_user_ids,
pm_mention_email_disabled_user_ids=send_request.pm_mention_email_disabled_user_ids,
stream_push_user_ids=send_request.stream_push_user_ids,
stream_email_user_ids=send_request.stream_email_user_ids,
wildcard_mention_user_ids=send_request.wildcard_mention_user_ids,
muted_sender_user_ids=send_request.muted_sender_user_ids,
),
)
for user_id in send_request.active_user_ids
]
presence_idle_user_ids = get_active_presence_idle_user_ids(
realm=sender.realm,
sender_id=sender.id,
active_users_data=active_users_data,
)
event = dict(
type="message",
message=send_request.message.id,
message_dict=wide_message_dict,
presence_idle_user_ids=presence_idle_user_ids,
online_push_user_ids=list(send_request.online_push_user_ids),
pm_mention_push_disabled_user_ids=list(send_request.pm_mention_push_disabled_user_ids),
pm_mention_email_disabled_user_ids=list(
send_request.pm_mention_email_disabled_user_ids
),
stream_push_user_ids=list(send_request.stream_push_user_ids),
stream_email_user_ids=list(send_request.stream_email_user_ids),
wildcard_mention_user_ids=list(send_request.wildcard_mention_user_ids),
muted_sender_user_ids=list(send_request.muted_sender_user_ids),
)
if send_request.message.is_stream_message():
# Note: This is where authorization for single-stream
# get_updates happens! We only attach stream data to the
# notify new_message request if it's a public stream,
# ensuring that in the tornado server, non-public stream
            # messages are only associated with their subscribed users.
# assert needed because stubs for django are missing
assert send_request.stream is not None
if send_request.stream.is_public():
event["realm_id"] = send_request.stream.realm_id
event["stream_name"] = send_request.stream.name
if send_request.stream.invite_only:
event["invite_only"] = True
if send_request.stream.first_message_id is None:
send_request.stream.first_message_id = send_request.message.id
send_request.stream.save(update_fields=["first_message_id"])
if send_request.local_id is not None:
event["local_id"] = send_request.local_id
if send_request.sender_queue_id is not None:
event["sender_queue_id"] = send_request.sender_queue_id
send_event(send_request.realm, event, users)
if send_request.links_for_embed:
event_data = {
"message_id": send_request.message.id,
"message_content": send_request.message.content,
"message_realm_id": send_request.realm.id,
"urls": list(send_request.links_for_embed),
}
queue_json_publish("embed_links", event_data)
if send_request.message.recipient.type == Recipient.PERSONAL:
welcome_bot_id = get_system_bot(
settings.WELCOME_BOT, send_request.message.sender.realm_id
).id
if (
welcome_bot_id in send_request.active_user_ids
and welcome_bot_id != send_request.message.sender_id
):
from zerver.lib.onboarding import send_welcome_bot_response
send_welcome_bot_response(send_request)
for queue_name, events in send_request.message.service_queue_events.items():
for event in events:
queue_json_publish(
queue_name,
{
"message": wide_message_dict,
"trigger": event["trigger"],
"user_profile_id": event["user_profile_id"],
},
)
return [send_request.message.id for send_request in send_message_requests]
class UserMessageLite:
"""
The Django ORM is too slow for bulk operations. This class
is optimized for the simple use case of inserting a bunch of
rows into zerver_usermessage.
"""
def __init__(self, user_profile_id: int, message_id: int, flags: int) -> None:
self.user_profile_id = user_profile_id
self.message_id = message_id
self.flags = flags
def flags_list(self) -> List[str]:
return UserMessage.flags_list_for_flags(self.flags)
def create_user_messages(
message: Message,
rendering_result: MessageRenderingResult,
um_eligible_user_ids: AbstractSet[int],
long_term_idle_user_ids: AbstractSet[int],
stream_push_user_ids: AbstractSet[int],
stream_email_user_ids: AbstractSet[int],
mentioned_user_ids: AbstractSet[int],
mark_as_read_for_users: Set[int],
) -> List[UserMessageLite]:
# These properties on the Message are set via
# render_markdown by code in the Markdown inline patterns
ids_with_alert_words = rendering_result.user_ids_with_alert_words
sender_id = message.sender.id
is_stream_message = message.is_stream_message()
base_flags = 0
if rendering_result.mentions_wildcard:
base_flags |= UserMessage.flags.wildcard_mentioned
if message.recipient.type in [Recipient.HUDDLE, Recipient.PERSONAL]:
base_flags |= UserMessage.flags.is_private
# For long_term_idle (aka soft-deactivated) users, we are allowed
# to optimize by lazily not creating UserMessage rows that would
# have the default 0 flag set (since the soft-reactivation logic
# knows how to create those when the user comes back). We need to
# create the UserMessage rows for these long_term_idle users
# non-lazily in a few cases:
#
# * There are nonzero flags (e.g. the user was mentioned), since
# that case is rare and this saves a lot of complexity in
# soft-reactivation.
#
# * If the user is going to be notified (e.g. they get push/email
# notifications for every message on a stream), since in that
# case the notifications code will call `access_message` on the
# message to re-verify permissions, and for private streams,
# will get an error if the UserMessage row doesn't exist yet.
#
# See https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html#soft-deactivation
# for details on this system.
user_messages = []
for user_profile_id in um_eligible_user_ids:
flags = base_flags
if (
user_profile_id == sender_id and message.sent_by_human()
) or user_profile_id in mark_as_read_for_users:
flags |= UserMessage.flags.read
if user_profile_id in mentioned_user_ids:
flags |= UserMessage.flags.mentioned
if user_profile_id in ids_with_alert_words:
flags |= UserMessage.flags.has_alert_word
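        # Illustrative example: the sender of their own human-sent message is
        # marked read; if they were also mentioned, the row carries
        # read | mentioned (plus any base flags such as is_private or
        # wildcard_mentioned when those apply).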
if (
user_profile_id in long_term_idle_user_ids
and user_profile_id not in stream_push_user_ids
and user_profile_id not in stream_email_user_ids
and is_stream_message
and int(flags) == 0
):
continue
um = UserMessageLite(
user_profile_id=user_profile_id,
message_id=message.id,
flags=flags,
)
user_messages.append(um)
return user_messages
def bulk_insert_ums(ums: List[UserMessageLite]) -> None:
"""
Doing bulk inserts this way is much faster than using Django,
since we don't have any ORM overhead. Profiling with 1000
    users shows a speedup of 0.436 -> 0.027 seconds, so we're
    talking about roughly a 16x speedup.
"""
if not ums:
return
vals = [(um.user_profile_id, um.message_id, um.flags) for um in ums]
query = SQL(
"""
INSERT into
zerver_usermessage (user_profile_id, message_id, flags)
VALUES %s
"""
)
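    # Illustrative note (hypothetical values): execute_values expands the
    # single VALUES %s placeholder into a multi-row insert, e.g.
    #   VALUES (1, 10, 0), (2, 10, 1)
    # for two UserMessageLite rows.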
with connection.cursor() as cursor:
execute_values(cursor.cursor, query, vals)
def verify_submessage_sender(
*,
message_id: int,
message_sender_id: int,
submessage_sender_id: int,
) -> None:
"""Even though our submessage architecture is geared toward
collaboration among all message readers, we still enforce
    that the first person to attach a submessage to the message
must be the original sender of the message.
"""
if message_sender_id == submessage_sender_id:
return
if SubMessage.objects.filter(
message_id=message_id,
sender_id=message_sender_id,
).exists():
return
raise JsonableError(_("You cannot attach a submessage to this message."))
def do_add_submessage(
realm: Realm,
sender_id: int,
message_id: int,
msg_type: str,
content: str,
) -> None:
"""Should be called while holding a SELECT FOR UPDATE lock
(e.g. via access_message(..., lock_message=True)) on the
Message row, to prevent race conditions.
"""
submessage = SubMessage(
sender_id=sender_id,
message_id=message_id,
msg_type=msg_type,
content=content,
)
submessage.save()
event = dict(
type="submessage",
msg_type=msg_type,
message_id=message_id,
submessage_id=submessage.id,
sender_id=sender_id,
content=content,
)
ums = UserMessage.objects.filter(message_id=message_id)
target_user_ids = [um.user_profile_id for um in ums]
transaction.on_commit(lambda: send_event(realm, event, target_user_ids))
def notify_reaction_update(
user_profile: UserProfile, message: Message, reaction: Reaction, op: str
) -> None:
user_dict = {
"user_id": user_profile.id,
"email": user_profile.email,
"full_name": user_profile.full_name,
}
event: Dict[str, Any] = {
"type": "reaction",
"op": op,
"user_id": user_profile.id,
# TODO: We plan to remove this redundant user_dict object once
        # clients are updated to support accessing user_id. See
# https://github.com/zulip/zulip/pull/14711 for details.
"user": user_dict,
"message_id": message.id,
"emoji_name": reaction.emoji_name,
"emoji_code": reaction.emoji_code,
"reaction_type": reaction.reaction_type,
}
# Update the cached message since new reaction is added.
update_to_dict_cache([message])
# Recipients for message update events, including reactions, are
# everyone who got the original message, plus subscribers of
# streams with the access to stream's full history.
#
# This means reactions won't live-update in preview narrows for a
# stream the user isn't yet subscribed to; this is the right
# performance tradeoff to avoid sending every reaction to public
# stream messages to all users.
#
# To ensure that reactions do live-update for any user who has
# actually participated in reacting to a message, we add a
# "historical" UserMessage row for any user who reacts to message,
# subscribing them to future notifications, even if they are not
# subscribed to the stream.
user_ids = set(
UserMessage.objects.filter(message=message.id).values_list("user_profile_id", flat=True)
)
if message.recipient.type == Recipient.STREAM:
stream_id = message.recipient.type_id
stream = Stream.objects.get(id=stream_id)
user_ids |= subscriber_ids_with_stream_history_access(stream)
transaction.on_commit(lambda: send_event(user_profile.realm, event, list(user_ids)))
def do_add_reaction(
user_profile: UserProfile,
message: Message,
emoji_name: str,
emoji_code: str,
reaction_type: str,
) -> None:
"""Should be called while holding a SELECT FOR UPDATE lock
(e.g. via access_message(..., lock_message=True)) on the
Message row, to prevent race conditions.
"""
reaction = Reaction(
user_profile=user_profile,
message=message,
emoji_name=emoji_name,
emoji_code=emoji_code,
reaction_type=reaction_type,
)
reaction.save()
notify_reaction_update(user_profile, message, reaction, "add")
def check_add_reaction(
user_profile: UserProfile,
message_id: int,
emoji_name: str,
emoji_code: Optional[str],
reaction_type: Optional[str],
) -> None:
message, user_message = access_message(user_profile, message_id, lock_message=True)
if emoji_code is None:
# The emoji_code argument is only required for rare corner
# cases discussed in the long block comment below. For simple
# API clients, we allow specifying just the name, and just
# look up the code using the current name->code mapping.
emoji_code = emoji_name_to_emoji_code(message.sender.realm, emoji_name)[0]
if reaction_type is None:
reaction_type = emoji_name_to_emoji_code(message.sender.realm, emoji_name)[1]
if Reaction.objects.filter(
user_profile=user_profile,
message=message,
emoji_code=emoji_code,
reaction_type=reaction_type,
).exists():
raise JsonableError(_("Reaction already exists."))
query = Reaction.objects.filter(
message=message, emoji_code=emoji_code, reaction_type=reaction_type
)
if query.exists():
# If another user has already reacted to this message with
# same emoji code, we treat the new reaction as a vote for the
# existing reaction. So the emoji name used by that earlier
# reaction takes precedence over whatever was passed in this
# request. This is necessary to avoid a message having 2
# "different" emoji reactions with the same emoji code (and
# thus same image) on the same message, which looks ugly.
#
# In this "voting for an existing reaction" case, we shouldn't
# check whether the emoji code and emoji name match, since
# it's possible that the (emoji_type, emoji_name, emoji_code)
        # triple for this existing reaction may not pass validation
# now (e.g. because it is for a realm emoji that has been
# since deactivated). We still want to allow users to add a
        # vote for any old reaction they see in the UI even if that is a
# deactivated custom emoji, so we just use the emoji name from
# the existing reaction with no further validation.
reaction = query.first()
assert reaction is not None
emoji_name = reaction.emoji_name
else:
# Otherwise, use the name provided in this request, but verify
# it is valid in the user's realm (e.g. not a deactivated
# realm emoji).
check_emoji_request(user_profile.realm, emoji_name, emoji_code, reaction_type)
if user_message is None:
# Users can see and react to messages sent to streams they
# were not a subscriber to; in order to receive events for
        # those, we give the user a `historical` UserMessage object
# for the message. This is the same trick we use for starring
# messages.
UserMessage.objects.create(
user_profile=user_profile,
message=message,
flags=UserMessage.flags.historical | UserMessage.flags.read,
)
do_add_reaction(user_profile, message, emoji_name, emoji_code, reaction_type)
def do_remove_reaction(
user_profile: UserProfile, message: Message, emoji_code: str, reaction_type: str
) -> None:
"""Should be called while holding a SELECT FOR UPDATE lock
(e.g. via access_message(..., lock_message=True)) on the
Message row, to prevent race conditions.
"""
reaction = Reaction.objects.filter(
user_profile=user_profile,
message=message,
emoji_code=emoji_code,
reaction_type=reaction_type,
).get()
reaction.delete()
notify_reaction_update(user_profile, message, reaction, "remove")
def do_send_typing_notification(
realm: Realm, sender: UserProfile, recipient_user_profiles: List[UserProfile], operator: str
) -> None:
sender_dict = {"user_id": sender.id, "email": sender.email}
# Include a list of recipients in the event body to help identify where the typing is happening
recipient_dicts = [
{"user_id": profile.id, "email": profile.email} for profile in recipient_user_profiles
]
event = dict(
type="typing",
message_type="private",
op=operator,
sender=sender_dict,
recipients=recipient_dicts,
)
# Only deliver the notification to active user recipients
user_ids_to_notify = [user.id for user in recipient_user_profiles if user.is_active]
send_event(realm, event, user_ids_to_notify)
# check_send_typing_notification:
# Checks the typing notification and sends it
def check_send_typing_notification(sender: UserProfile, user_ids: List[int], operator: str) -> None:
realm = sender.realm
if sender.id not in user_ids:
user_ids.append(sender.id)
# If any of the user_ids being sent in are invalid, we will
# just reject the whole request, since a partial list of user_ids
# can create confusion related to huddles. Plus it's a good
# sign that a client is confused (or possibly even malicious) if
# we get bad user_ids.
user_profiles = []
for user_id in user_ids:
try:
            # We include cross-realm bots as possible recipients,
# so that clients can know which huddle conversation
# is relevant here.
user_profile = get_user_by_id_in_realm_including_cross_realm(user_id, sender.realm)
except UserProfile.DoesNotExist:
raise JsonableError(_("Invalid user ID {}").format(user_id))
user_profiles.append(user_profile)
do_send_typing_notification(
realm=realm,
sender=sender,
recipient_user_profiles=user_profiles,
operator=operator,
)
def do_send_stream_typing_notification(
sender: UserProfile, operator: str, stream: Stream, topic: str
) -> None:
sender_dict = {"user_id": sender.id, "email": sender.email}
event = dict(
type="typing",
message_type="stream",
op=operator,
sender=sender_dict,
stream_id=stream.id,
topic=topic,
)
user_ids_to_notify = get_user_ids_for_streams({stream.id})[stream.id]
send_event(sender.realm, event, user_ids_to_notify)
def ensure_stream(
realm: Realm,
stream_name: str,
invite_only: bool = False,
stream_description: str = "",
*,
acting_user: Optional[UserProfile],
) -> Stream:
return create_stream_if_needed(
realm,
stream_name,
invite_only=invite_only,
stream_description=stream_description,
acting_user=acting_user,
)[0]
def get_recipient_from_user_profiles(
recipient_profiles: Sequence[UserProfile],
forwarded_mirror_message: bool,
forwarder_user_profile: Optional[UserProfile],
sender: UserProfile,
) -> Recipient:
# Avoid mutating the passed in list of recipient_profiles.
recipient_profiles_map = {user_profile.id: user_profile for user_profile in recipient_profiles}
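    # Illustrative behavior of the logic below: a private message addressed to
    # [sender, other_user] collapses to a personal Recipient for other_user;
    # three or more distinct users (with the sender added back in) resolve to
    # a huddle Recipient.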
if forwarded_mirror_message:
# In our mirroring integrations with some third-party
# protocols, bots subscribed to the third-party protocol
# forward to Zulip messages that they received in the
# third-party service. The permissions model for that
# forwarding is that users can only submit to Zulip private
# messages they personally received, and here we do the check
# for whether forwarder_user_profile is among the private
# message recipients of the message.
assert forwarder_user_profile is not None
if forwarder_user_profile.id not in recipient_profiles_map:
raise ValidationError(_("User not authorized for this query"))
# If the private message is just between the sender and
# another person, force it to be a personal internally
if len(recipient_profiles_map) == 2 and sender.id in recipient_profiles_map:
del recipient_profiles_map[sender.id]
assert recipient_profiles_map
if len(recipient_profiles_map) == 1:
[user_profile] = recipient_profiles_map.values()
return user_profile.recipient
# Otherwise, we need a huddle. Make sure the sender is included in huddle messages
recipient_profiles_map[sender.id] = sender
user_ids = set(recipient_profiles_map)
return get_huddle_recipient(user_ids)
def validate_recipient_user_profiles(
user_profiles: Sequence[UserProfile], sender: UserProfile, allow_deactivated: bool = False
) -> Sequence[UserProfile]:
recipient_profiles_map: Dict[int, UserProfile] = {}
# We exempt cross-realm bots from the check that all the recipients
# are in the same realm.
realms = set()
if not is_cross_realm_bot_email(sender.email):
realms.add(sender.realm_id)
for user_profile in user_profiles:
if (
not user_profile.is_active
and not user_profile.is_mirror_dummy
and not allow_deactivated
) or user_profile.realm.deactivated:
raise ValidationError(
_("'{email}' is no longer using Zulip.").format(email=user_profile.email)
)
recipient_profiles_map[user_profile.id] = user_profile
if not is_cross_realm_bot_email(user_profile.email):
realms.add(user_profile.realm_id)
if len(realms) > 1:
raise ValidationError(_("You can't send private messages outside of your organization."))
return list(recipient_profiles_map.values())
def recipient_for_user_profiles(
user_profiles: Sequence[UserProfile],
forwarded_mirror_message: bool,
forwarder_user_profile: Optional[UserProfile],
sender: UserProfile,
allow_deactivated: bool = False,
) -> Recipient:
recipient_profiles = validate_recipient_user_profiles(
user_profiles, sender, allow_deactivated=allow_deactivated
)
return get_recipient_from_user_profiles(
recipient_profiles, forwarded_mirror_message, forwarder_user_profile, sender
)
def already_sent_mirrored_message_id(message: Message) -> Optional[int]:
if message.recipient.type == Recipient.HUDDLE:
# For huddle messages, we use a 10-second window because the
# timestamps aren't guaranteed to actually match between two
# copies of the same message.
time_window = datetime.timedelta(seconds=10)
else:
time_window = datetime.timedelta(seconds=0)
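    # Illustrative example (hypothetical times): a huddle message dated
    # 12:00:00 matches any identical copy (same sender, recipient, content,
    # and sending client) dated between 11:59:50 and 12:00:10.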
query = Message.objects.filter(
sender=message.sender,
recipient=message.recipient,
content=message.content,
sending_client=message.sending_client,
date_sent__gte=message.date_sent - time_window,
date_sent__lte=message.date_sent + time_window,
)
messages = filter_by_exact_message_topic(
query=query,
message=message,
)
if messages.exists():
return messages[0].id
return None
def extract_stream_indicator(s: str) -> Union[str, int]:
    # Users can refer to the stream by either an id or a name,
    # and if they choose to pass a name, they may JSON-encode
    # it for legacy reasons.
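    # Illustrative examples, derived from the logic below:
    #   "social"      -> "social"   (raw stream name)
    #   '"social"'    -> "social"   (JSON-encoded name)
    #   "123"         -> 123        (stream id)
    #   '["social"]'  -> "social"   (legacy single-element list)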
try:
data = orjson.loads(s)
except orjson.JSONDecodeError:
# If there was no JSON encoding, then we just
# have a raw stream name.
return s
# We should stop supporting this odd use case
# once we improve our documentation.
if isinstance(data, list):
if len(data) != 1: # nocoverage
raise JsonableError(_("Expected exactly one stream"))
data = data[0]
if isinstance(data, str):
# We had a JSON-encoded stream name.
return data
if isinstance(data, int):
# We had a stream id.
return data
raise JsonableError(_("Invalid data type for stream"))
def extract_private_recipients(s: str) -> Union[List[str], List[int]]:
# We try to accept multiple incoming formats for recipients.
# See test_extract_recipients() for examples of what we allow.
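    # Illustrative examples, derived from the logic below (result order is
    # unspecified because of de-duplication via sets):
    #   '["alice@zulip.com", "bob@zulip.com"]' -> both emails, validated
    #   "alice@zulip.com,bob@zulip.com"        -> same, via the comma split
    #   "[11, 12]"                             -> [11, 12] (user IDs)
    # Mixing emails and user IDs in one request raises JsonableError.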
try:
data = orjson.loads(s)
except orjson.JSONDecodeError:
data = s
if isinstance(data, str):
data = data.split(",")
if not isinstance(data, list):
raise JsonableError(_("Invalid data type for recipients"))
if not data:
# We don't complain about empty message recipients here
return data
if isinstance(data[0], str):
return get_validated_emails(data)
if not isinstance(data[0], int):
raise JsonableError(_("Invalid data type for recipients"))
return get_validated_user_ids(data)
def get_validated_user_ids(user_ids: Collection[int]) -> List[int]:
for user_id in user_ids:
if not isinstance(user_id, int):
raise JsonableError(_("Recipient lists may contain emails or user IDs, but not both."))
return list(set(user_ids))
def get_validated_emails(emails: Collection[str]) -> List[str]:
for email in emails:
if not isinstance(email, str):
raise JsonableError(_("Recipient lists may contain emails or user IDs, but not both."))
return list(filter(bool, {email.strip() for email in emails}))
def check_send_stream_message(
sender: UserProfile,
client: Client,
stream_name: str,
topic: str,
body: str,
realm: Optional[Realm] = None,
) -> int:
addressee = Addressee.for_stream_name(stream_name, topic)
message = check_message(sender, client, addressee, body, realm)
return do_send_messages([message])[0]
def check_send_stream_message_by_id(
sender: UserProfile,
client: Client,
stream_id: int,
topic: str,
body: str,
realm: Optional[Realm] = None,
) -> int:
addressee = Addressee.for_stream_id(stream_id, topic)
message = check_message(sender, client, addressee, body, realm)
return do_send_messages([message])[0]
def check_send_private_message(
sender: UserProfile, client: Client, receiving_user: UserProfile, body: str
) -> int:
addressee = Addressee.for_user_profile(receiving_user)
message = check_message(sender, client, addressee, body)
return do_send_messages([message])[0]
# check_send_message:
# Returns the id of the sent message. Has same argspec as check_message.
def check_send_message(
sender: UserProfile,
client: Client,
message_type_name: str,
message_to: Union[Sequence[int], Sequence[str]],
topic_name: Optional[str],
message_content: str,
realm: Optional[Realm] = None,
forged: bool = False,
forged_timestamp: Optional[float] = None,
forwarder_user_profile: Optional[UserProfile] = None,
local_id: Optional[str] = None,
sender_queue_id: Optional[str] = None,
widget_content: Optional[str] = None,
*,
skip_stream_access_check: bool = False,
) -> int:
addressee = Addressee.legacy_build(sender, message_type_name, message_to, topic_name)
try:
message = check_message(
sender,
client,
addressee,
message_content,
realm,
forged,
forged_timestamp,
forwarder_user_profile,
local_id,
sender_queue_id,
widget_content,
skip_stream_access_check=skip_stream_access_check,
)
except ZephyrMessageAlreadySentException as e:
return e.message_id
return do_send_messages([message])[0]
def check_schedule_message(
sender: UserProfile,
client: Client,
message_type_name: str,
message_to: Union[Sequence[str], Sequence[int]],
topic_name: Optional[str],
message_content: str,
delivery_type: str,
deliver_at: datetime.datetime,
realm: Optional[Realm] = None,
forwarder_user_profile: Optional[UserProfile] = None,
) -> int:
addressee = Addressee.legacy_build(sender, message_type_name, message_to, topic_name)
send_request = check_message(
sender,
client,
addressee,
message_content,
realm=realm,
forwarder_user_profile=forwarder_user_profile,
)
send_request.deliver_at = deliver_at
send_request.delivery_type = delivery_type
recipient = send_request.message.recipient
if delivery_type == "remind" and (
recipient.type != Recipient.STREAM and recipient.type_id != sender.id
):
raise JsonableError(_("Reminders can only be set for streams."))
return do_schedule_messages([send_request])[0]
def validate_message_edit_payload(
message: Message,
stream_id: Optional[int],
topic_name: Optional[str],
propagate_mode: Optional[str],
content: Optional[str],
) -> None:
"""
Checks that the data sent is well-formed. Does not handle editability, permissions etc.
"""
if topic_name is None and content is None and stream_id is None:
raise JsonableError(_("Nothing to change"))
if not message.is_stream_message():
if stream_id is not None:
raise JsonableError(_("Private messages cannot be moved to streams."))
if topic_name is not None:
raise JsonableError(_("Private messages cannot have topics."))
if propagate_mode != "change_one" and topic_name is None and stream_id is None:
raise JsonableError(_("Invalid propagate_mode without topic edit"))
if topic_name == "":
raise JsonableError(_("Topic can't be empty"))
if stream_id is not None and content is not None:
raise JsonableError(_("Cannot change message content while changing stream"))
# Right now, we prevent users from editing widgets.
if content is not None and is_widget_message(message):
raise JsonableError(_("Widgets cannot be edited."))
def can_edit_content_or_topic(
message: Message,
user_profile: UserProfile,
is_no_topic_msg: bool,
content: Optional[str] = None,
topic_name: Optional[str] = None,
) -> bool:
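    # Illustrative summary of the checks below: the sender may always edit;
    # non-senders may never edit content; for topic-only edits, anyone may
    # retopic a "(no topic)" message, and otherwise the realm's
    # edit_topic_policy (via can_edit_topic_of_any_message) decides.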
# You have permission to edit the message (both content and topic) if you sent it.
if message.sender_id == user_profile.id:
return True
    # You cannot edit the content of a message sent by someone else.
if content is not None:
return False
assert topic_name is not None
# The following cases are the various reasons a user might be
# allowed to edit topics.
# We allow anyone to edit (no topic) messages to help tend them.
if is_no_topic_msg:
return True
# The can_edit_topic_of_any_message helper returns whether the user can edit the topic
# or not based on edit_topic_policy setting and the user's role.
if user_profile.can_edit_topic_of_any_message():
return True
return False
def check_update_message(
user_profile: UserProfile,
message_id: int,
stream_id: Optional[int] = None,
topic_name: Optional[str] = None,
propagate_mode: str = "change_one",
send_notification_to_old_thread: bool = True,
send_notification_to_new_thread: bool = True,
content: Optional[str] = None,
) -> int:
"""This will update a message given the message id and user profile.
    It checks whether the user profile has permission to edit the message,
    raising a JsonableError otherwise.
    It returns the number of messages changed.
"""
message, ignored_user_message = access_message(user_profile, message_id)
if not user_profile.realm.allow_message_editing:
raise JsonableError(_("Your organization has turned off message editing"))
    # The zerver/views/message_edit.py call point already strips this
    # via REQ_topic; we can delete this code once we arrange a contract
    # where future callers in the embedded bots system also use REQ_topic
    # (or are otherwise guaranteed to strip their input).
if topic_name is not None:
topic_name = topic_name.strip()
if topic_name == message.topic_name():
topic_name = None
validate_message_edit_payload(message, stream_id, topic_name, propagate_mode, content)
is_no_topic_msg = message.topic_name() == "(no topic)"
if content is not None or topic_name is not None:
if not can_edit_content_or_topic(
message, user_profile, is_no_topic_msg, content, topic_name
):
raise JsonableError(_("You don't have permission to edit this message"))
# If there is a change to the content, check that it hasn't been too long
# Allow an extra 20 seconds since we potentially allow editing 15 seconds
# past the limit, and in case there are network issues, etc. The 15 comes
# from (min_seconds_to_edit + seconds_left_buffer) in message_edit.js; if
# you change this value also change those two parameters in message_edit.js.
edit_limit_buffer = 20
if content is not None and user_profile.realm.message_content_edit_limit_seconds > 0:
deadline_seconds = user_profile.realm.message_content_edit_limit_seconds + edit_limit_buffer
if (timezone_now() - message.date_sent) > datetime.timedelta(seconds=deadline_seconds):
raise JsonableError(_("The time limit for editing this message has passed"))
# If there is a change to the topic, check that the user is allowed to
# edit it and that it has not been too long. If this is not the user who
# sent the message, they are not the admin, and the time limit for editing
# topics is passed, raise an error.
if (
topic_name is not None
and message.sender != user_profile
and not user_profile.is_realm_admin
and not user_profile.is_moderator
and not is_no_topic_msg
):
deadline_seconds = Realm.DEFAULT_COMMUNITY_TOPIC_EDITING_LIMIT_SECONDS + edit_limit_buffer
if (timezone_now() - message.date_sent) > datetime.timedelta(seconds=deadline_seconds):
raise JsonableError(_("The time limit for editing this message's topic has passed"))
rendering_result = None
links_for_embed: Set[str] = set()
prior_mention_user_ids: Set[int] = set()
mention_data: Optional[MentionData] = None
if content is not None:
if content.rstrip() == "":
content = "(deleted)"
content = normalize_body(content)
mention_data = MentionData(
realm_id=user_profile.realm.id,
content=content,
)
user_info = get_user_info_for_message_updates(message.id)
prior_mention_user_ids = user_info["mention_user_ids"]
# We render the message using the current user's realm; since
        # the cross-realm bots never edit messages, this should always
        # be correct.
# Note: If rendering fails, the called code will raise a JsonableError.
rendering_result = render_incoming_message(
message,
content,
user_info["message_user_ids"],
user_profile.realm,
mention_data=mention_data,
)
links_for_embed |= rendering_result.links_for_preview
new_stream = None
number_changed = 0
if stream_id is not None:
assert message.is_stream_message()
if not user_profile.can_move_messages_between_streams():
raise JsonableError(_("You don't have permission to move this message"))
try:
access_stream_by_id(user_profile, message.recipient.type_id)
except JsonableError:
raise JsonableError(
_(
"You don't have permission to move this message due to missing access to its stream"
)
)
new_stream = access_stream_by_id(user_profile, stream_id, require_active=True)[0]
check_stream_access_based_on_stream_post_policy(user_profile, new_stream)
number_changed = do_update_message(
user_profile,
message,
new_stream,
topic_name,
propagate_mode,
send_notification_to_old_thread,
send_notification_to_new_thread,
content,
rendering_result,
prior_mention_user_ids,
mention_data,
)
if links_for_embed:
event_data = {
"message_id": message.id,
"message_content": message.content,
# The choice of `user_profile.realm_id` rather than
# `sender.realm_id` must match the decision made in the
# `render_incoming_message` call earlier in this function.
"message_realm_id": user_profile.realm_id,
"urls": list(links_for_embed),
}
queue_json_publish("embed_links", event_data)
return number_changed
def check_default_stream_group_name(group_name: str) -> None:
if group_name.strip() == "":
raise JsonableError(_("Invalid default stream group name '{}'").format(group_name))
if len(group_name) > DefaultStreamGroup.MAX_NAME_LENGTH:
raise JsonableError(
_("Default stream group name too long (limit: {} characters)").format(
DefaultStreamGroup.MAX_NAME_LENGTH,
)
)
for i in group_name:
if ord(i) == 0:
raise JsonableError(
_("Default stream group name '{}' contains NULL (0x00) characters.").format(
group_name,
)
)
def send_rate_limited_pm_notification_to_bot_owner(
sender: UserProfile, realm: Realm, content: str
) -> None:
"""
Sends a PM error notification to a bot's owner if one hasn't already
been sent in the last 5 minutes.
"""
if sender.realm.is_zephyr_mirror_realm or sender.realm.deactivated:
return
if not sender.is_bot or sender.bot_owner is None:
return
# Don't send these notifications for cross-realm bot messages
# (e.g. from EMAIL_GATEWAY_BOT) since the owner for
# EMAIL_GATEWAY_BOT is probably the server administrator, not
# the owner of the bot who could potentially fix the problem.
if sender.realm != realm:
return
# We warn the user once every 5 minutes to avoid a flood of
# PMs on a misconfigured integration, re-using the
# UserProfile.last_reminder field, which is not used for bots.
last_reminder = sender.last_reminder
waitperiod = datetime.timedelta(minutes=UserProfile.BOT_OWNER_STREAM_ALERT_WAITPERIOD)
if last_reminder and timezone_now() - last_reminder <= waitperiod:
return
internal_send_private_message(
get_system_bot(settings.NOTIFICATION_BOT, sender.bot_owner.realm_id),
sender.bot_owner,
content,
)
sender.last_reminder = timezone_now()
sender.save(update_fields=["last_reminder"])
def send_pm_if_empty_stream(
stream: Optional[Stream],
realm: Realm,
sender: UserProfile,
stream_name: Optional[str] = None,
stream_id: Optional[int] = None,
) -> None:
"""If a bot sends a message to a stream that doesn't exist or has no
subscribers, sends a notification to the bot owner (if not a
cross-realm bot) so that the owner can correct the issue."""
if not sender.is_bot or sender.bot_owner is None:
return
arg_dict = {
"bot_identity": f"`{sender.delivery_email}`",
"stream_id": stream_id,
"stream_name": f"#**{stream_name}**",
"new_stream_link": "#streams/new",
}
if sender.bot_owner is not None:
with override_language(sender.bot_owner.default_language):
if stream is None:
if stream_id is not None:
content = _(
"Your bot {bot_identity} tried to send a message to stream ID "
"{stream_id}, but there is no stream with that ID."
).format(**arg_dict)
else:
assert stream_name is not None
content = _(
"Your bot {bot_identity} tried to send a message to stream "
"{stream_name}, but that stream does not exist. "
"Click [here]({new_stream_link}) to create it."
).format(**arg_dict)
else:
if num_subscribers_for_stream_id(stream.id) > 0:
return
content = _(
"Your bot {bot_identity} tried to send a message to "
"stream {stream_name}. The stream exists but "
"does not have any subscribers."
).format(**arg_dict)
send_rate_limited_pm_notification_to_bot_owner(sender, realm, content)
def validate_stream_name_with_pm_notification(
stream_name: str, realm: Realm, sender: UserProfile
) -> Stream:
stream_name = stream_name.strip()
check_stream_name(stream_name)
try:
stream = get_stream(stream_name, realm)
send_pm_if_empty_stream(stream, realm, sender)
except Stream.DoesNotExist:
send_pm_if_empty_stream(None, realm, sender, stream_name=stream_name)
raise StreamDoesNotExistError(escape(stream_name))
return stream
def validate_stream_id_with_pm_notification(
stream_id: int, realm: Realm, sender: UserProfile
) -> Stream:
try:
stream = get_stream_by_id_in_realm(stream_id, realm)
send_pm_if_empty_stream(stream, realm, sender)
except Stream.DoesNotExist:
send_pm_if_empty_stream(None, realm, sender, stream_id=stream_id)
raise StreamWithIDDoesNotExistError(stream_id)
return stream
def check_private_message_policy(
realm: Realm, sender: UserProfile, user_profiles: Sequence[UserProfile]
) -> None:
if realm.private_message_policy == Realm.PRIVATE_MESSAGE_POLICY_DISABLED:
if sender.is_bot or (len(user_profiles) == 1 and user_profiles[0].is_bot):
# We allow PMs only between users and bots, to avoid
# breaking the tutorial as well as automated
# notifications from system bots to users.
return
raise JsonableError(_("Private messages are disabled in this organization."))
# check_message:
# Returns a SendMessageRequest, ready for sending with do_send_messages,
# on success; raises JsonableError on error.
def check_message(
sender: UserProfile,
client: Client,
addressee: Addressee,
message_content_raw: str,
realm: Optional[Realm] = None,
forged: bool = False,
forged_timestamp: Optional[float] = None,
forwarder_user_profile: Optional[UserProfile] = None,
local_id: Optional[str] = None,
sender_queue_id: Optional[str] = None,
widget_content: Optional[str] = None,
email_gateway: bool = False,
*,
skip_stream_access_check: bool = False,
) -> SendMessageRequest:
"""See
https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
for high-level documentation on this subsystem.
"""
stream = None
message_content = normalize_body(message_content_raw)
if realm is None:
realm = sender.realm
if addressee.is_stream():
topic_name = addressee.topic()
topic_name = truncate_topic(topic_name)
stream_name = addressee.stream_name()
stream_id = addressee.stream_id()
if stream_name is not None:
stream = validate_stream_name_with_pm_notification(stream_name, realm, sender)
elif stream_id is not None:
stream = validate_stream_id_with_pm_notification(stream_id, realm, sender)
else:
stream = addressee.stream()
assert stream is not None
# To save a database round trip, we construct the Recipient
# object for the Stream rather than fetching it from the
# database using the stream.recipient foreign key.
#
# This is simpler than ensuring that code paths that fetch a
# Stream that will be used for sending a message have a
# `select_related("recipient"), which would also needlessly
# expand Stream objects in memory (all the fields of Recipient
# are already known given the Stream object).
recipient = Recipient(
id=stream.recipient_id,
type_id=stream.id,
type=Recipient.STREAM,
)
if not skip_stream_access_check:
access_stream_for_send_message(
sender=sender, stream=stream, forwarder_user_profile=forwarder_user_profile
)
else:
# Defensive assertion - the only currently supported use case
# for this option is for outgoing webhook bots and since this
# is security-sensitive code, it's beneficial to ensure nothing
# else can sneak past the access check.
assert sender.bot_type == sender.OUTGOING_WEBHOOK_BOT
elif addressee.is_private():
user_profiles = addressee.user_profiles()
mirror_message = client and client.name in [
"zephyr_mirror",
"irc_mirror",
"jabber_mirror",
"JabberMirror",
]
check_private_message_policy(realm, sender, user_profiles)
# API super-users who set the `forged` flag are allowed to
# forge messages sent by any user, so we disable the
# `forwarded_mirror_message` security check in that case.
forwarded_mirror_message = mirror_message and not forged
try:
recipient = recipient_for_user_profiles(
user_profiles, forwarded_mirror_message, forwarder_user_profile, sender
)
except ValidationError as e:
assert isinstance(e.messages[0], str)
raise JsonableError(e.messages[0])
else:
# This is defensive code--Addressee already validates
# the message type.
raise AssertionError("Invalid message type")
message = Message()
message.sender = sender
message.content = message_content
message.recipient = recipient
if addressee.is_stream():
message.set_topic_name(topic_name)
if forged and forged_timestamp is not None:
# Forged messages come with a timestamp
message.date_sent = timestamp_to_datetime(forged_timestamp)
else:
message.date_sent = timezone_now()
message.sending_client = client
# We render messages later in the process.
assert message.rendered_content is None
if client.name == "zephyr_mirror":
id = already_sent_mirrored_message_id(message)
if id is not None:
raise ZephyrMessageAlreadySentException(id)
widget_content_dict = None
if widget_content is not None:
try:
widget_content_dict = orjson.loads(widget_content)
except orjson.JSONDecodeError:
raise JsonableError(_("Widgets: API programmer sent invalid JSON content"))
try:
check_widget_content(widget_content_dict)
except ValidationError as error:
raise JsonableError(
_("Widgets: {error_msg}").format(
error_msg=error.message,
)
)
message_send_dict = build_message_send_dict(
message=message,
stream=stream,
local_id=local_id,
sender_queue_id=sender_queue_id,
realm=realm,
widget_content_dict=widget_content_dict,
email_gateway=email_gateway,
)
if stream is not None and message_send_dict.rendering_result.mentions_wildcard:
if not wildcard_mention_allowed(sender, stream):
raise JsonableError(
_("You do not have permission to use wildcard mentions in this stream.")
)
return message_send_dict
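# Hypothetical usage sketch for check_message above (variable names are
# illustrative, not from this module): callers typically validate and then
# send in two steps, e.g.:
#
#     send_request = check_message(sender, client, addressee, content)
#     [sent_message_id] = do_send_messages([send_request])
#
# check_message raises JsonableError on invalid input, so any returned
# SendMessageRequest is ready to hand to do_send_messages.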
def _internal_prep_message(
realm: Realm,
sender: UserProfile,
addressee: Addressee,
content: str,
email_gateway: bool = False,
) -> Optional[SendMessageRequest]:
"""
    Creates a message object and checks it, but doesn't send it or save it to the database.
The internal function that calls this can therefore batch send a bunch of created
messages together as one database query.
Call do_send_messages with a list of the return values of this method.
"""
    # Truncate content that exceeds the maximum message length, leaving
    # room for the truncation notice appended below.
if len(content) > settings.MAX_MESSAGE_LENGTH:
content = content[0:3900] + "\n\n[message was too long and has been truncated]"
# If we have a stream name, and the stream doesn't exist, we
# create it here (though this code path should probably be removed
# eventually, moving that responsibility to the caller). If
# addressee.stream_name() is None (i.e. we're sending to a stream
# by ID), we skip this, as the stream object must already exist.
if addressee.is_stream():
stream_name = addressee.stream_name()
if stream_name is not None:
ensure_stream(realm, stream_name, acting_user=sender)
try:
return check_message(
sender,
get_client("Internal"),
addressee,
content,
realm=realm,
email_gateway=email_gateway,
)
except JsonableError as e:
logging.exception(
"Error queueing internal message by %s: %s",
sender.delivery_email,
e.msg,
stack_info=True,
)
return None
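# Illustrative batching pattern for _internal_prep_message above (the loop
# and variable names are hypothetical): prepare several messages and send
# them in a single do_send_messages call, skipping any that failed checks:
#
#     requests = [
#         _internal_prep_message(realm, sender, addressee, content)
#         for (addressee, content) in outgoing
#     ]
#     do_send_messages([r for r in requests if r is not None])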
def internal_prep_stream_message(
sender: UserProfile,
stream: Stream,
topic: str,
content: str,
email_gateway: bool = False,
) -> Optional[SendMessageRequest]:
"""
See _internal_prep_message for details of how this works.
"""
realm = stream.realm
addressee = Addressee.for_stream(stream, topic)
return _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
email_gateway=email_gateway,
)
def internal_prep_stream_message_by_name(
realm: Realm,
sender: UserProfile,
stream_name: str,
topic: str,
content: str,
) -> Optional[SendMessageRequest]:
"""
See _internal_prep_message for details of how this works.
"""
addressee = Addressee.for_stream_name(stream_name, topic)
return _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
def internal_prep_private_message(
realm: Realm, sender: UserProfile, recipient_user: UserProfile, content: str
) -> Optional[SendMessageRequest]:
"""
See _internal_prep_message for details of how this works.
"""
addressee = Addressee.for_user_profile(recipient_user)
return _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
def internal_send_private_message(
sender: UserProfile, recipient_user: UserProfile, content: str
) -> Optional[int]:
realm = recipient_user.realm
message = internal_prep_private_message(realm, sender, recipient_user, content)
if message is None:
return None
message_ids = do_send_messages([message])
return message_ids[0]
def internal_send_stream_message(
sender: UserProfile,
stream: Stream,
topic: str,
content: str,
email_gateway: bool = False,
) -> Optional[int]:
message = internal_prep_stream_message(sender, stream, topic, content, email_gateway)
if message is None:
return None
message_ids = do_send_messages([message])
return message_ids[0]
def internal_send_stream_message_by_name(
realm: Realm,
sender: UserProfile,
stream_name: str,
topic: str,
content: str,
) -> Optional[int]:
message = internal_prep_stream_message_by_name(
realm,
sender,
stream_name,
topic,
content,
)
if message is None:
return None
message_ids = do_send_messages([message])
return message_ids[0]
def internal_send_huddle_message(
realm: Realm, sender: UserProfile, emails: List[str], content: str
) -> Optional[int]:
addressee = Addressee.for_private(emails, realm)
message = _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
if message is None:
return None
message_ids = do_send_messages([message])
return message_ids[0]
def pick_color(user_profile: UserProfile, used_colors: Set[str]) -> str:
# These colors are shared with the palette in stream_settings_ui.js.
available_colors = [s for s in STREAM_ASSIGNMENT_COLORS if s not in used_colors]
if available_colors:
return available_colors[0]
else:
return STREAM_ASSIGNMENT_COLORS[len(used_colors) % len(STREAM_ASSIGNMENT_COLORS)]
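# Illustrative behavior of pick_color above: if, say, the first three
# palette colors are already in use, the user gets the fourth; once every
# palette color is taken, the modulo fallback wraps around and colors start
# repeating from the beginning of STREAM_ASSIGNMENT_COLORS.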
def validate_user_access_to_subscribers(
user_profile: Optional[UserProfile], stream: Stream
) -> None:
"""Validates whether the user can view the subscribers of a stream. Raises a JsonableError if:
* The user and the stream are in different realms
* The realm is MIT and the stream is not invite only.
* The stream is invite only, requesting_user is passed, and that user
does not subscribe to the stream.
"""
validate_user_access_to_subscribers_helper(
user_profile,
{
"realm_id": stream.realm_id,
"is_web_public": stream.is_web_public,
"invite_only": stream.invite_only,
},
# We use a lambda here so that we only compute whether the
# user is subscribed if we have to
lambda user_profile: subscribed_to_stream(user_profile, stream.id),
)
def validate_user_access_to_subscribers_helper(
user_profile: Optional[UserProfile],
stream_dict: Mapping[str, Any],
check_user_subscribed: Callable[[UserProfile], bool],
) -> None:
"""Helper for validate_user_access_to_subscribers that doesn't require
a full stream object. This function is a bit hard to read,
because it is carefully optimized for performance in the two code
paths we call it from:
* In `bulk_get_subscriber_user_ids`, we already know whether the
user was subscribed via `sub_dict`, and so we want to avoid a
database query at all (especially since it calls this in a loop);
* In `validate_user_access_to_subscribers`, we want to only check
if the user is subscribed when we absolutely have to, since it
costs a database query.
The `check_user_subscribed` argument is a function that reports
whether the user is subscribed to the stream.
Note also that we raise a ValidationError in cases where the
caller is doing the wrong thing (maybe these should be
AssertionErrors), and JsonableError for 400 type errors.
"""
if user_profile is None:
raise ValidationError("Missing user to validate access for")
if user_profile.realm_id != stream_dict["realm_id"]:
raise ValidationError("Requesting user not in given realm")
# Even guest users can access subscribers to web-public streams,
# since they can freely become subscribers to these streams.
if stream_dict["is_web_public"]:
return
# With the exception of web public streams, a guest must
# be subscribed to a stream (even a public one) in order
# to see subscribers.
if user_profile.is_guest:
if check_user_subscribed(user_profile):
return
# We could explicitly handle the case where guests aren't
# subscribed here in an `else` statement or we can fall
# through to the subsequent logic. Tim prefers the latter.
# Adding an `else` would ensure better code coverage.
if not user_profile.can_access_public_streams() and not stream_dict["invite_only"]:
raise JsonableError(_("Subscriber data is not available for this stream"))
# Organization administrators can view subscribers for all streams.
if user_profile.is_realm_admin:
return
if stream_dict["invite_only"] and not check_user_subscribed(user_profile):
raise JsonableError(_("Unable to retrieve subscribers for private stream"))
def bulk_get_subscriber_user_ids(
stream_dicts: Collection[Mapping[str, Any]],
user_profile: UserProfile,
subscribed_stream_ids: Set[int],
) -> Dict[int, List[int]]:
"""sub_dict maps stream_id => whether the user is subscribed to that stream."""
target_stream_dicts = []
for stream_dict in stream_dicts:
stream_id = stream_dict["id"]
is_subscribed = stream_id in subscribed_stream_ids
try:
validate_user_access_to_subscribers_helper(
user_profile,
stream_dict,
lambda user_profile: is_subscribed,
)
except JsonableError:
continue
target_stream_dicts.append(stream_dict)
recip_to_stream_id = {stream["recipient_id"]: stream["id"] for stream in target_stream_dicts}
recipient_ids = sorted(stream["recipient_id"] for stream in target_stream_dicts)
result: Dict[int, List[int]] = {stream["id"]: [] for stream in stream_dicts}
if not recipient_ids:
return result
"""
The raw SQL below leads to more than a 2x speedup when tested with
20k+ total subscribers. (For large realms with lots of default
streams, this function deals with LOTS of data, so it is important
to optimize.)
"""
query = SQL(
"""
SELECT
zerver_subscription.recipient_id,
zerver_subscription.user_profile_id
FROM
zerver_subscription
WHERE
zerver_subscription.recipient_id in %(recipient_ids)s AND
zerver_subscription.active AND
zerver_subscription.is_user_active
ORDER BY
zerver_subscription.recipient_id,
zerver_subscription.user_profile_id
"""
)
cursor = connection.cursor()
cursor.execute(query, {"recipient_ids": tuple(recipient_ids)})
rows = cursor.fetchall()
cursor.close()
"""
Using groupby/itemgetter here is important for performance, at scale.
It makes it so that all interpreter overhead is just O(N) in nature.
"""
for recip_id, recip_rows in itertools.groupby(rows, itemgetter(0)):
user_profile_ids = [r[1] for r in recip_rows]
stream_id = recip_to_stream_id[recip_id]
result[stream_id] = list(user_profile_ids)
return result
def get_subscribers_query(stream: Stream, requesting_user: Optional[UserProfile]) -> QuerySet:
# TODO: Make a generic stub for QuerySet
"""Build a query to get the subscribers list for a stream, raising a JsonableError if:
'realm' is optional in stream.
The caller can refine this query with select_related(), values(), etc. depending
on whether it wants objects or just certain fields
"""
validate_user_access_to_subscribers(requesting_user, stream)
return get_active_subscriptions_for_stream_id(stream.id, include_deactivated_users=False)
def get_subscriber_ids(stream: Stream, requesting_user: Optional[UserProfile] = None) -> List[int]:
subscriptions_query = get_subscribers_query(stream, requesting_user)
return subscriptions_query.values_list("user_profile_id", flat=True)
def send_subscription_add_events(
realm: Realm,
sub_info_list: List[SubInfo],
subscriber_dict: Dict[int, Set[int]],
) -> None:
info_by_user: Dict[int, List[SubInfo]] = defaultdict(list)
for sub_info in sub_info_list:
info_by_user[sub_info.user.id].append(sub_info)
stream_ids = {sub_info.stream.id for sub_info in sub_info_list}
recent_traffic = get_streams_traffic(stream_ids=stream_ids)
for user_id, sub_infos in info_by_user.items():
sub_dicts = []
for sub_info in sub_infos:
stream = sub_info.stream
subscription = sub_info.sub
sub_dict = stream.to_dict()
for field_name in Subscription.API_FIELDS:
sub_dict[field_name] = getattr(subscription, field_name)
sub_dict["in_home_view"] = not subscription.is_muted
sub_dict["email_address"] = encode_email_address(stream, show_sender=True)
sub_dict["stream_weekly_traffic"] = get_average_weekly_stream_traffic(
stream.id, stream.date_created, recent_traffic
)
if stream.is_in_zephyr_realm and not stream.invite_only:
sub_dict["subscribers"] = []
else:
sub_dict["subscribers"] = list(subscriber_dict[stream.id])
sub_dicts.append(sub_dict)
# Send a notification to the user who subscribed.
event = dict(type="subscription", op="add", subscriptions=sub_dicts)
send_event(realm, event, [user_id])
SubT = Tuple[List[SubInfo], List[SubInfo]]
def bulk_add_subscriptions(
realm: Realm,
streams: Collection[Stream],
users: Iterable[UserProfile],
color_map: Mapping[str, str] = {},
from_user_creation: bool = False,
*,
acting_user: Optional[UserProfile],
) -> SubT:
users = list(users)
    # Sanity check our callers
for stream in streams:
assert stream.realm_id == realm.id
for user in users:
assert user.realm_id == realm.id
recipient_id_to_stream = {stream.recipient_id: stream for stream in streams}
subs_by_user: Dict[int, List[Subscription]] = defaultdict(list)
all_subs_query = get_stream_subscriptions_for_users(users)
for sub in all_subs_query:
subs_by_user[sub.user_profile_id].append(sub)
already_subscribed: List[SubInfo] = []
subs_to_activate: List[SubInfo] = []
subs_to_add: List[SubInfo] = []
for user_profile in users:
my_subs = subs_by_user[user_profile.id]
used_colors = {sub.color for sub in my_subs}
# Make a fresh set of all new recipient ids, and then we will
# remove any for which our user already has a subscription
# (and we'll re-activate any subscriptions as needed).
new_recipient_ids = {stream.recipient_id for stream in streams}
for sub in my_subs:
if sub.recipient_id in new_recipient_ids:
new_recipient_ids.remove(sub.recipient_id)
stream = recipient_id_to_stream[sub.recipient_id]
sub_info = SubInfo(user_profile, sub, stream)
if sub.active:
already_subscribed.append(sub_info)
else:
subs_to_activate.append(sub_info)
for recipient_id in new_recipient_ids:
stream = recipient_id_to_stream[recipient_id]
if stream.name in color_map:
color = color_map[stream.name]
else:
color = pick_color(user_profile, used_colors)
used_colors.add(color)
sub = Subscription(
user_profile=user_profile,
is_user_active=user_profile.is_active,
active=True,
color=color,
recipient_id=recipient_id,
)
sub_info = SubInfo(user_profile, sub, stream)
subs_to_add.append(sub_info)
bulk_add_subs_to_db_with_logging(
realm=realm,
acting_user=acting_user,
subs_to_add=subs_to_add,
subs_to_activate=subs_to_activate,
)
altered_user_dict: Dict[int, Set[int]] = defaultdict(set)
for sub_info in subs_to_add + subs_to_activate:
altered_user_dict[sub_info.stream.id].add(sub_info.user.id)
stream_dict = {stream.id: stream for stream in streams}
new_streams = [stream_dict[stream_id] for stream_id in altered_user_dict]
subscriber_peer_info = bulk_get_subscriber_peer_info(
realm=realm,
streams=new_streams,
)
# We now send several types of events to notify browsers. The
# first batches of notifications are sent only to the user(s)
# being subscribed; we can skip these notifications when this is
# being called from the new user creation flow.
if not from_user_creation:
send_stream_creation_events_for_private_streams(
realm=realm,
stream_dict=stream_dict,
altered_user_dict=altered_user_dict,
)
send_subscription_add_events(
realm=realm,
sub_info_list=subs_to_add + subs_to_activate,
subscriber_dict=subscriber_peer_info.subscribed_ids,
)
send_peer_subscriber_events(
op="peer_add",
realm=realm,
altered_user_dict=altered_user_dict,
stream_dict=stream_dict,
private_peer_dict=subscriber_peer_info.private_peer_dict,
)
return (
subs_to_add + subs_to_activate,
already_subscribed,
)
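# Illustrative call for bulk_add_subscriptions above (variable names are
# hypothetical); the returned pair separates new or re-activated
# subscriptions from streams the users were already subscribed to:
#
#     new_subs, already_subscribed = bulk_add_subscriptions(
#         realm, streams, users, acting_user=admin_user
#     )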
# This function contains all the database changes as part of
# subscribing users to streams; we use a transaction to ensure that
# the RealmAuditLog entries are created atomically with the
# Subscription object creation (and updates).
@transaction.atomic
def bulk_add_subs_to_db_with_logging(
realm: Realm,
acting_user: Optional[UserProfile],
subs_to_add: List[SubInfo],
subs_to_activate: List[SubInfo],
) -> None:
Subscription.objects.bulk_create(info.sub for info in subs_to_add)
sub_ids = [info.sub.id for info in subs_to_activate]
Subscription.objects.filter(id__in=sub_ids).update(active=True)
# Log subscription activities in RealmAuditLog
event_time = timezone_now()
event_last_message_id = get_last_message_id()
    all_subscription_logs: List[RealmAuditLog] = []
for sub_info in subs_to_add:
all_subscription_logs.append(
RealmAuditLog(
realm=realm,
acting_user=acting_user,
modified_user=sub_info.user,
modified_stream=sub_info.stream,
event_last_message_id=event_last_message_id,
event_type=RealmAuditLog.SUBSCRIPTION_CREATED,
event_time=event_time,
)
)
for sub_info in subs_to_activate:
all_subscription_logs.append(
RealmAuditLog(
realm=realm,
acting_user=acting_user,
modified_user=sub_info.user,
modified_stream=sub_info.stream,
event_last_message_id=event_last_message_id,
event_type=RealmAuditLog.SUBSCRIPTION_ACTIVATED,
event_time=event_time,
)
)
# Now since we have all log objects generated we can do a bulk insert
RealmAuditLog.objects.bulk_create(all_subscription_logs)
def send_stream_creation_events_for_private_streams(
realm: Realm,
stream_dict: Dict[int, Stream],
altered_user_dict: Dict[int, Set[int]],
) -> None:
for stream_id, stream_users_ids in altered_user_dict.items():
stream = stream_dict[stream_id]
if not stream.is_public():
            # Users newly added to invite-only streams need a `create`
            # notification, both so the stream exists on their client
            # before they receive the "subscribe" notification and so
            # they can manage the new stream.
            # Realm admins already have all created private streams.
realm_admin_ids = {user.id for user in realm.get_admin_users_and_bots()}
notify_user_ids = list(stream_users_ids - realm_admin_ids)
if notify_user_ids:
send_stream_creation_event(stream, notify_user_ids)
def send_peer_subscriber_events(
op: str,
realm: Realm,
stream_dict: Dict[int, Stream],
altered_user_dict: Dict[int, Set[int]],
private_peer_dict: Dict[int, Set[int]],
) -> None:
# Send peer_add/peer_remove events to other users who are tracking the
# subscribers lists of streams in their browser; everyone for
# public streams and only existing subscribers for private streams.
assert op in ["peer_add", "peer_remove"]
private_stream_ids = [
stream_id for stream_id in altered_user_dict if stream_dict[stream_id].invite_only
]
for stream_id in private_stream_ids:
altered_user_ids = altered_user_dict[stream_id]
peer_user_ids = private_peer_dict[stream_id] - altered_user_ids
if peer_user_ids and altered_user_ids:
event = dict(
type="subscription",
op=op,
stream_ids=[stream_id],
user_ids=sorted(list(altered_user_ids)),
)
send_event(realm, event, peer_user_ids)
public_stream_ids = [
stream_id
for stream_id in altered_user_dict
if not stream_dict[stream_id].invite_only and not stream_dict[stream_id].is_in_zephyr_realm
]
if public_stream_ids:
user_streams: Dict[int, Set[int]] = defaultdict(set)
public_peer_ids = set(active_non_guest_user_ids(realm.id))
for stream_id in public_stream_ids:
altered_user_ids = altered_user_dict[stream_id]
peer_user_ids = public_peer_ids - altered_user_ids
if peer_user_ids and altered_user_ids:
if len(altered_user_ids) == 1:
# If we only have one user, we will try to
# find other streams they have (un)subscribed to
# (where it's just them). This optimization
# typically works when a single user is subscribed
# to multiple default public streams during
# new-user registration.
#
# This optimization depends on all public streams
# having the same peers for any single user, which
# isn't the case for private streams.
altered_user_id = list(altered_user_ids)[0]
user_streams[altered_user_id].add(stream_id)
else:
event = dict(
type="subscription",
op=op,
stream_ids=[stream_id],
user_ids=sorted(list(altered_user_ids)),
)
send_event(realm, event, peer_user_ids)
for user_id, stream_ids in user_streams.items():
peer_user_ids = public_peer_ids - {user_id}
event = dict(
type="subscription",
op=op,
stream_ids=sorted(list(stream_ids)),
user_ids=[user_id],
)
send_event(realm, event, peer_user_ids)
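# Illustrative effect of the single-user optimization above: a new user who
# is auto-subscribed to, say, five default public streams produces one
# peer_add event with stream_ids listing all five streams and
# user_ids=[new_user_id], instead of five separate per-stream events.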
def send_peer_remove_events(
realm: Realm,
streams: List[Stream],
altered_user_dict: Dict[int, Set[int]],
) -> None:
private_streams = [stream for stream in streams if stream.invite_only]
private_peer_dict = bulk_get_private_peers(
realm=realm,
private_streams=private_streams,
)
stream_dict = {stream.id: stream for stream in streams}
send_peer_subscriber_events(
op="peer_remove",
realm=realm,
stream_dict=stream_dict,
altered_user_dict=altered_user_dict,
private_peer_dict=private_peer_dict,
)
def get_available_notification_sounds() -> List[str]:
notification_sounds_path = static_path("audio/notification_sounds")
available_notification_sounds = []
for file_name in os.listdir(notification_sounds_path):
root, ext = os.path.splitext(file_name)
if "." in root: # nocoverage
# Exclude e.g. zulip.abcd1234.ogg (generated by production hash-naming)
# to avoid spurious duplicates.
continue
if ext == ".ogg":
available_notification_sounds.append(root)
return sorted(available_notification_sounds)
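# Illustrative result for get_available_notification_sounds above: given
# hypothetical files "ding.ogg", "zulip.ogg", "zulip.abcd1234.ogg" and
# "readme.txt" in the directory, only ["ding", "zulip"] is returned; the
# hash-named duplicate and the non-.ogg file are filtered out.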
def notify_subscriptions_removed(user_profile: UserProfile, streams: Iterable[Stream]) -> None:
payload = [dict(name=stream.name, stream_id=stream.id) for stream in streams]
event = dict(type="subscription", op="remove", subscriptions=payload)
send_event(user_profile.realm, event, [user_profile.id])
SubAndRemovedT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
def bulk_remove_subscriptions(
users: Iterable[UserProfile],
streams: Iterable[Stream],
*,
acting_user: Optional[UserProfile],
) -> SubAndRemovedT:
users = list(users)
streams = list(streams)
stream_dict = {stream.id: stream for stream in streams}
existing_subs_by_user = get_bulk_stream_subscriber_info(users, streams)
def get_non_subscribed_subs() -> List[Tuple[UserProfile, Stream]]:
stream_ids = {stream.id for stream in streams}
not_subscribed: List[Tuple[UserProfile, Stream]] = []
for user_profile in users:
user_sub_stream_info = existing_subs_by_user[user_profile.id]
subscribed_stream_ids = {sub_info.stream.id for sub_info in user_sub_stream_info}
not_subscribed_stream_ids = stream_ids - subscribed_stream_ids
for stream_id in not_subscribed_stream_ids:
stream = stream_dict[stream_id]
not_subscribed.append((user_profile, stream))
return not_subscribed
not_subscribed = get_non_subscribed_subs()
subs_to_deactivate: List[SubInfo] = []
sub_ids_to_deactivate: List[int] = []
# This loop just flattens out our data into big lists for
# bulk operations.
for sub_infos in existing_subs_by_user.values():
for sub_info in sub_infos:
subs_to_deactivate.append(sub_info)
sub_ids_to_deactivate.append(sub_info.sub.id)
our_realm = users[0].realm
# We do all the database changes in a transaction to ensure
# RealmAuditLog entries are atomically created when making changes.
with transaction.atomic():
occupied_streams_before = list(get_occupied_streams(our_realm))
Subscription.objects.filter(
id__in=sub_ids_to_deactivate,
).update(active=False)
occupied_streams_after = list(get_occupied_streams(our_realm))
# Log subscription activities in RealmAuditLog
event_time = timezone_now()
event_last_message_id = get_last_message_id()
all_subscription_logs = [
RealmAuditLog(
realm=sub_info.user.realm,
acting_user=acting_user,
modified_user=sub_info.user,
modified_stream=sub_info.stream,
event_last_message_id=event_last_message_id,
event_type=RealmAuditLog.SUBSCRIPTION_DEACTIVATED,
event_time=event_time,
)
for sub_info in subs_to_deactivate
]
# Now since we have all log objects generated we can do a bulk insert
RealmAuditLog.objects.bulk_create(all_subscription_logs)
altered_user_dict: Dict[int, Set[int]] = defaultdict(set)
streams_by_user: Dict[int, List[Stream]] = defaultdict(list)
for sub_info in subs_to_deactivate:
stream = sub_info.stream
streams_by_user[sub_info.user.id].append(stream)
altered_user_dict[stream.id].add(sub_info.user.id)
for user_profile in users:
if len(streams_by_user[user_profile.id]) == 0:
continue
notify_subscriptions_removed(user_profile, streams_by_user[user_profile.id])
event = {
"type": "mark_stream_messages_as_read",
"user_profile_id": user_profile.id,
"stream_recipient_ids": [stream.recipient_id for stream in streams],
}
queue_json_publish("deferred_work", event)
send_peer_remove_events(
realm=our_realm,
streams=streams,
altered_user_dict=altered_user_dict,
)
new_vacant_streams = set(occupied_streams_before) - set(occupied_streams_after)
new_vacant_private_streams = [stream for stream in new_vacant_streams if stream.invite_only]
if new_vacant_private_streams:
# Deactivate any newly-vacant private streams
for stream in new_vacant_private_streams:
do_deactivate_stream(stream, acting_user=acting_user)
return (
[(sub_info.user, sub_info.stream) for sub_info in subs_to_deactivate],
not_subscribed,
)
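# Illustrative call for bulk_remove_subscriptions above (variable names are
# hypothetical); the returned pair separates subscriptions that were
# actually deactivated from (user, stream) pairs that were never subscribed:
#
#     removed, not_subscribed = bulk_remove_subscriptions(
#         users, streams, acting_user=admin_user
#     )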
def do_change_subscription_property(
user_profile: UserProfile,
sub: Subscription,
stream: Stream,
property_name: str,
value: Any,
*,
acting_user: Optional[UserProfile],
) -> None:
database_property_name = property_name
event_property_name = property_name
database_value = value
event_value = value
# For this property, is_muted is used in the database, but
# in_home_view in the API, since we haven't migrated the events
# API to the new name yet.
if property_name == "in_home_view":
database_property_name = "is_muted"
database_value = not value
if property_name == "is_muted":
event_property_name = "in_home_view"
event_value = not value
old_value = getattr(sub, database_property_name)
setattr(sub, database_property_name, database_value)
sub.save(update_fields=[database_property_name])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
event_type=RealmAuditLog.SUBSCRIPTION_PROPERTY_CHANGED,
event_time=event_time,
modified_user=user_profile,
acting_user=acting_user,
modified_stream=stream,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: database_value,
"property": database_property_name,
}
).decode(),
)
event = dict(
type="subscription",
op="update",
property=event_property_name,
value=event_value,
stream_id=stream.id,
)
send_event(user_profile.realm, event, [user_profile.id])
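# Illustrative mapping for the muting property handled above: a request
# setting in_home_view=False writes is_muted=True to the database and sends
# clients property "in_home_view" with value False; a request setting
# is_muted=True also writes is_muted=True and likewise sends clients
# property "in_home_view" with value False.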
def do_change_password(user_profile: UserProfile, password: str, commit: bool = True) -> None:
user_profile.set_password(password)
if commit:
user_profile.save(update_fields=["password"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=user_profile,
modified_user=user_profile,
event_type=RealmAuditLog.USER_PASSWORD_CHANGED,
event_time=event_time,
)
def do_change_full_name(
user_profile: UserProfile, full_name: str, acting_user: Optional[UserProfile]
) -> None:
old_name = user_profile.full_name
user_profile.full_name = full_name
user_profile.save(update_fields=["full_name"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=acting_user,
modified_user=user_profile,
event_type=RealmAuditLog.USER_FULL_NAME_CHANGED,
event_time=event_time,
extra_data=old_name,
)
payload = dict(user_id=user_profile.id, full_name=user_profile.full_name)
send_event(
user_profile.realm,
dict(type="realm_user", op="update", person=payload),
active_user_ids(user_profile.realm_id),
)
if user_profile.is_bot:
send_event(
user_profile.realm,
dict(type="realm_bot", op="update", bot=payload),
bot_owner_user_ids(user_profile),
)
def check_change_full_name(
user_profile: UserProfile, full_name_raw: str, acting_user: UserProfile
) -> str:
"""Verifies that the user's proposed full name is valid. The caller
    is responsible for checking permissions. Returns the new
full name, which may differ from what was passed in (because this
function strips whitespace)."""
new_full_name = check_full_name(full_name_raw)
do_change_full_name(user_profile, new_full_name, acting_user)
return new_full_name
def check_change_bot_full_name(
user_profile: UserProfile, full_name_raw: str, acting_user: UserProfile
) -> None:
new_full_name = check_full_name(full_name_raw)
if new_full_name == user_profile.full_name:
# Our web app will try to patch full_name even if the user didn't
# modify the name in the form. We just silently ignore those
# situations.
return
check_bot_name_available(
realm_id=user_profile.realm_id,
full_name=new_full_name,
)
do_change_full_name(user_profile, new_full_name, acting_user)
def do_change_bot_owner(
user_profile: UserProfile, bot_owner: UserProfile, acting_user: UserProfile
) -> None:
previous_owner = user_profile.bot_owner
user_profile.bot_owner = bot_owner
user_profile.save() # Can't use update_fields because of how the foreign key works.
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=acting_user,
modified_user=user_profile,
event_type=RealmAuditLog.USER_BOT_OWNER_CHANGED,
event_time=event_time,
)
update_users = bot_owner_user_ids(user_profile)
    # For realm admins, an update event is sent instead of delete/add
    # events, since an admin's bot data already contains all the bots
    # in the realm and none of them should be removed or re-added.
    # Delete the bot from the previous owner's bot data.
if previous_owner and not previous_owner.is_realm_admin:
send_event(
user_profile.realm,
dict(
type="realm_bot",
op="delete",
bot=dict(
user_id=user_profile.id,
),
),
{previous_owner.id},
)
# Do not send update event for previous bot owner.
update_users = update_users - {previous_owner.id}
# Notify the new owner that the bot has been added.
if not bot_owner.is_realm_admin:
add_event = created_bot_event(user_profile)
send_event(user_profile.realm, add_event, {bot_owner.id})
# Do not send update event for bot_owner.
update_users = update_users - {bot_owner.id}
send_event(
user_profile.realm,
dict(
type="realm_bot",
op="update",
bot=dict(
user_id=user_profile.id,
owner_id=user_profile.bot_owner.id,
),
),
update_users,
)
# Since `bot_owner_id` is included in the user profile dict we need
# to update the users dict with the new bot owner id
event: Dict[str, Any] = dict(
type="realm_user",
op="update",
person=dict(
user_id=user_profile.id,
bot_owner_id=user_profile.bot_owner.id,
),
)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def do_change_tos_version(user_profile: UserProfile, tos_version: str) -> None:
user_profile.tos_version = tos_version
user_profile.save(update_fields=["tos_version"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=user_profile,
modified_user=user_profile,
event_type=RealmAuditLog.USER_TOS_VERSION_CHANGED,
event_time=event_time,
)
def do_regenerate_api_key(user_profile: UserProfile, acting_user: UserProfile) -> str:
old_api_key = user_profile.api_key
new_api_key = generate_api_key()
user_profile.api_key = new_api_key
user_profile.save(update_fields=["api_key"])
# We need to explicitly delete the old API key from our caches,
# because the on-save handler for flushing the UserProfile object
# in zerver/lib/cache.py only has access to the new API key.
cache_delete(user_profile_by_api_key_cache_key(old_api_key))
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=acting_user,
modified_user=user_profile,
event_type=RealmAuditLog.USER_API_KEY_CHANGED,
event_time=event_time,
)
if user_profile.is_bot:
send_event(
user_profile.realm,
dict(
type="realm_bot",
op="update",
bot=dict(
user_id=user_profile.id,
api_key=new_api_key,
),
),
bot_owner_user_ids(user_profile),
)
event = {"type": "clear_push_device_tokens", "user_profile_id": user_profile.id}
queue_json_publish("deferred_work", event)
return new_api_key
def notify_avatar_url_change(user_profile: UserProfile) -> None:
if user_profile.is_bot:
send_event(
user_profile.realm,
dict(
type="realm_bot",
op="update",
bot=dict(
user_id=user_profile.id,
avatar_url=avatar_url(user_profile),
),
),
bot_owner_user_ids(user_profile),
)
payload = dict(
avatar_source=user_profile.avatar_source,
avatar_url=avatar_url(user_profile),
avatar_url_medium=avatar_url(user_profile, medium=True),
avatar_version=user_profile.avatar_version,
# Even clients using client_gravatar don't need the email,
# since we're sending the URL anyway.
user_id=user_profile.id,
)
send_event(
user_profile.realm,
dict(type="realm_user", op="update", person=payload),
active_user_ids(user_profile.realm_id),
)
def do_change_avatar_fields(
user_profile: UserProfile,
avatar_source: str,
skip_notify: bool = False,
*,
acting_user: Optional[UserProfile],
) -> None:
user_profile.avatar_source = avatar_source
user_profile.avatar_version += 1
user_profile.save(update_fields=["avatar_source", "avatar_version"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
modified_user=user_profile,
event_type=RealmAuditLog.USER_AVATAR_SOURCE_CHANGED,
extra_data={"avatar_source": avatar_source},
event_time=event_time,
acting_user=acting_user,
)
if not skip_notify:
notify_avatar_url_change(user_profile)
def do_delete_avatar_image(user: UserProfile, *, acting_user: Optional[UserProfile]) -> None:
do_change_avatar_fields(user, UserProfile.AVATAR_FROM_GRAVATAR, acting_user=acting_user)
delete_avatar_image(user)
def do_change_icon_source(
realm: Realm, icon_source: str, *, acting_user: Optional[UserProfile]
) -> None:
realm.icon_source = icon_source
realm.icon_version += 1
realm.save(update_fields=["icon_source", "icon_version"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_ICON_SOURCE_CHANGED,
extra_data={"icon_source": icon_source, "icon_version": realm.icon_version},
event_time=event_time,
acting_user=acting_user,
)
send_event(
realm,
dict(
type="realm",
op="update_dict",
property="icon",
data=dict(icon_source=realm.icon_source, icon_url=realm_icon_url(realm)),
),
active_user_ids(realm.id),
)
def do_change_logo_source(
realm: Realm, logo_source: str, night: bool, *, acting_user: Optional[UserProfile]
) -> None:
if not night:
realm.logo_source = logo_source
realm.logo_version += 1
realm.save(update_fields=["logo_source", "logo_version"])
else:
realm.night_logo_source = logo_source
realm.night_logo_version += 1
realm.save(update_fields=["night_logo_source", "night_logo_version"])
RealmAuditLog.objects.create(
event_type=RealmAuditLog.REALM_LOGO_CHANGED,
realm=realm,
event_time=timezone_now(),
acting_user=acting_user,
)
event = dict(
type="realm",
op="update_dict",
property="night_logo" if night else "logo",
data=get_realm_logo_data(realm, night),
)
send_event(realm, event, active_user_ids(realm.id))
def do_change_plan_type(
realm: Realm, plan_type: int, *, acting_user: Optional[UserProfile]
) -> None:
old_value = realm.plan_type
realm.plan_type = plan_type
realm.save(update_fields=["plan_type"])
RealmAuditLog.objects.create(
event_type=RealmAuditLog.REALM_PLAN_TYPE_CHANGED,
realm=realm,
event_time=timezone_now(),
acting_user=acting_user,
extra_data={"old_value": old_value, "new_value": plan_type},
)
if plan_type == Realm.STANDARD:
realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX
realm.message_visibility_limit = None
realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD
elif plan_type == Realm.SELF_HOSTED:
realm.max_invites = None # type: ignore[assignment] # Apparent mypy bug with Optional[int] setter.
realm.message_visibility_limit = None
realm.upload_quota_gb = None
elif plan_type == Realm.STANDARD_FREE:
realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX
realm.message_visibility_limit = None
realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD
elif plan_type == Realm.LIMITED:
realm.max_invites = settings.INVITES_DEFAULT_REALM_DAILY_MAX
realm.message_visibility_limit = Realm.MESSAGE_VISIBILITY_LIMITED
realm.upload_quota_gb = Realm.UPLOAD_QUOTA_LIMITED
else:
raise AssertionError("Invalid plan type")
update_first_visible_message_id(realm)
realm.save(update_fields=["_max_invites", "message_visibility_limit", "upload_quota_gb"])
event = {
"type": "realm",
"op": "update",
"property": "plan_type",
"value": plan_type,
"extra_data": {"upload_quota": realm.upload_quota_bytes()},
}
send_event(realm, event, active_user_ids(realm.id))
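# Summary of the per-plan limits applied above (restating the branches in
# do_change_plan_type):
#   STANDARD / STANDARD_FREE: standard daily invite limit, no message
#       visibility limit, standard upload quota.
#   SELF_HOSTED: no invite limit, no visibility limit, no upload quota.
#   LIMITED: default daily invite limit, limited message visibility,
#       limited upload quota.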
def do_change_default_sending_stream(
user_profile: UserProfile, stream: Optional[Stream], *, acting_user: Optional[UserProfile]
) -> None:
old_value = user_profile.default_sending_stream_id
user_profile.default_sending_stream = stream
user_profile.save(update_fields=["default_sending_stream"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
event_type=RealmAuditLog.USER_DEFAULT_SENDING_STREAM_CHANGED,
event_time=event_time,
modified_user=user_profile,
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: None if stream is None else stream.id,
}
).decode(),
)
if user_profile.is_bot:
if stream:
stream_name: Optional[str] = stream.name
else:
stream_name = None
send_event(
user_profile.realm,
dict(
type="realm_bot",
op="update",
bot=dict(
user_id=user_profile.id,
default_sending_stream=stream_name,
),
),
bot_owner_user_ids(user_profile),
)
def do_change_default_events_register_stream(
user_profile: UserProfile, stream: Optional[Stream], *, acting_user: Optional[UserProfile]
) -> None:
old_value = user_profile.default_events_register_stream_id
user_profile.default_events_register_stream = stream
user_profile.save(update_fields=["default_events_register_stream"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
event_type=RealmAuditLog.USER_DEFAULT_REGISTER_STREAM_CHANGED,
event_time=event_time,
modified_user=user_profile,
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: None if stream is None else stream.id,
}
).decode(),
)
if user_profile.is_bot:
if stream:
stream_name: Optional[str] = stream.name
else:
stream_name = None
send_event(
user_profile.realm,
dict(
type="realm_bot",
op="update",
bot=dict(
user_id=user_profile.id,
default_events_register_stream=stream_name,
),
),
bot_owner_user_ids(user_profile),
)
def do_change_default_all_public_streams(
user_profile: UserProfile, value: bool, *, acting_user: Optional[UserProfile]
) -> None:
old_value = user_profile.default_all_public_streams
user_profile.default_all_public_streams = value
user_profile.save(update_fields=["default_all_public_streams"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
event_type=RealmAuditLog.USER_DEFAULT_ALL_PUBLIC_STREAMS_CHANGED,
event_time=event_time,
modified_user=user_profile,
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: value,
}
).decode(),
)
if user_profile.is_bot:
send_event(
user_profile.realm,
dict(
type="realm_bot",
op="update",
bot=dict(
user_id=user_profile.id,
default_all_public_streams=user_profile.default_all_public_streams,
),
),
bot_owner_user_ids(user_profile),
)
def do_change_user_role(
user_profile: UserProfile, value: int, *, acting_user: Optional[UserProfile]
) -> None:
old_value = user_profile.role
user_profile.role = value
user_profile.save(update_fields=["role"])
RealmAuditLog.objects.create(
realm=user_profile.realm,
modified_user=user_profile,
acting_user=acting_user,
event_type=RealmAuditLog.USER_ROLE_CHANGED,
event_time=timezone_now(),
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: value,
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}
).decode(),
)
event = dict(
type="realm_user", op="update", person=dict(user_id=user_profile.id, role=user_profile.role)
)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def do_make_user_billing_admin(user_profile: UserProfile) -> None:
user_profile.is_billing_admin = True
user_profile.save(update_fields=["is_billing_admin"])
event = dict(
type="realm_user", op="update", person=dict(user_id=user_profile.id, is_billing_admin=True)
)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def do_change_can_forge_sender(user_profile: UserProfile, value: bool) -> None:
user_profile.can_forge_sender = value
user_profile.save(update_fields=["can_forge_sender"])
def do_change_can_create_users(user_profile: UserProfile, value: bool) -> None:
user_profile.can_create_users = value
user_profile.save(update_fields=["can_create_users"])
def do_change_stream_invite_only(
stream: Stream, invite_only: bool, history_public_to_subscribers: Optional[bool] = None
) -> None:
history_public_to_subscribers = get_default_value_for_history_public_to_subscribers(
stream.realm,
invite_only,
history_public_to_subscribers,
)
stream.invite_only = invite_only
stream.history_public_to_subscribers = history_public_to_subscribers
stream.is_web_public = False
stream.save(update_fields=["invite_only", "history_public_to_subscribers", "is_web_public"])
event = dict(
op="update",
type="stream",
property="invite_only",
value=invite_only,
history_public_to_subscribers=history_public_to_subscribers,
is_web_public=False,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_make_stream_web_public(stream: Stream) -> None:
stream.is_web_public = True
stream.invite_only = False
stream.history_public_to_subscribers = True
stream.save(update_fields=["invite_only", "history_public_to_subscribers", "is_web_public"])
# We reuse "invite_only" stream update API route here because
# both are similar events and similar UI updates will be required
# by the client to update this property for the user.
event = dict(
op="update",
type="stream",
property="invite_only",
value=False,
history_public_to_subscribers=True,
is_web_public=True,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_change_stream_permission(
stream: Stream,
invite_only: Optional[bool] = None,
history_public_to_subscribers: Optional[bool] = None,
is_web_public: Optional[bool] = None,
) -> None:
# TODO: Ideally this would be just merged with do_change_stream_invite_only.
if is_web_public:
do_make_stream_web_public(stream)
else:
assert invite_only is not None
do_change_stream_invite_only(stream, invite_only, history_public_to_subscribers)
def do_change_stream_post_policy(stream: Stream, stream_post_policy: int) -> None:
stream.stream_post_policy = stream_post_policy
stream.save(update_fields=["stream_post_policy"])
event = dict(
op="update",
type="stream",
property="stream_post_policy",
value=stream_post_policy,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
# Backwards-compatibility code: We removed the
# is_announcement_only property in early 2020, but we send a
# duplicate event for legacy mobile clients that might want the
# data.
event = dict(
op="update",
type="stream",
property="is_announcement_only",
value=stream.stream_post_policy == Stream.STREAM_POST_POLICY_ADMINS,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_rename_stream(stream: Stream, new_name: str, user_profile: UserProfile) -> Dict[str, str]:
old_name = stream.name
stream.name = new_name
stream.save(update_fields=["name"])
RealmAuditLog.objects.create(
realm=stream.realm,
acting_user=user_profile,
modified_stream=stream,
event_type=RealmAuditLog.STREAM_NAME_CHANGED,
event_time=timezone_now(),
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_name,
RealmAuditLog.NEW_VALUE: new_name,
}
).decode(),
)
recipient_id = stream.recipient_id
messages = Message.objects.filter(recipient_id=recipient_id).only("id")
# Update the display recipient and stream, which are easy single
# items to set.
old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
new_cache_key = get_stream_cache_key(stream.name, stream.realm_id)
if old_cache_key != new_cache_key:
cache_delete(old_cache_key)
cache_set(new_cache_key, stream)
cache_set(display_recipient_cache_key(recipient_id), stream.name)
# Delete cache entries for everything else, which is cheaper and
# clearer than trying to set them. display_recipient is the out of
# date field in all cases.
cache_delete_many(to_dict_cache_key_id(message.id) for message in messages)
new_email = encode_email_address(stream, show_sender=True)
# We will tell our users to essentially
# update stream.name = new_name where name = old_name
# and update stream.email = new_email where name = old_name.
# We could optimize this by trying to send one message, but the
# client code really wants one property update at a time, and
# updating stream names is a pretty infrequent operation.
# More importantly, we want to key these updates by id, not name,
# since id is the immutable primary key, and obviously name is not.
data_updates = [
["email_address", new_email],
["name", new_name],
]
for property, value in data_updates:
event = dict(
op="update",
type="stream",
property=property,
value=value,
stream_id=stream.id,
name=old_name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
sender = get_system_bot(settings.NOTIFICATION_BOT, stream.realm_id)
with override_language(stream.realm.default_language):
internal_send_stream_message(
sender,
stream,
Realm.STREAM_EVENTS_NOTIFICATION_TOPIC,
_("{user_name} renamed stream {old_stream_name} to {new_stream_name}.").format(
user_name=f"@_**{user_profile.full_name}|{user_profile.id}**",
old_stream_name=f"**{old_name}**",
new_stream_name=f"**{new_name}**",
),
)
# Even though the token doesn't change, the web client needs to update the
# email forwarding address to display the correctly-escaped new name.
return {"email_address": new_email}
def do_change_stream_description(stream: Stream, new_description: str) -> None:
stream.description = new_description
stream.rendered_description = render_stream_description(new_description)
stream.save(update_fields=["description", "rendered_description"])
event = dict(
type="stream",
op="update",
property="description",
name=stream.name,
stream_id=stream.id,
value=new_description,
rendered_description=stream.rendered_description,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_change_stream_message_retention_days(
stream: Stream, message_retention_days: Optional[int] = None
) -> None:
stream.message_retention_days = message_retention_days
stream.save(update_fields=["message_retention_days"])
event = dict(
op="update",
type="stream",
property="message_retention_days",
value=message_retention_days,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_create_realm(
string_id: str,
name: str,
*,
emails_restricted_to_domains: Optional[bool] = None,
email_address_visibility: Optional[int] = None,
description: Optional[str] = None,
invite_required: Optional[bool] = None,
plan_type: Optional[int] = None,
org_type: Optional[int] = None,
date_created: Optional[datetime.datetime] = None,
is_demo_organization: Optional[bool] = False,
) -> Realm:
if string_id == settings.SOCIAL_AUTH_SUBDOMAIN:
raise AssertionError("Creating a realm on SOCIAL_AUTH_SUBDOMAIN is not allowed!")
if Realm.objects.filter(string_id=string_id).exists():
raise AssertionError(f"Realm {string_id} already exists!")
if not server_initialized():
logging.info("Server not yet initialized. Creating the internal realm first.")
create_internal_realm()
kwargs: Dict[str, Any] = {}
if emails_restricted_to_domains is not None:
kwargs["emails_restricted_to_domains"] = emails_restricted_to_domains
if email_address_visibility is not None:
kwargs["email_address_visibility"] = email_address_visibility
if description is not None:
kwargs["description"] = description
if invite_required is not None:
kwargs["invite_required"] = invite_required
if plan_type is not None:
kwargs["plan_type"] = plan_type
if org_type is not None:
kwargs["org_type"] = org_type
if date_created is not None:
# The date_created parameter is intended only for use by test
# suites that want to backdate the date of a realm's creation.
assert not settings.PRODUCTION
kwargs["date_created"] = date_created
with transaction.atomic():
realm = Realm(string_id=string_id, name=name, **kwargs)
if is_demo_organization:
realm.demo_organization_scheduled_deletion_date = (
realm.date_created + datetime.timedelta(days=settings.DEMO_ORG_DEADLINE_DAYS)
)
realm.save()
RealmAuditLog.objects.create(
realm=realm, event_type=RealmAuditLog.REALM_CREATED, event_time=realm.date_created
)
RealmUserDefault.objects.create(realm=realm)
# Create stream once Realm object has been saved
notifications_stream = ensure_stream(
realm,
Realm.DEFAULT_NOTIFICATION_STREAM_NAME,
stream_description="Everyone is added to this stream by default. Welcome! :octopus:",
acting_user=None,
)
realm.notifications_stream = notifications_stream
# With the current initial streams situation, the only public
# stream is the notifications_stream.
DefaultStream.objects.create(stream=notifications_stream, realm=realm)
signup_notifications_stream = ensure_stream(
realm,
Realm.INITIAL_PRIVATE_STREAM_NAME,
invite_only=True,
stream_description="A private stream for core team members.",
acting_user=None,
)
realm.signup_notifications_stream = signup_notifications_stream
realm.save(update_fields=["notifications_stream", "signup_notifications_stream"])
if plan_type is None and settings.BILLING_ENABLED:
do_change_plan_type(realm, Realm.LIMITED, acting_user=None)
admin_realm = get_realm(settings.SYSTEM_BOT_REALM)
sender = get_system_bot(settings.NOTIFICATION_BOT, admin_realm.id)
# Send a notification to the admin realm
signup_message = _("Signups enabled")
try:
signups_stream = get_signups_stream(admin_realm)
topic = realm.display_subdomain
internal_send_stream_message(
sender,
signups_stream,
topic,
signup_message,
)
except Stream.DoesNotExist: # nocoverage
# If the signups stream hasn't been created in the admin
# realm, don't auto-create it to send to it; just do nothing.
pass
return realm
def update_scheduled_email_notifications_time(
user_profile: UserProfile, old_batching_period: int, new_batching_period: int
) -> None:
existing_scheduled_emails = ScheduledMessageNotificationEmail.objects.filter(
user_profile=user_profile
)
scheduled_timestamp_change = datetime.timedelta(
seconds=new_batching_period
) - datetime.timedelta(seconds=old_batching_period)
existing_scheduled_emails.update(
scheduled_timestamp=F("scheduled_timestamp") + scheduled_timestamp_change
)
def do_change_user_setting(
user_profile: UserProfile,
setting_name: str,
setting_value: Union[bool, str, int],
*,
acting_user: Optional[UserProfile],
) -> None:
old_value = getattr(user_profile, setting_name)
event_time = timezone_now()
if setting_name == "timezone":
assert isinstance(setting_value, str)
else:
property_type = UserProfile.property_types[setting_name]
assert isinstance(setting_value, property_type)
setattr(user_profile, setting_name, setting_value)
# TODO: Move these database actions into a transaction.atomic block.
user_profile.save(update_fields=[setting_name])
if setting_name in UserProfile.notification_setting_types:
# Prior to all personal settings being managed by property_types,
# these were only created for notification settings.
#
# TODO: Start creating these for all settings, and do a
# backfilled=True migration.
RealmAuditLog.objects.create(
realm=user_profile.realm,
event_type=RealmAuditLog.USER_SETTING_CHANGED,
event_time=event_time,
acting_user=acting_user,
modified_user=user_profile,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: setting_value,
"property": setting_name,
}
).decode(),
)
# Disabling digest emails should clear a user's email queue
if setting_name == "enable_digest_emails" and not setting_value:
clear_scheduled_emails(user_profile.id, ScheduledEmail.DIGEST)
if setting_name == "email_notifications_batching_period_seconds":
assert isinstance(old_value, int)
assert isinstance(setting_value, int)
update_scheduled_email_notifications_time(user_profile, old_value, setting_value)
event = {
"type": "user_settings",
"op": "update",
"property": setting_name,
"value": setting_value,
}
if setting_name == "default_language":
assert isinstance(setting_value, str)
event["language_name"] = get_language_name(setting_value)
send_event(user_profile.realm, event, [user_profile.id])
if setting_name in UserProfile.notification_settings_legacy:
        # This legacy event format is for backwards-compatibility with
# clients that don't support the new user_settings event type.
# We only send this for settings added before Feature level 89.
legacy_event = {
"type": "update_global_notifications",
"user": user_profile.email,
"notification_name": setting_name,
"setting": setting_value,
}
send_event(user_profile.realm, legacy_event, [user_profile.id])
if setting_name in UserProfile.display_settings_legacy or setting_name == "timezone":
        # This legacy event format is for backwards-compatibility with
# clients that don't support the new user_settings event type.
# We only send this for settings added before Feature level 89.
legacy_event = {
"type": "update_display_settings",
"user": user_profile.email,
"setting_name": setting_name,
"setting": setting_value,
}
if setting_name == "default_language":
assert isinstance(setting_value, str)
legacy_event["language_name"] = get_language_name(setting_value)
send_event(user_profile.realm, legacy_event, [user_profile.id])
# Updates to the timezone display setting are sent to all users
if setting_name == "timezone":
payload = dict(
email=user_profile.email,
user_id=user_profile.id,
timezone=canonicalize_timezone(user_profile.timezone),
)
send_event(
user_profile.realm,
dict(type="realm_user", op="update", person=payload),
active_user_ids(user_profile.realm_id),
)
if setting_name == "enable_drafts_synchronization" and setting_value is False:
# Delete all of the drafts from the backend but don't send delete events
# for them since all that's happened is that we stopped syncing changes,
# not that we deleted every previously synced draft; to do that, use the
# DELETE endpoint.
Draft.objects.filter(user_profile=user_profile).delete()
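# Illustrative sketch of the modern event emitted above, assuming a user
# switches their default language to German ("de"); the language name shown
# here is only an example of what get_language_name() might return:
#
#   {
#       "type": "user_settings",
#       "op": "update",
#       "property": "default_language",
#       "value": "de",
#       "language_name": "German",
#   }
#
# Clients that predate the user_settings event type instead receive the
# legacy update_display_settings / update_global_notifications events
# constructed above.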
def lookup_default_stream_groups(
default_stream_group_names: List[str], realm: Realm
) -> List[DefaultStreamGroup]:
default_stream_groups = []
for group_name in default_stream_group_names:
try:
default_stream_group = DefaultStreamGroup.objects.get(name=group_name, realm=realm)
except DefaultStreamGroup.DoesNotExist:
raise JsonableError(_("Invalid default stream group {}").format(group_name))
default_stream_groups.append(default_stream_group)
return default_stream_groups
def notify_default_streams(realm: Realm) -> None:
event = dict(
type="default_streams",
default_streams=streams_to_dicts_sorted(get_default_streams_for_realm(realm.id)),
)
send_event(realm, event, active_non_guest_user_ids(realm.id))
def notify_default_stream_groups(realm: Realm) -> None:
event = dict(
type="default_stream_groups",
default_stream_groups=default_stream_groups_to_dicts_sorted(
get_default_stream_groups(realm)
),
)
send_event(realm, event, active_non_guest_user_ids(realm.id))
def do_add_default_stream(stream: Stream) -> None:
realm_id = stream.realm_id
stream_id = stream.id
if not DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).exists():
DefaultStream.objects.create(realm_id=realm_id, stream_id=stream_id)
notify_default_streams(stream.realm)
def do_remove_default_stream(stream: Stream) -> None:
realm_id = stream.realm_id
stream_id = stream.id
DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).delete()
notify_default_streams(stream.realm)
def do_create_default_stream_group(
realm: Realm, group_name: str, description: str, streams: List[Stream]
) -> None:
default_streams = get_default_streams_for_realm(realm.id)
for stream in streams:
if stream in default_streams:
raise JsonableError(
_(
"'{stream_name}' is a default stream and cannot be added to '{group_name}'",
).format(stream_name=stream.name, group_name=group_name)
)
check_default_stream_group_name(group_name)
(group, created) = DefaultStreamGroup.objects.get_or_create(
name=group_name, realm=realm, description=description
)
if not created:
raise JsonableError(
_(
"Default stream group '{group_name}' already exists",
).format(group_name=group_name)
)
group.streams.set(streams)
notify_default_stream_groups(realm)
def do_add_streams_to_default_stream_group(
realm: Realm, group: DefaultStreamGroup, streams: List[Stream]
) -> None:
default_streams = get_default_streams_for_realm(realm.id)
for stream in streams:
if stream in default_streams:
raise JsonableError(
_(
"'{stream_name}' is a default stream and cannot be added to '{group_name}'",
).format(stream_name=stream.name, group_name=group.name)
)
if stream in group.streams.all():
raise JsonableError(
_(
"Stream '{stream_name}' is already present in default stream group '{group_name}'",
).format(stream_name=stream.name, group_name=group.name)
)
group.streams.add(stream)
group.save()
notify_default_stream_groups(realm)
def do_remove_streams_from_default_stream_group(
realm: Realm, group: DefaultStreamGroup, streams: List[Stream]
) -> None:
for stream in streams:
if stream not in group.streams.all():
raise JsonableError(
_(
"Stream '{stream_name}' is not present in default stream group '{group_name}'",
).format(stream_name=stream.name, group_name=group.name)
)
group.streams.remove(stream)
group.save()
notify_default_stream_groups(realm)
def do_change_default_stream_group_name(
realm: Realm, group: DefaultStreamGroup, new_group_name: str
) -> None:
if group.name == new_group_name:
raise JsonableError(
_("This default stream group is already named '{}'").format(new_group_name)
)
if DefaultStreamGroup.objects.filter(name=new_group_name, realm=realm).exists():
raise JsonableError(_("Default stream group '{}' already exists").format(new_group_name))
group.name = new_group_name
group.save()
notify_default_stream_groups(realm)
def do_change_default_stream_group_description(
realm: Realm, group: DefaultStreamGroup, new_description: str
) -> None:
group.description = new_description
group.save()
notify_default_stream_groups(realm)
def do_remove_default_stream_group(realm: Realm, group: DefaultStreamGroup) -> None:
group.delete()
notify_default_stream_groups(realm)
def get_default_streams_for_realm(realm_id: int) -> List[Stream]:
return [
default.stream
for default in DefaultStream.objects.select_related().filter(realm_id=realm_id)
]
def get_default_subs(user_profile: UserProfile) -> List[Stream]:
# Right now default streams are realm-wide. This wrapper gives us flexibility
# to some day further customize how we set up default streams for new users.
return get_default_streams_for_realm(user_profile.realm_id)
# Returns default streams in a JSON-serializable format.
def streams_to_dicts_sorted(streams: List[Stream]) -> List[Dict[str, Any]]:
return sorted((stream.to_dict() for stream in streams), key=lambda elt: elt["name"])
def default_stream_groups_to_dicts_sorted(groups: List[DefaultStreamGroup]) -> List[Dict[str, Any]]:
return sorted((group.to_dict() for group in groups), key=lambda elt: elt["name"])
def do_update_user_activity_interval(
user_profile: UserProfile, log_time: datetime.datetime
) -> None:
effective_end = log_time + UserActivityInterval.MIN_INTERVAL_LENGTH
# This code isn't perfect, because with various races we might end
# up creating two overlapping intervals, but that shouldn't happen
# often, and can be corrected for in post-processing
try:
last = UserActivityInterval.objects.filter(user_profile=user_profile).order_by("-end")[0]
# Two intervals overlap iff each interval ends after the other
# begins. In this case, we just extend the old interval to
# include the new interval.
if log_time <= last.end and effective_end >= last.start:
last.end = max(last.end, effective_end)
last.start = min(last.start, log_time)
last.save(update_fields=["start", "end"])
return
except IndexError:
pass
# Otherwise, the intervals don't overlap, so we should make a new one
UserActivityInterval.objects.create(
user_profile=user_profile, start=log_time, end=effective_end
)
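# Illustrative sketch of the interval merging above (timestamps are
# hypothetical, and the exact MIN_INTERVAL_LENGTH is defined on the model):
# if the latest interval is [10:00, 10:15] and a new activity log arrives at
# 10:10 with an effective end of 10:10 + MIN_INTERVAL_LENGTH, the intervals
# overlap, so the existing row is widened rather than creating a second,
# overlapping row; activity at 11:00 would instead create a new row.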
@statsd_increment("user_activity")
def do_update_user_activity(
user_profile_id: int, client_id: int, query: str, count: int, log_time: datetime.datetime
) -> None:
(activity, created) = UserActivity.objects.get_or_create(
user_profile_id=user_profile_id,
client_id=client_id,
query=query,
defaults={"last_visit": log_time, "count": count},
)
if not created:
activity.count += count
activity.last_visit = log_time
activity.save(update_fields=["last_visit", "count"])
def send_presence_changed(user_profile: UserProfile, presence: UserPresence) -> None:
# Most presence data is sent to clients in the main presence
# endpoint in response to the user's own presence; this results in
# data that is 1-2 minutes stale for who is online. The flaw with
# this plan is that when a user comes back online and then immediately
# sends a message, recipients may still see that user as offline!
# We solve that by sending an immediate presence update to clients.
#
# See https://zulip.readthedocs.io/en/latest/subsystems/presence.html for
# internals documentation on presence.
user_ids = active_user_ids(user_profile.realm_id)
if len(user_ids) > settings.USER_LIMIT_FOR_SENDING_PRESENCE_UPDATE_EVENTS:
# These immediate presence updates generate quadratic work for
# Tornado (each event contains a linear number of users, and the
# frequency of users coming online also grows linearly with the
# userbase). In
# organizations with thousands of users, this can overload
# Tornado, especially if much of the realm comes online at the
# same time.
#
# The utility of these live-presence updates goes down as
# organizations get bigger (since one is much less likely to
# be paying attention to the sidebar); so beyond a limit, we
# stop sending them at all.
return
presence_dict = presence.to_dict()
event = dict(
type="presence",
email=user_profile.email,
user_id=user_profile.id,
server_timestamp=time.time(),
presence={presence_dict["client"]: presence_dict},
)
send_event(user_profile.realm, event, user_ids)
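# Illustrative sketch of the event sent above (field values are
# hypothetical; the inner dict comes from UserPresence.to_dict(), so its
# exact keys are determined by that method):
#
#   {
#       "type": "presence",
#       "email": "iago@zulip.com",
#       "user_id": 11,
#       "server_timestamp": 1629300000.0,
#       "presence": {"website": {...}},
#   }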
def consolidate_client(client: Client) -> Client:
# The web app reports a client as 'website'.
# The desktop app reports a client as ZulipDesktop
# because it sets a custom user agent. We want both
# to count as web users, so we alias ZulipDesktop to website.
if client.name in ["ZulipDesktop"]:
return get_client("website")
else:
return client
@statsd_increment("user_presence")
def do_update_user_presence(
user_profile: UserProfile, client: Client, log_time: datetime.datetime, status: int
) -> None:
client = consolidate_client(client)
defaults = dict(
timestamp=log_time,
status=status,
realm_id=user_profile.realm_id,
)
(presence, created) = UserPresence.objects.get_or_create(
user_profile=user_profile,
client=client,
defaults=defaults,
)
stale_status = (log_time - presence.timestamp) > datetime.timedelta(minutes=1, seconds=10)
was_idle = presence.status == UserPresence.IDLE
became_online = (status == UserPresence.ACTIVE) and (stale_status or was_idle)
# If an object was created, it has already been saved.
#
# We suppress changes from ACTIVE to IDLE before stale_status is reached;
# this protects us from the user having two clients open: one active, the
# other idle. Without this check, we would constantly toggle their status
# between the two states.
if not created and stale_status or was_idle or status == presence.status:
# The following block attempts to only update the "status"
# field in the event that it actually changed. This is
# important to avoid flushing the UserPresence cache when the
# data it would return to a client hasn't actually changed
# (see the UserPresence post_save hook for details).
presence.timestamp = log_time
update_fields = ["timestamp"]
if presence.status != status:
presence.status = status
update_fields.append("status")
presence.save(update_fields=update_fields)
if not user_profile.realm.presence_disabled and (created or became_online):
send_presence_changed(user_profile, presence)
def update_user_activity_interval(user_profile: UserProfile, log_time: datetime.datetime) -> None:
event = {"user_profile_id": user_profile.id, "time": datetime_to_timestamp(log_time)}
queue_json_publish("user_activity_interval", event)
def update_user_presence(
user_profile: UserProfile,
client: Client,
log_time: datetime.datetime,
status: int,
new_user_input: bool,
) -> None:
event = {
"user_profile_id": user_profile.id,
"status": status,
"time": datetime_to_timestamp(log_time),
"client": client.name,
}
queue_json_publish("user_presence", event)
if new_user_input:
update_user_activity_interval(user_profile, log_time)
def do_update_user_status(
user_profile: UserProfile,
away: Optional[bool],
status_text: Optional[str],
client_id: int,
emoji_name: Optional[str],
emoji_code: Optional[str],
reaction_type: Optional[str],
) -> None:
if away is None:
status = None
elif away:
status = UserStatus.AWAY
else:
status = UserStatus.NORMAL
realm = user_profile.realm
update_user_status(
user_profile_id=user_profile.id,
status=status,
status_text=status_text,
client_id=client_id,
emoji_name=emoji_name,
emoji_code=emoji_code,
reaction_type=reaction_type,
)
event = dict(
type="user_status",
user_id=user_profile.id,
)
if away is not None:
event["away"] = away
if status_text is not None:
event["status_text"] = status_text
if emoji_name is not None:
event["emoji_name"] = emoji_name
event["emoji_code"] = emoji_code
event["reaction_type"] = reaction_type
send_event(realm, event, active_user_ids(realm.id))
@dataclass
class ReadMessagesEvent:
messages: List[int]
all: bool
type: str = field(default="update_message_flags", init=False)
op: str = field(default="add", init=False)
operation: str = field(default="add", init=False)
flag: str = field(default="read", init=False)
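# Illustrative sketch: asdict(ReadMessagesEvent(messages=[1, 2], all=False))
# produces the wire format used below, with the modern "op" field and the
# legacy "operation" field carrying the same value:
#
#   {
#       "messages": [1, 2],
#       "all": False,
#       "type": "update_message_flags",
#       "op": "add",
#       "operation": "add",
#       "flag": "read",
#   }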
def do_mark_all_as_read(user_profile: UserProfile, client: Client) -> int:
log_statsd_event("bankruptcy")
# First, we clear mobile push notifications. This is safer in the
# event that the below logic times out and we're killed.
all_push_message_ids = (
UserMessage.objects.filter(
user_profile=user_profile,
)
.extra(
where=[UserMessage.where_active_push_notification()],
)
.values_list("message_id", flat=True)[0:10000]
)
do_clear_mobile_push_notifications_for_ids([user_profile.id], all_push_message_ids)
msgs = UserMessage.objects.filter(user_profile=user_profile).extra(
where=[UserMessage.where_unread()],
)
count = msgs.update(
flags=F("flags").bitor(UserMessage.flags.read),
)
event = asdict(
ReadMessagesEvent(
messages=[], # we don't send messages, since the client reloads anyway
all=True,
)
)
event_time = timezone_now()
send_event(user_profile.realm, event, [user_profile.id])
do_increment_logging_stat(
user_profile, COUNT_STATS["messages_read::hour"], None, event_time, increment=count
)
do_increment_logging_stat(
user_profile,
COUNT_STATS["messages_read_interactions::hour"],
None,
event_time,
increment=min(1, count),
)
return count
def do_mark_stream_messages_as_read(
user_profile: UserProfile, stream_recipient_id: int, topic_name: Optional[str] = None
) -> int:
log_statsd_event("mark_stream_as_read")
msgs = UserMessage.objects.filter(
user_profile=user_profile,
)
msgs = msgs.filter(message__recipient_id=stream_recipient_id)
if topic_name:
msgs = filter_by_topic_name_via_message(
query=msgs,
topic_name=topic_name,
)
msgs = msgs.extra(
where=[UserMessage.where_unread()],
)
message_ids = list(msgs.values_list("message_id", flat=True))
count = msgs.update(
flags=F("flags").bitor(UserMessage.flags.read),
)
event = asdict(
ReadMessagesEvent(
messages=message_ids,
all=False,
)
)
event_time = timezone_now()
send_event(user_profile.realm, event, [user_profile.id])
do_clear_mobile_push_notifications_for_ids([user_profile.id], message_ids)
do_increment_logging_stat(
user_profile, COUNT_STATS["messages_read::hour"], None, event_time, increment=count
)
do_increment_logging_stat(
user_profile,
COUNT_STATS["messages_read_interactions::hour"],
None,
event_time,
increment=min(1, count),
)
return count
def do_mark_muted_user_messages_as_read(
user_profile: UserProfile,
muted_user: UserProfile,
) -> int:
messages = UserMessage.objects.filter(
user_profile=user_profile, message__sender=muted_user
).extra(where=[UserMessage.where_unread()])
message_ids = list(messages.values_list("message_id", flat=True))
count = messages.update(
flags=F("flags").bitor(UserMessage.flags.read),
)
event = asdict(
ReadMessagesEvent(
messages=message_ids,
all=False,
)
)
event_time = timezone_now()
send_event(user_profile.realm, event, [user_profile.id])
do_clear_mobile_push_notifications_for_ids([user_profile.id], message_ids)
do_increment_logging_stat(
user_profile, COUNT_STATS["messages_read::hour"], None, event_time, increment=count
)
do_increment_logging_stat(
user_profile,
COUNT_STATS["messages_read_interactions::hour"],
None,
event_time,
increment=min(1, count),
)
return count
def do_update_mobile_push_notification(
message: Message,
prior_mention_user_ids: Set[int],
mentions_user_ids: Set[int],
stream_push_user_ids: Set[int],
) -> None:
# Called during the message edit code path to remove mobile push
# notifications for users who are no longer mentioned following
# the edit. See #15428 for details.
#
# A perfect implementation would also support updating the message
# in a sent notification if a message was edited to mention a
# group rather than a user (or vice versa), though it is likely
# not worth the effort to do such a change.
if not message.is_stream_message():
return
remove_notify_users = prior_mention_user_ids - mentions_user_ids - stream_push_user_ids
do_clear_mobile_push_notifications_for_ids(list(remove_notify_users), [message.id])
def do_clear_mobile_push_notifications_for_ids(
user_profile_ids: List[int], message_ids: List[int]
) -> None:
if len(message_ids) == 0:
return
# This function supports clearing notifications for several users
# only for the message-edit use case where we'll have a single message_id.
assert len(user_profile_ids) == 1 or len(message_ids) == 1
messages_by_user = defaultdict(list)
notifications_to_update = list(
UserMessage.objects.filter(
message_id__in=message_ids,
user_profile_id__in=user_profile_ids,
)
.extra(
where=[UserMessage.where_active_push_notification()],
)
.values_list("user_profile_id", "message_id")
)
for (user_id, message_id) in notifications_to_update:
messages_by_user[user_id].append(message_id)
for (user_profile_id, event_message_ids) in messages_by_user.items():
queue_json_publish(
"missedmessage_mobile_notifications",
{
"type": "remove",
"user_profile_id": user_profile_id,
"message_ids": event_message_ids,
},
)
def do_update_message_flags(
user_profile: UserProfile, client: Client, operation: str, flag: str, messages: List[int]
) -> int:
valid_flags = [item for item in UserMessage.flags if item not in UserMessage.NON_API_FLAGS]
if flag not in valid_flags:
raise JsonableError(_("Invalid flag: '{}'").format(flag))
if flag in UserMessage.NON_EDITABLE_FLAGS:
raise JsonableError(_("Flag not editable: '{}'").format(flag))
if operation not in ("add", "remove"):
raise JsonableError(_("Invalid message flag operation: '{}'").format(operation))
flagattr = getattr(UserMessage.flags, flag)
msgs = UserMessage.objects.filter(user_profile=user_profile, message_id__in=messages)
# This next block allows you to star any message, even those you
# didn't receive (e.g. because you're looking at a public stream
# you're not subscribed to, etc.). The problem is that starring
# is a boolean flag on UserMessage, and UserMessage rows are
# normally created only when you receive a message to support
# searching your personal history. So we need to create one. We
# add UserMessage.flags.historical, so that features that need
# "messages you actually received" can exclude these UserMessages.
if msgs.count() == 0:
if len(messages) != 1:
raise JsonableError(_("Invalid message(s)"))
if flag != "starred":
raise JsonableError(_("Invalid message(s)"))
# Validate that the user could have read the relevant message
message = access_message(user_profile, messages[0])[0]
# OK, this is a message that you legitimately have access
# to via narrowing to the stream it is on, even though you
# didn't actually receive it. So we create a historical,
# read UserMessage message row for you to star.
UserMessage.objects.create(
user_profile=user_profile,
message=message,
flags=UserMessage.flags.historical | UserMessage.flags.read,
)
if operation == "add":
count = msgs.update(flags=F("flags").bitor(flagattr))
elif operation == "remove":
count = msgs.update(flags=F("flags").bitand(~flagattr))
event = {
"type": "update_message_flags",
"op": operation,
"operation": operation,
"flag": flag,
"messages": messages,
"all": False,
}
send_event(user_profile.realm, event, [user_profile.id])
if flag == "read" and operation == "add":
event_time = timezone_now()
do_clear_mobile_push_notifications_for_ids([user_profile.id], messages)
do_increment_logging_stat(
user_profile, COUNT_STATS["messages_read::hour"], None, event_time, increment=count
)
do_increment_logging_stat(
user_profile,
COUNT_STATS["messages_read_interactions::hour"],
None,
event_time,
increment=min(1, count),
)
return count
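# Illustrative usage sketch (the message ID is hypothetical): starring a
# message from a public stream the user is not subscribed to, e.g.
# do_update_message_flags(user_profile, client, "add", "starred", [123]),
# first creates a historical, read UserMessage row via the block above and
# then applies the starred flag with a single bitor update.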
class MessageUpdateUserInfoResult(TypedDict):
message_user_ids: Set[int]
mention_user_ids: Set[int]
def maybe_send_resolve_topic_notifications(
*,
user_profile: UserProfile,
stream: Stream,
old_topic: str,
new_topic: str,
) -> None:
# Note that topics will have already been stripped in check_update_message.
#
# This logic is designed to treat removing a weird "✔ ✔✔ "
# prefix as unresolving the topic.
if old_topic.lstrip(RESOLVED_TOPIC_PREFIX) != new_topic.lstrip(RESOLVED_TOPIC_PREFIX):
return
if new_topic.startswith(RESOLVED_TOPIC_PREFIX) and not old_topic.startswith(
RESOLVED_TOPIC_PREFIX
):
notification_string = _("{user} has marked this topic as resolved.")
elif old_topic.startswith(RESOLVED_TOPIC_PREFIX) and not new_topic.startswith(
RESOLVED_TOPIC_PREFIX
):
notification_string = _("{user} has marked this topic as unresolved.")
else:
# If there's some other weird topic that does not toggle the
# state of "topic starts with RESOLVED_TOPIC_PREFIX", we do
# nothing. Any other logic could result in cases where we send
# these notifications in a non-alternating fashion.
#
# Note that it is still possible for an individual topic to
# have multiple "This topic was marked as resolved"
# notifications in a row: one can send new messages to the
# pre-resolve topic and then resolve the topic created that
# way to get multiple in the resolved topic. And then an
# administrator can delete the messages in between. We consider this
# to be a fundamental risk of irresponsible message deletion,
# not a bug with the "resolve topics" feature.
return
sender = get_system_bot(settings.NOTIFICATION_BOT, user_profile.realm_id)
user_mention = f"@_**{user_profile.full_name}|{user_profile.id}**"
with override_language(stream.realm.default_language):
internal_send_stream_message(
sender,
stream,
new_topic,
notification_string.format(
user=user_mention,
),
)
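# Illustrative sketch, assuming RESOLVED_TOPIC_PREFIX is the "✔ " marker
# prepended to resolved topics (topic names here are hypothetical):
# renaming "memory leak" -> "✔ memory leak" sends the "marked as resolved"
# notification, renaming "✔ memory leak" -> "memory leak" sends the
# "marked as unresolved" notification, and any rename that changes the rest
# of the topic name (so the lstrip() comparison above fails) sends nothing.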
def send_message_moved_breadcrumbs(
user_profile: UserProfile,
old_stream: Stream,
old_topic: str,
old_thread_notification_string: Optional[str],
new_stream: Stream,
new_topic: Optional[str],
new_thread_notification_string: Optional[str],
) -> None:
# Since moving content between streams is highly disruptive,
# it's worth adding a couple tombstone messages showing what
# happened.
sender = get_system_bot(settings.NOTIFICATION_BOT, old_stream.realm_id)
if new_topic is None:
new_topic = old_topic
user_mention = f"@_**{user_profile.full_name}|{user_profile.id}**"
old_topic_link = f"#**{old_stream.name}>{old_topic}**"
new_topic_link = f"#**{new_stream.name}>{new_topic}**"
if new_thread_notification_string is not None:
with override_language(new_stream.realm.default_language):
internal_send_stream_message(
sender,
new_stream,
new_topic,
new_thread_notification_string.format(
old_location=old_topic_link,
user=user_mention,
),
)
if old_thread_notification_string is not None:
with override_language(old_stream.realm.default_language):
# Send a notification to the old stream that the topic was moved.
internal_send_stream_message(
sender,
old_stream,
old_topic,
old_thread_notification_string.format(
user=user_mention,
new_location=new_topic_link,
),
)
def get_user_info_for_message_updates(message_id: int) -> MessageUpdateUserInfoResult:
# We exclude UserMessage.flags.historical rows since those
# users did not receive the message originally, and thus
# probably are not relevant for reprocessed alert_words,
# mentions and similar rendering features. This may be a
# decision we change in the future.
query = UserMessage.objects.filter(
message=message_id,
flags=~UserMessage.flags.historical,
).values("user_profile_id", "flags")
rows = list(query)
message_user_ids = {row["user_profile_id"] for row in rows}
mask = UserMessage.flags.mentioned | UserMessage.flags.wildcard_mentioned
mention_user_ids = {row["user_profile_id"] for row in rows if int(row["flags"]) & mask}
return dict(
message_user_ids=message_user_ids,
mention_user_ids=mention_user_ids,
)
def update_user_message_flags(
rendering_result: MessageRenderingResult, ums: Iterable[UserMessage]
) -> None:
wildcard = rendering_result.mentions_wildcard
mentioned_ids = rendering_result.mentions_user_ids
ids_with_alert_words = rendering_result.user_ids_with_alert_words
changed_ums: Set[UserMessage] = set()
def update_flag(um: UserMessage, should_set: bool, flag: int) -> None:
if should_set:
if not (um.flags & flag):
um.flags |= flag
changed_ums.add(um)
else:
if um.flags & flag:
um.flags &= ~flag
changed_ums.add(um)
for um in ums:
has_alert_word = um.user_profile_id in ids_with_alert_words
update_flag(um, has_alert_word, UserMessage.flags.has_alert_word)
mentioned = um.user_profile_id in mentioned_ids
update_flag(um, mentioned, UserMessage.flags.mentioned)
update_flag(um, wildcard, UserMessage.flags.wildcard_mentioned)
for um in changed_ums:
um.save(update_fields=["flags"])
def update_to_dict_cache(
changed_messages: List[Message], realm_id: Optional[int] = None
) -> List[int]:
"""Updates the message as stored in the to_dict cache (for serving
messages)."""
items_for_remote_cache = {}
message_ids = []
changed_messages_to_dict = MessageDict.to_dict_uncached(changed_messages, realm_id)
for msg_id, msg in changed_messages_to_dict.items():
message_ids.append(msg_id)
key = to_dict_cache_key_id(msg_id)
items_for_remote_cache[key] = (msg,)
cache_set_many(items_for_remote_cache)
return message_ids
def do_update_embedded_data(
user_profile: UserProfile,
message: Message,
content: Optional[str],
rendering_result: MessageRenderingResult,
) -> None:
event: Dict[str, Any] = {"type": "update_message", "message_id": message.id}
changed_messages = [message]
rendered_content: Optional[str] = None
ums = UserMessage.objects.filter(message=message.id)
if content is not None:
update_user_message_flags(rendering_result, ums)
rendered_content = rendering_result.rendered_content
message.rendered_content = rendered_content
message.rendered_content_version = markdown_version
event["content"] = content
event["rendered_content"] = rendered_content
message.save(update_fields=["content", "rendered_content"])
event["message_ids"] = update_to_dict_cache(changed_messages)
def user_info(um: UserMessage) -> Dict[str, Any]:
return {
"id": um.user_profile_id,
"flags": um.flags_list(),
}
send_event(user_profile.realm, event, list(map(user_info, ums)))
class DeleteMessagesEvent(TypedDict, total=False):
type: str
message_ids: List[int]
message_type: str
topic: str
stream_id: int
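# Illustrative sketch of a stream-message deletion event built from this
# TypedDict (IDs and the topic are hypothetical):
#
#   {
#       "type": "delete_message",
#       "message_ids": [101, 102],
#       "message_type": "stream",
#       "stream_id": 7,
#       "topic": "quarterly planning",
#   }
#
# For private messages only "type", "message_ids", and "message_type" are
# filled in, which is why the TypedDict is declared with total=False.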
# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic
def do_update_message(
user_profile: UserProfile,
target_message: Message,
new_stream: Optional[Stream],
topic_name: Optional[str],
propagate_mode: str,
send_notification_to_old_thread: bool,
send_notification_to_new_thread: bool,
content: Optional[str],
rendering_result: Optional[MessageRenderingResult],
prior_mention_user_ids: Set[int],
mention_data: Optional[MentionData] = None,
) -> int:
"""
The main function for message editing. A message edit event can
modify:
* the message's content (in which case the caller will have
set both content and rendered_content),
* the topic, in which case the caller will have set topic_name
* or both
With topic edits, propagate_mode determines whether other messages
also have their topics edited.
"""
timestamp = timezone_now()
target_message.last_edit_time = timestamp
event: Dict[str, Any] = {
"type": "update_message",
"user_id": user_profile.id,
"edit_timestamp": datetime_to_timestamp(timestamp),
"message_id": target_message.id,
}
edit_history_event: Dict[str, Any] = {
"user_id": user_profile.id,
"timestamp": event["edit_timestamp"],
}
changed_messages = [target_message]
realm = user_profile.realm
stream_being_edited = None
if target_message.is_stream_message():
stream_id = target_message.recipient.type_id
stream_being_edited = get_stream_by_id_in_realm(stream_id, realm)
event["stream_name"] = stream_being_edited.name
ums = UserMessage.objects.filter(message=target_message.id)
if content is not None:
assert rendering_result is not None
# mention_data is required if there's a content edit.
assert mention_data is not None
# add data from group mentions to mentions_user_ids.
for group_id in rendering_result.mentions_user_group_ids:
members = mention_data.get_group_members(group_id)
rendering_result.mentions_user_ids.update(members)
update_user_message_flags(rendering_result, ums)
# One could imagine checking realm.allow_edit_history here and
# modifying the events based on that setting, but doing so
# doesn't really make sense. We need to send the edit event
# to clients regardless, and a client already had access to
# the original/pre-edit content of the message anyway. That
# setting must be enforced on the client side, and making a
# change here simply complicates the logic for clients parsing
# edit history events.
event["orig_content"] = target_message.content
event["orig_rendered_content"] = target_message.rendered_content
edit_history_event["prev_content"] = target_message.content
edit_history_event["prev_rendered_content"] = target_message.rendered_content
edit_history_event[
"prev_rendered_content_version"
] = target_message.rendered_content_version
target_message.content = content
target_message.rendered_content = rendering_result.rendered_content
target_message.rendered_content_version = markdown_version
event["content"] = content
event["rendered_content"] = rendering_result.rendered_content
event["prev_rendered_content_version"] = target_message.rendered_content_version
event["is_me_message"] = Message.is_status_message(
content, rendering_result.rendered_content
)
# target_message.has_image and target_message.has_link will have been
# already updated by Markdown rendering in the caller.
target_message.has_attachment = check_attachment_reference_change(
target_message, rendering_result
)
if target_message.is_stream_message():
if topic_name is not None:
new_topic_name = topic_name
else:
new_topic_name = target_message.topic_name()
stream_topic: Optional[StreamTopicTarget] = StreamTopicTarget(
stream_id=stream_id,
topic_name=new_topic_name,
)
else:
stream_topic = None
info = get_recipient_info(
realm_id=realm.id,
recipient=target_message.recipient,
sender_id=target_message.sender_id,
stream_topic=stream_topic,
possible_wildcard_mention=mention_data.message_has_wildcards(),
)
event["online_push_user_ids"] = list(info["online_push_user_ids"])
event["pm_mention_push_disabled_user_ids"] = list(info["pm_mention_push_disabled_user_ids"])
event["pm_mention_email_disabled_user_ids"] = list(
info["pm_mention_email_disabled_user_ids"]
)
event["stream_push_user_ids"] = list(info["stream_push_user_ids"])
event["stream_email_user_ids"] = list(info["stream_email_user_ids"])
event["muted_sender_user_ids"] = list(info["muted_sender_user_ids"])
event["prior_mention_user_ids"] = list(prior_mention_user_ids)
event["presence_idle_user_ids"] = filter_presence_idle_user_ids(info["active_user_ids"])
if rendering_result.mentions_wildcard:
event["wildcard_mention_user_ids"] = list(info["wildcard_mention_user_ids"])
else:
event["wildcard_mention_user_ids"] = []
do_update_mobile_push_notification(
target_message,
prior_mention_user_ids,
rendering_result.mentions_user_ids,
info["stream_push_user_ids"],
)
if topic_name is not None or new_stream is not None:
orig_topic_name = target_message.topic_name()
event["propagate_mode"] = propagate_mode
event["stream_id"] = target_message.recipient.type_id
if new_stream is not None:
assert content is None
assert target_message.is_stream_message()
assert stream_being_edited is not None
edit_history_event["prev_stream"] = stream_being_edited.id
event[ORIG_TOPIC] = orig_topic_name
target_message.recipient_id = new_stream.recipient_id
event["new_stream_id"] = new_stream.id
event["propagate_mode"] = propagate_mode
# When messages are moved from one stream to another, some
# users may lose access to those messages, including guest
# users and users not subscribed to the new stream (if it is a
# private stream). For those users, their experience is as
# though the messages were deleted, and we should send a
# delete_message event to them instead.
subs_to_old_stream = get_active_subscriptions_for_stream_id(
stream_id, include_deactivated_users=True
).select_related("user_profile")
subs_to_new_stream = list(
get_active_subscriptions_for_stream_id(
new_stream.id, include_deactivated_users=True
).select_related("user_profile")
)
old_stream_sub_ids = [user.user_profile_id for user in subs_to_old_stream]
new_stream_sub_ids = [user.user_profile_id for user in subs_to_new_stream]
# Get users who aren't subscribed to the new_stream.
subs_losing_usermessages = [
sub for sub in subs_to_old_stream if sub.user_profile_id not in new_stream_sub_ids
]
# Users who can no longer access the message without some action
# from administrators.
subs_losing_access = [
sub
for sub in subs_losing_usermessages
if sub.user_profile.is_guest or not new_stream.is_public()
]
ums = ums.exclude(
user_profile_id__in=[sub.user_profile_id for sub in subs_losing_usermessages]
)
subs_gaining_usermessages = []
if not new_stream.is_history_public_to_subscribers():
# For private streams with history not public to subscribers,
# we find the new stream's subscribers who were not subscribed
# to the messages' old stream and create new UserMessage rows
# for them so that they can access these messages.
subs_gaining_usermessages += [
user_id for user_id in new_stream_sub_ids if user_id not in old_stream_sub_ids
]
if topic_name is not None:
topic_name = truncate_topic(topic_name)
target_message.set_topic_name(topic_name)
# These fields have legacy field names.
event[ORIG_TOPIC] = orig_topic_name
event[TOPIC_NAME] = topic_name
event[TOPIC_LINKS] = topic_links(target_message.sender.realm_id, topic_name)
edit_history_event[LEGACY_PREV_TOPIC] = orig_topic_name
update_edit_history(target_message, timestamp, edit_history_event)
delete_event_notify_user_ids: List[int] = []
if propagate_mode in ["change_later", "change_all"]:
assert topic_name is not None or new_stream is not None
assert stream_being_edited is not None
# Other messages should only get topic/stream fields in their edit history.
topic_only_edit_history_event = {
k: v
for (k, v) in edit_history_event.items()
if k
not in [
"prev_content",
"prev_rendered_content",
"prev_rendered_content_version",
]
}
messages_list = update_messages_for_topic_edit(
acting_user=user_profile,
edited_message=target_message,
propagate_mode=propagate_mode,
orig_topic_name=orig_topic_name,
topic_name=topic_name,
new_stream=new_stream,
old_stream=stream_being_edited,
edit_history_event=topic_only_edit_history_event,
last_edit_time=timestamp,
)
changed_messages += messages_list
if new_stream is not None:
assert stream_being_edited is not None
changed_message_ids = [msg.id for msg in changed_messages]
if subs_gaining_usermessages:
ums_to_create = []
for message_id in changed_message_ids:
for user_profile_id in subs_gaining_usermessages:
# The fact that the user didn't have a UserMessage originally means we can infer that the user
# was not mentioned in the original message (even if mention syntax was present, it would not
# take effect for a user who was not subscribed). If we were editing the message's content, we
# would rerender the message and then use the new stream's data to determine whether this is
# a mention of a subscriber; but as we are not doing so, we choose to preserve the "was this
# mention syntax an actual mention" decision made during the original rendering for implementation
# simplicity. As a result, the only flag to consider applying here is read.
um = UserMessageLite(
user_profile_id=user_profile_id,
message_id=message_id,
flags=UserMessage.flags.read,
)
ums_to_create.append(um)
bulk_insert_ums(ums_to_create)
# Delete UserMessage objects for users who will no
# longer have access to these messages. Note: This could be
# very expensive, since it's N guest users x M messages.
UserMessage.objects.filter(
user_profile_id__in=[sub.user_profile_id for sub in subs_losing_usermessages],
message_id__in=changed_message_ids,
).delete()
delete_event: DeleteMessagesEvent = {
"type": "delete_message",
"message_ids": changed_message_ids,
"message_type": "stream",
"stream_id": stream_being_edited.id,
"topic": orig_topic_name,
}
delete_event_notify_user_ids = [sub.user_profile_id for sub in subs_losing_access]
send_event(user_profile.realm, delete_event, delete_event_notify_user_ids)
# This does message.save(update_fields=[...])
save_message_for_edit_use_case(message=target_message)
realm_id: Optional[int] = None
if stream_being_edited is not None:
realm_id = stream_being_edited.realm_id
event["message_ids"] = update_to_dict_cache(changed_messages, realm_id)
def user_info(um: UserMessage) -> Dict[str, Any]:
return {
"id": um.user_profile_id,
"flags": um.flags_list(),
}
# The following block arranges for users who are subscribed to a
# stream and can see history from before they subscribed to get
# live-updates when old messages are edited (e.g. if the user does
# a topic edit themself).
#
# We still don't send an update event to users who are not
# subscribed to this stream and don't have a UserMessage row. This
# means that if a non-subscriber is viewing the narrow, they won't
# get real-time updates. This is a balance between sending
# message-edit notifications for every public stream to every user
# in the organization (too expansive, and also not what we do for
# newly sent messages anyway) and having magical live-updates
# where possible.
users_to_be_notified = list(map(user_info, ums))
if stream_being_edited is not None:
if stream_being_edited.is_history_public_to_subscribers:
subscriptions = get_active_subscriptions_for_stream_id(
stream_id, include_deactivated_users=False
)
# We exclude long-term idle users, since they by
# definition have no active clients.
subscriptions = subscriptions.exclude(user_profile__long_term_idle=True)
# Remove duplicates by excluding the IDs of users already
# in the users_to_be_notified list. This is the case where a
# user both has a UserMessage row and is a current
# subscriber.
subscriptions = subscriptions.exclude(
user_profile_id__in=[um.user_profile_id for um in ums]
)
if new_stream is not None:
assert delete_event_notify_user_ids is not None
subscriptions = subscriptions.exclude(
user_profile_id__in=delete_event_notify_user_ids
)
# All users that are subscribed to the stream must be
# notified when a message is edited
subscriber_ids = set(subscriptions.values_list("user_profile_id", flat=True))
if new_stream is not None:
# TODO: Guest users don't see the new moved topic
# unless the breadcrumb message for the new stream is
# enabled. Excluding these users from receiving this
# event helps us avoid an error traceback for our
# clients. We should figure out a way to inform the
# guest users of this new topic if sending a 'message'
# event for these messages is not an option.
#
# Don't send this event to guest subs who are not
# subscribers of the old stream but are subscribed to
# the new stream; clients will be confused.
old_stream_unsubbed_guests = [
sub
for sub in subs_to_new_stream
if sub.user_profile.is_guest and sub.user_profile_id not in subscriber_ids
]
subscriptions = subscriptions.exclude(
user_profile_id__in=[sub.user_profile_id for sub in old_stream_unsubbed_guests]
)
subscriber_ids = set(subscriptions.values_list("user_profile_id", flat=True))
users_to_be_notified += list(map(subscriber_info, sorted(list(subscriber_ids))))
send_event(user_profile.realm, event, users_to_be_notified)
if len(changed_messages) > 0 and new_stream is not None and stream_being_edited is not None:
# Notify users that the topic was moved.
old_thread_notification_string = None
if send_notification_to_old_thread:
old_thread_notification_string = _("This topic was moved by {user} to {new_location}")
new_thread_notification_string = None
if send_notification_to_new_thread:
new_thread_notification_string = _(
"This topic was moved here from {old_location} by {user}"
)
send_message_moved_breadcrumbs(
user_profile,
stream_being_edited,
orig_topic_name,
old_thread_notification_string,
new_stream,
topic_name,
new_thread_notification_string,
)
if (
topic_name is not None
and new_stream is None
and content is None
and len(changed_messages) > 0
):
assert stream_being_edited is not None
maybe_send_resolve_topic_notifications(
user_profile=user_profile,
stream=stream_being_edited,
old_topic=orig_topic_name,
new_topic=topic_name,
)
return len(changed_messages)
def do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:
# The messages in a delete_message event all belong to the same topic,
# or it is a single private message, as any other behaviour is not
# possible with the current callers of this method.
messages = list(messages)
message_ids = [message.id for message in messages]
if not message_ids:
return
event: DeleteMessagesEvent = {
"type": "delete_message",
"message_ids": message_ids,
}
sample_message = messages[0]
message_type = "stream"
users_to_notify = []
if not sample_message.is_stream_message():
assert len(messages) == 1
message_type = "private"
ums = UserMessage.objects.filter(message_id__in=message_ids)
users_to_notify = [um.user_profile_id for um in ums]
archiving_chunk_size = retention.MESSAGE_BATCH_SIZE
if message_type == "stream":
stream_id = sample_message.recipient.type_id
event["stream_id"] = stream_id
event["topic"] = sample_message.topic_name()
subscriptions = get_active_subscriptions_for_stream_id(
stream_id, include_deactivated_users=False
)
# We exclude long-term idle users, since they by definition have no active clients.
subscriptions = subscriptions.exclude(user_profile__long_term_idle=True)
users_to_notify = list(subscriptions.values_list("user_profile_id", flat=True))
archiving_chunk_size = retention.STREAM_MESSAGE_BATCH_SIZE
move_messages_to_archive(message_ids, realm=realm, chunk_size=archiving_chunk_size)
event["message_type"] = message_type
transaction.on_commit(lambda: send_event(realm, event, users_to_notify))
def do_delete_messages_by_sender(user: UserProfile) -> None:
message_ids = list(
Message.objects.filter(sender=user).values_list("id", flat=True).order_by("id")
)
if message_ids:
move_messages_to_archive(message_ids, chunk_size=retention.STREAM_MESSAGE_BATCH_SIZE)
def get_streams_traffic(stream_ids: Set[int]) -> Dict[int, int]:
stat = COUNT_STATS["messages_in_stream:is_bot:day"]
traffic_from = timezone_now() - datetime.timedelta(days=28)
query = StreamCount.objects.filter(property=stat.property, end_time__gt=traffic_from)
query = query.filter(stream_id__in=stream_ids)
traffic_list = query.values("stream_id").annotate(value=Sum("value"))
traffic_dict = {}
for traffic in traffic_list:
traffic_dict[traffic["stream_id"]] = traffic["value"]
return traffic_dict
def round_to_2_significant_digits(number: int) -> int:
return int(round(number, 2 - len(str(number))))
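# Illustrative examples of the rounding above: 1234 -> round(1234, -2) ->
# 1200, 87 -> round(87, 0) -> 87, and 98765 -> round(98765, -3) -> 99000;
# i.e. the result keeps roughly two significant digits of the input.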
STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS = 7
def get_average_weekly_stream_traffic(
stream_id: int, stream_date_created: datetime.datetime, recent_traffic: Dict[int, int]
) -> Optional[int]:
try:
stream_traffic = recent_traffic[stream_id]
except KeyError:
stream_traffic = 0
stream_age = (timezone_now() - stream_date_created).days
if stream_age >= 28:
average_weekly_traffic = int(stream_traffic // 4)
elif stream_age >= STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS:
average_weekly_traffic = int(stream_traffic * 7 // stream_age)
else:
return None
if average_weekly_traffic == 0 and stream_traffic > 0:
average_weekly_traffic = 1
return round_to_2_significant_digits(average_weekly_traffic)
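# Illustrative worked example (traffic counts are hypothetical): a stream
# older than 28 days with 510 recent messages yields 510 // 4 = 127, rounded
# to 130; a 10-day-old stream with 40 messages yields 40 * 7 // 10 = 28; a
# stream younger than STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS returns None,
# signalling that we don't have enough history to estimate weekly traffic.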
def get_web_public_subs(realm: Realm) -> SubscriptionInfo:
color_idx = 0
def get_next_color() -> str:
nonlocal color_idx
color = STREAM_ASSIGNMENT_COLORS[color_idx]
color_idx = (color_idx + 1) % len(STREAM_ASSIGNMENT_COLORS)
return color
subscribed = []
for stream in Stream.objects.filter(realm=realm, is_web_public=True, deactivated=False):
stream_dict = stream.to_dict()
# Add versions of the Subscription fields based on a simulated
# new user subscription set.
stream_dict["is_muted"] = False
stream_dict["color"] = get_next_color()
stream_dict["desktop_notifications"] = True
stream_dict["audible_notifications"] = True
stream_dict["push_notifications"] = True
stream_dict["email_notifications"] = True
stream_dict["pin_to_top"] = False
stream_weekly_traffic = get_average_weekly_stream_traffic(
stream.id, stream.date_created, {}
)
stream_dict["stream_weekly_traffic"] = stream_weekly_traffic
stream_dict["email_address"] = ""
subscribed.append(stream_dict)
return SubscriptionInfo(
subscriptions=subscribed,
unsubscribed=[],
never_subscribed=[],
)
def build_stream_dict_for_sub(
user: UserProfile,
sub_dict: RawSubscriptionDict,
raw_stream_dict: RawStreamDict,
recent_traffic: Dict[int, int],
) -> Dict[str, object]:
# We first construct a dictionary based on the standard Stream
# and Subscription models' API_FIELDS.
result = {}
for field_name in Stream.API_FIELDS:
if field_name == "id":
result["stream_id"] = raw_stream_dict["id"]
continue
elif field_name == "date_created":
result["date_created"] = datetime_to_timestamp(raw_stream_dict[field_name])
continue
result[field_name] = raw_stream_dict[field_name]
# Copy Subscription.API_FIELDS.
for field_name in Subscription.API_FIELDS:
result[field_name] = sub_dict[field_name]
# Backwards-compatibility for clients that haven't been
# updated for the in_home_view => is_muted API migration.
result["in_home_view"] = not result["is_muted"]
# Backwards-compatibility for clients that haven't been
# updated for the is_announcement_only -> stream_post_policy
# migration.
result["is_announcement_only"] = (
raw_stream_dict["stream_post_policy"] == Stream.STREAM_POST_POLICY_ADMINS
)
# Add a few computed fields not directly from the data models.
result["stream_weekly_traffic"] = get_average_weekly_stream_traffic(
raw_stream_dict["id"], raw_stream_dict["date_created"], recent_traffic
)
result["email_address"] = encode_email_address_helper(
raw_stream_dict["name"], raw_stream_dict["email_token"], show_sender=True
)
# Our caller may add a subscribers field.
return result
def build_stream_dict_for_never_sub(
raw_stream_dict: RawStreamDict,
recent_traffic: Dict[int, int],
) -> Dict[str, object]:
result = {}
for field_name in Stream.API_FIELDS:
if field_name == "id":
result["stream_id"] = raw_stream_dict["id"]
continue
elif field_name == "date_created":
result["date_created"] = datetime_to_timestamp(raw_stream_dict[field_name])
continue
result[field_name] = raw_stream_dict[field_name]
result["stream_weekly_traffic"] = get_average_weekly_stream_traffic(
raw_stream_dict["id"], raw_stream_dict["date_created"], recent_traffic
)
# Backwards-compatibility addition of removed field.
result["is_announcement_only"] = (
raw_stream_dict["stream_post_policy"] == Stream.STREAM_POST_POLICY_ADMINS
)
# Our caller may add a subscribers field.
return result
# In general, it's better to avoid using .values() because it makes
# the code pretty ugly, but in this case, it has significant
# performance impact for loading / for users with large numbers of
# subscriptions, so it's worth optimizing.
def gather_subscriptions_helper(
user_profile: UserProfile,
include_subscribers: bool = True,
) -> SubscriptionInfo:
realm = user_profile.realm
all_streams: QuerySet[RawStreamDict] = get_active_streams(realm).values(
*Stream.API_FIELDS,
# The realm_id and recipient_id are generally not needed in the API.
"realm_id",
"recipient_id",
# email_token isn't public to some users with access to
# the stream, so doesn't belong in API_FIELDS.
"email_token",
)
recip_id_to_stream_id: Dict[int, int] = {
stream["recipient_id"]: stream["id"] for stream in all_streams
}
all_streams_map: Dict[int, RawStreamDict] = {stream["id"]: stream for stream in all_streams}
sub_dicts_query: Iterable[RawSubscriptionDict] = (
get_stream_subscriptions_for_user(user_profile)
.values(
*Subscription.API_FIELDS,
"recipient_id",
"active",
)
.order_by("recipient_id")
)
# We only care about subscriptions for active streams.
sub_dicts: List[RawSubscriptionDict] = [
sub_dict
for sub_dict in sub_dicts_query
if recip_id_to_stream_id.get(sub_dict["recipient_id"])
]
def get_stream_id(sub_dict: RawSubscriptionDict) -> int:
return recip_id_to_stream_id[sub_dict["recipient_id"]]
traffic_stream_ids = {get_stream_id(sub_dict) for sub_dict in sub_dicts}
recent_traffic = get_streams_traffic(stream_ids=traffic_stream_ids)
# Okay, now we finally get to populating our main results, which
# will be these three lists.
subscribed = []
unsubscribed = []
never_subscribed = []
sub_unsub_stream_ids = set()
for sub_dict in sub_dicts:
stream_id = get_stream_id(sub_dict)
sub_unsub_stream_ids.add(stream_id)
raw_stream_dict = all_streams_map[stream_id]
stream_dict = build_stream_dict_for_sub(
user=user_profile,
sub_dict=sub_dict,
raw_stream_dict=raw_stream_dict,
recent_traffic=recent_traffic,
)
# is_active is represented in this structure by which list we include it in.
is_active = sub_dict["active"]
if is_active:
subscribed.append(stream_dict)
else:
unsubscribed.append(stream_dict)
if user_profile.can_access_public_streams():
never_subscribed_stream_ids = set(all_streams_map) - sub_unsub_stream_ids
else:
web_public_stream_ids = {stream["id"] for stream in all_streams if stream["is_web_public"]}
never_subscribed_stream_ids = web_public_stream_ids - sub_unsub_stream_ids
never_subscribed_streams = [
all_streams_map[stream_id] for stream_id in never_subscribed_stream_ids
]
for raw_stream_dict in never_subscribed_streams:
is_public = not raw_stream_dict["invite_only"]
if is_public or user_profile.is_realm_admin:
stream_dict = build_stream_dict_for_never_sub(
raw_stream_dict=raw_stream_dict, recent_traffic=recent_traffic
)
never_subscribed.append(stream_dict)
if include_subscribers:
# The highly optimized bulk_get_subscriber_user_ids wants to know which
# streams we are subscribed to, for validation purposes, and it uses that
# info to know if it's allowed to find OTHER subscribers.
subscribed_stream_ids = {
get_stream_id(sub_dict) for sub_dict in sub_dicts if sub_dict["active"]
}
subscriber_map = bulk_get_subscriber_user_ids(
all_streams,
user_profile,
subscribed_stream_ids,
)
for lst in [subscribed, unsubscribed, never_subscribed]:
for stream_dict in lst:
assert isinstance(stream_dict["stream_id"], int)
stream_id = stream_dict["stream_id"]
stream_dict["subscribers"] = subscriber_map[stream_id]
return SubscriptionInfo(
subscriptions=sorted(subscribed, key=lambda x: x["name"]),
unsubscribed=sorted(unsubscribed, key=lambda x: x["name"]),
never_subscribed=sorted(never_subscribed, key=lambda x: x["name"]),
)
def gather_subscriptions(
user_profile: UserProfile,
include_subscribers: bool = False,
) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
helper_result = gather_subscriptions_helper(
user_profile,
include_subscribers=include_subscribers,
)
subscribed = helper_result.subscriptions
unsubscribed = helper_result.unsubscribed
return (subscribed, unsubscribed)
class ActivePresenceIdleUserData(TypedDict):
alerted: bool
notifications_data: UserMessageNotificationsData
def get_active_presence_idle_user_ids(
realm: Realm,
sender_id: int,
active_users_data: List[ActivePresenceIdleUserData],
) -> List[int]:
"""
Given a list of ActivePresenceIdleUserData entries for the active
users, we build up a subset of those users who fit these criteria:
* They are likely to need notifications.
* They are no longer "present" according to the
UserPresence table.
"""
if realm.presence_disabled:
return []
user_ids = set()
for user_data in active_users_data:
user_notifications_data: UserMessageNotificationsData = user_data["notifications_data"]
alerted = user_data["alerted"]
# We only need to know the presence idle state for a user if this message would be notifiable
# for them if they were indeed idle. Only including those users in the calculation below is a
# very important optimization for open communities with many inactive users.
if user_notifications_data.is_notifiable(sender_id, idle=True) or alerted:
user_ids.add(user_notifications_data.user_id)
return filter_presence_idle_user_ids(user_ids)
def filter_presence_idle_user_ids(user_ids: Set[int]) -> List[int]:
# Given a set of user IDs (the recipients of a message), accesses
# the UserPresence table to determine which of these users are
# currently idle and should potentially get email notifications
# (and push notifications with
# user_profile.enable_online_push_notifications=False).
#
# We exclude any presence data from ZulipMobile for the purpose of
# triggering these notifications; the mobile app can more
# effectively do its own client-side filtering of notification
# sounds/etc. for the case that the user is actively doing a PM
# conversation in the app.
if not user_ids:
return []
# Matches presence.js constant
OFFLINE_THRESHOLD_SECS = 140
recent = timezone_now() - datetime.timedelta(seconds=OFFLINE_THRESHOLD_SECS)
rows = (
UserPresence.objects.filter(
user_profile_id__in=user_ids,
status=UserPresence.ACTIVE,
timestamp__gte=recent,
)
.exclude(client__name="ZulipMobile")
.distinct("user_profile_id")
.values("user_profile_id")
)
active_user_ids = {row["user_profile_id"] for row in rows}
idle_user_ids = user_ids - active_user_ids
return sorted(idle_user_ids)
def do_send_confirmation_email(
invitee: PreregistrationUser,
referrer: UserProfile,
email_language: str,
invite_expires_in_days: Optional[int] = None,
) -> str:
"""
Send the confirmation/welcome e-mail to an invited user.
"""
activation_url = create_confirmation_link(
invitee, Confirmation.INVITATION, validity_in_days=invite_expires_in_days
)
context = {
"referrer_full_name": referrer.full_name,
"referrer_email": referrer.delivery_email,
"activate_url": activation_url,
"referrer_realm_name": referrer.realm.name,
}
send_email(
"zerver/emails/invitation",
to_emails=[invitee.email],
from_address=FromAddress.tokenized_no_reply_address(),
language=email_language,
context=context,
realm=referrer.realm,
)
return activation_url
def email_not_system_bot(email: str) -> None:
if is_cross_realm_bot_email(email):
msg = email_reserved_for_system_bots_error(email)
code = msg
raise ValidationError(
msg,
code=code,
params=dict(deactivated=False),
)
def estimate_recent_invites(realms: Collection[Realm], *, days: int) -> int:
"""An upper bound on the number of invites sent in the last `days` days"""
recent_invites = RealmCount.objects.filter(
realm__in=realms,
property="invites_sent::day",
end_time__gte=timezone_now() - datetime.timedelta(days=days),
).aggregate(Sum("value"))["value__sum"]
if recent_invites is None:
return 0
return recent_invites
def check_invite_limit(realm: Realm, num_invitees: int) -> None:
"""Discourage using invitation emails as a vector for carrying spam."""
msg = _(
"You do not have enough remaining invites for today. "
"Please contact {email} to have your limit raised. "
"No invitations were sent."
).format(email=settings.ZULIP_ADMINISTRATOR)
if not settings.OPEN_REALM_CREATION:
return
recent_invites = estimate_recent_invites([realm], days=1)
if num_invitees + recent_invites > realm.max_invites:
raise InvitationError(msg, [], sent_invitations=False)
default_max = settings.INVITES_DEFAULT_REALM_DAILY_MAX
newrealm_age = datetime.timedelta(days=settings.INVITES_NEW_REALM_DAYS)
if realm.date_created <= timezone_now() - newrealm_age:
# If this isn't a "newly-created" realm, we're done. The
# remaining code applies an aggregate limit across all
# "new" realms, to address sudden bursts of spam realms.
return
if realm.max_invites > default_max:
# If a user is on a realm where we've bumped up
# max_invites, then we exempt them from invite limits.
return
new_realms = Realm.objects.filter(
date_created__gte=timezone_now() - newrealm_age,
_max_invites__lte=default_max,
).all()
for days, count in settings.INVITES_NEW_REALM_LIMIT_DAYS:
recent_invites = estimate_recent_invites(new_realms, days=days)
if num_invitees + recent_invites > count:
raise InvitationError(msg, [], sent_invitations=False)
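# Illustrative sketch of the aggregate limit above, assuming a hypothetical
# settings.INVITES_NEW_REALM_LIMIT_DAYS of [(1, 100), (7, 500)]: a new realm
# may not push the combined invite count across all recently created realms
# past 100 in the last day or 500 in the last week, unless its own
# max_invites has been raised above INVITES_DEFAULT_REALM_DAILY_MAX.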
def do_invite_users(
user_profile: UserProfile,
invitee_emails: Collection[str],
streams: Collection[Stream],
*,
invite_expires_in_days: int,
invite_as: int = PreregistrationUser.INVITE_AS["MEMBER"],
) -> None:
num_invites = len(invitee_emails)
check_invite_limit(user_profile.realm, num_invites)
if settings.BILLING_ENABLED:
from corporate.lib.registration import check_spare_licenses_available_for_inviting_new_users
check_spare_licenses_available_for_inviting_new_users(user_profile.realm, num_invites)
realm = user_profile.realm
if not realm.invite_required:
# Inhibit joining an open realm to send spam invitations.
min_age = datetime.timedelta(days=settings.INVITES_MIN_USER_AGE_DAYS)
if user_profile.date_joined > timezone_now() - min_age and not user_profile.is_realm_admin:
raise InvitationError(
_(
"Your account is too new to send invites for this organization. "
"Ask an organization admin, or a more experienced user."
),
[],
sent_invitations=False,
)
good_emails: Set[str] = set()
errors: List[Tuple[str, str, bool]] = []
validate_email_allowed_in_realm = get_realm_email_validator(user_profile.realm)
for email in invitee_emails:
if email == "":
continue
email_error = validate_email_is_valid(
email,
validate_email_allowed_in_realm,
)
if email_error:
errors.append((email, email_error, False))
else:
good_emails.add(email)
"""
good_emails are emails that look ok so far,
but we still need to make sure they're not
gonna conflict with existing users
"""
error_dict = get_existing_user_errors(user_profile.realm, good_emails)
skipped: List[Tuple[str, str, bool]] = []
for email in error_dict:
msg, deactivated = error_dict[email]
skipped.append((email, msg, deactivated))
good_emails.remove(email)
validated_emails = list(good_emails)
if errors:
raise InvitationError(
_("Some emails did not validate, so we didn't send any invitations."),
errors + skipped,
sent_invitations=False,
)
if skipped and len(skipped) == len(invitee_emails):
# All e-mails were skipped, so we didn't actually invite anyone.
raise InvitationError(
_("We weren't able to invite anyone."), skipped, sent_invitations=False
)
# We do this here rather than in the invite queue processor since this
# is used for rate limiting invitations, rather than keeping track of
# when exactly invitations were sent
do_increment_logging_stat(
user_profile.realm,
COUNT_STATS["invites_sent::day"],
None,
timezone_now(),
increment=len(validated_emails),
)
# Now that we are past all the possible errors, we actually create
# the PreregistrationUser objects and trigger the email invitations.
for email in validated_emails:
# The logged in user is the referrer.
prereg_user = PreregistrationUser(
email=email, referred_by=user_profile, invited_as=invite_as, realm=user_profile.realm
)
prereg_user.save()
stream_ids = [stream.id for stream in streams]
prereg_user.streams.set(stream_ids)
event = {
"prereg_id": prereg_user.id,
"referrer_id": user_profile.id,
"email_language": user_profile.realm.default_language,
"invite_expires_in_days": invite_expires_in_days,
}
queue_json_publish("invites", event)
if skipped:
raise InvitationError(
_(
"Some of those addresses are already using Zulip, "
"so we didn't send them an invitation. We did send "
"invitations to everyone else!"
),
skipped,
sent_invitations=True,
)
notify_invites_changed(user_profile)
def do_get_user_invites(user_profile: UserProfile) -> List[Dict[str, Any]]:
if user_profile.is_realm_admin:
prereg_users = filter_to_valid_prereg_users(
PreregistrationUser.objects.filter(referred_by__realm=user_profile.realm)
)
else:
prereg_users = filter_to_valid_prereg_users(
PreregistrationUser.objects.filter(referred_by=user_profile)
)
invites = []
for invitee in prereg_users:
expiry_date = invitee.confirmation.get().expiry_date
invites.append(
dict(
email=invitee.email,
invited_by_user_id=invitee.referred_by.id,
invited=datetime_to_timestamp(invitee.invited_at),
expiry_date=datetime_to_timestamp(expiry_date),
id=invitee.id,
invited_as=invitee.invited_as,
is_multiuse=False,
)
)
if not user_profile.is_realm_admin:
# We do not return multiuse invites to non-admin users.
return invites
multiuse_confirmation_objs = Confirmation.objects.filter(
realm=user_profile.realm, type=Confirmation.MULTIUSE_INVITE, expiry_date__gte=timezone_now()
)
for confirmation_obj in multiuse_confirmation_objs:
invite = confirmation_obj.content_object
assert invite is not None
invites.append(
dict(
invited_by_user_id=invite.referred_by.id,
invited=datetime_to_timestamp(confirmation_obj.date_sent),
expiry_date=datetime_to_timestamp(confirmation_obj.expiry_date),
id=invite.id,
link_url=confirmation_url(
confirmation_obj.confirmation_key,
user_profile.realm,
Confirmation.MULTIUSE_INVITE,
),
invited_as=invite.invited_as,
is_multiuse=True,
)
)
return invites
def do_create_multiuse_invite_link(
referred_by: UserProfile,
invited_as: int,
invite_expires_in_days: int,
streams: Sequence[Stream] = [],
) -> str:
realm = referred_by.realm
invite = MultiuseInvite.objects.create(realm=realm, referred_by=referred_by)
if streams:
invite.streams.set(streams)
invite.invited_as = invited_as
invite.save()
notify_invites_changed(referred_by)
return create_confirmation_link(
invite, Confirmation.MULTIUSE_INVITE, validity_in_days=invite_expires_in_days
)
def do_revoke_user_invite(prereg_user: PreregistrationUser) -> None:
email = prereg_user.email
# Delete both the confirmation objects and the prereg_user object.
# TODO: Probably we actually want to set the confirmation objects
# to a "revoked" status so that we can give the invited user a better
# error message.
content_type = ContentType.objects.get_for_model(PreregistrationUser)
Confirmation.objects.filter(content_type=content_type, object_id=prereg_user.id).delete()
prereg_user.delete()
clear_scheduled_invitation_emails(email)
notify_invites_changed(prereg_user)
def do_revoke_multi_use_invite(multiuse_invite: MultiuseInvite) -> None:
content_type = ContentType.objects.get_for_model(MultiuseInvite)
Confirmation.objects.filter(content_type=content_type, object_id=multiuse_invite.id).delete()
multiuse_invite.delete()
notify_invites_changed(multiuse_invite.referred_by)
def do_resend_user_invite_email(prereg_user: PreregistrationUser) -> int:
    # These two assertions hold structurally given the caller's code path.
assert prereg_user.referred_by is not None
assert prereg_user.realm is not None
check_invite_limit(prereg_user.referred_by.realm, 1)
prereg_user.invited_at = timezone_now()
prereg_user.save()
invite_expires_in_days = (
prereg_user.confirmation.get().expiry_date - prereg_user.invited_at
).days
prereg_user.confirmation.clear()
do_increment_logging_stat(
prereg_user.realm, COUNT_STATS["invites_sent::day"], None, prereg_user.invited_at
)
clear_scheduled_invitation_emails(prereg_user.email)
# We don't store the custom email body, so just set it to None
event = {
"prereg_id": prereg_user.id,
"referrer_id": prereg_user.referred_by.id,
"email_language": prereg_user.referred_by.realm.default_language,
"invite_expires_in_days": invite_expires_in_days,
}
queue_json_publish("invites", event)
return datetime_to_timestamp(prereg_user.invited_at)
def notify_realm_emoji(realm: Realm) -> None:
event = dict(type="realm_emoji", op="update", realm_emoji=realm.get_emoji())
send_event(realm, event, active_user_ids(realm.id))
def check_add_realm_emoji(
realm: Realm, name: str, author: UserProfile, image_file: IO[bytes]
) -> Optional[RealmEmoji]:
realm_emoji = RealmEmoji(realm=realm, name=name, author=author)
realm_emoji.full_clean()
realm_emoji.save()
emoji_file_name = get_emoji_file_name(image_file.name, realm_emoji.id)
# The only user-controlled portion of 'emoji_file_name' is an extension,
# which can not contain '..' or '/' or '\', making it difficult to exploit
emoji_file_name = mark_sanitized(emoji_file_name)
emoji_uploaded_successfully = False
is_animated = False
try:
is_animated = upload_emoji_image(image_file, emoji_file_name, author)
emoji_uploaded_successfully = True
finally:
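        # Note: returning from this finally block intentionally swallows any
        # exception raised by upload_emoji_image, so a failed upload results in
        # the partially-created RealmEmoji being deleted and None being returned.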
if not emoji_uploaded_successfully:
realm_emoji.delete()
return None
else:
realm_emoji.file_name = emoji_file_name
realm_emoji.is_animated = is_animated
realm_emoji.save(update_fields=["file_name", "is_animated"])
notify_realm_emoji(realm_emoji.realm)
return realm_emoji
def do_remove_realm_emoji(realm: Realm, name: str) -> None:
emoji = RealmEmoji.objects.get(realm=realm, name=name, deactivated=False)
emoji.deactivated = True
emoji.save(update_fields=["deactivated"])
notify_realm_emoji(realm)
def notify_alert_words(user_profile: UserProfile, words: Sequence[str]) -> None:
event = dict(type="alert_words", alert_words=words)
send_event(user_profile.realm, event, [user_profile.id])
def do_add_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None:
words = add_user_alert_words(user_profile, alert_words)
notify_alert_words(user_profile, words)
def do_remove_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None:
words = remove_user_alert_words(user_profile, alert_words)
notify_alert_words(user_profile, words)
def do_mute_topic(
user_profile: UserProfile,
stream: Stream,
topic: str,
date_muted: Optional[datetime.datetime] = None,
) -> None:
if date_muted is None:
date_muted = timezone_now()
add_topic_mute(user_profile, stream.id, stream.recipient_id, topic, date_muted)
event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
def do_unmute_topic(user_profile: UserProfile, stream: Stream, topic: str) -> None:
remove_topic_mute(user_profile, stream.id, topic)
event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
def do_mute_user(
user_profile: UserProfile,
muted_user: UserProfile,
date_muted: Optional[datetime.datetime] = None,
) -> None:
if date_muted is None:
date_muted = timezone_now()
add_user_mute(user_profile, muted_user, date_muted)
do_mark_muted_user_messages_as_read(user_profile, muted_user)
event = dict(type="muted_users", muted_users=get_user_mutes(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=user_profile,
modified_user=user_profile,
event_type=RealmAuditLog.USER_MUTED,
event_time=date_muted,
extra_data=orjson.dumps({"muted_user_id": muted_user.id}).decode(),
)
def do_unmute_user(mute_object: MutedUser) -> None:
user_profile = mute_object.user_profile
muted_user = mute_object.muted_user
mute_object.delete()
event = dict(type="muted_users", muted_users=get_user_mutes(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=user_profile,
modified_user=user_profile,
event_type=RealmAuditLog.USER_UNMUTED,
event_time=timezone_now(),
extra_data=orjson.dumps({"unmuted_user_id": muted_user.id}).decode(),
)
def do_mark_hotspot_as_read(user: UserProfile, hotspot: str) -> None:
UserHotspot.objects.get_or_create(user=user, hotspot=hotspot)
event = dict(type="hotspots", hotspots=get_next_hotspots(user))
send_event(user.realm, event, [user.id])
def notify_linkifiers(realm: Realm) -> None:
realm_linkifiers = linkifiers_for_realm(realm.id)
event = dict(type="realm_linkifiers", realm_linkifiers=realm_linkifiers)
send_event(realm, event, active_user_ids(realm.id))
# Below is code for backwards compatibility. The now deprecated
# "realm_filters" event-type is used by older clients, and uses
# tuples.
realm_filters = realm_filters_for_realm(realm.id)
event = dict(type="realm_filters", realm_filters=realm_filters)
send_event(realm, event, active_user_ids(realm.id))
# NOTE: Regexes must be simple enough that they can be easily translated to JavaScript
# RegExp syntax. In addition to JS-compatible syntax, the following features are available:
# * Named groups will be converted to numbered groups automatically
# * Inline-regex flags will be stripped, and where possible translated to RegExp-wide flags
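# For example, a linkifier might use a pattern like r"#(?P<id>[0-9]+)" with a
# url_format_string like "https://example.com/issue/%(id)s" (both values here
# are purely illustrative).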
def do_add_linkifier(realm: Realm, pattern: str, url_format_string: str) -> int:
pattern = pattern.strip()
url_format_string = url_format_string.strip()
linkifier = RealmFilter(realm=realm, pattern=pattern, url_format_string=url_format_string)
linkifier.full_clean()
linkifier.save()
notify_linkifiers(realm)
return linkifier.id
def do_remove_linkifier(
realm: Realm, pattern: Optional[str] = None, id: Optional[int] = None
) -> None:
if pattern is not None:
RealmFilter.objects.get(realm=realm, pattern=pattern).delete()
else:
RealmFilter.objects.get(realm=realm, id=id).delete()
notify_linkifiers(realm)
def do_update_linkifier(realm: Realm, id: int, pattern: str, url_format_string: str) -> None:
pattern = pattern.strip()
url_format_string = url_format_string.strip()
linkifier = RealmFilter.objects.get(realm=realm, id=id)
linkifier.pattern = pattern
linkifier.url_format_string = url_format_string
linkifier.full_clean()
linkifier.save(update_fields=["pattern", "url_format_string"])
notify_linkifiers(realm)
def do_add_realm_domain(realm: Realm, domain: str, allow_subdomains: bool) -> RealmDomain:
realm_domain = RealmDomain.objects.create(
realm=realm, domain=domain, allow_subdomains=allow_subdomains
)
event = dict(
type="realm_domains",
op="add",
realm_domain=dict(
domain=realm_domain.domain, allow_subdomains=realm_domain.allow_subdomains
),
)
send_event(realm, event, active_user_ids(realm.id))
return realm_domain
def do_change_realm_domain(realm_domain: RealmDomain, allow_subdomains: bool) -> None:
realm_domain.allow_subdomains = allow_subdomains
realm_domain.save(update_fields=["allow_subdomains"])
event = dict(
type="realm_domains",
op="change",
realm_domain=dict(
domain=realm_domain.domain, allow_subdomains=realm_domain.allow_subdomains
),
)
send_event(realm_domain.realm, event, active_user_ids(realm_domain.realm_id))
def do_remove_realm_domain(
realm_domain: RealmDomain, *, acting_user: Optional[UserProfile]
) -> None:
realm = realm_domain.realm
domain = realm_domain.domain
realm_domain.delete()
if RealmDomain.objects.filter(realm=realm).count() == 0 and realm.emails_restricted_to_domains:
# If this was the last realm domain, we mark the realm as no
# longer restricted to domain, because the feature doesn't do
# anything if there are no domains, and this is probably less
# confusing than the alternative.
do_set_realm_property(realm, "emails_restricted_to_domains", False, acting_user=acting_user)
event = dict(type="realm_domains", op="remove", domain=domain)
send_event(realm, event, active_user_ids(realm.id))
def notify_realm_playgrounds(realm: Realm) -> None:
event = dict(type="realm_playgrounds", realm_playgrounds=get_realm_playgrounds(realm))
send_event(realm, event, active_user_ids(realm.id))
def do_add_realm_playground(realm: Realm, **kwargs: Any) -> int:
realm_playground = RealmPlayground(realm=realm, **kwargs)
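    # The kwargs are expected to be RealmPlayground fields, e.g. (illustrative
    # values): name="Python playground", pygments_language="Python",
    # url_prefix="https://example.com/run?code=".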
# We expect full_clean to always pass since a thorough input validation
# is performed in the view (using check_url, check_pygments_language, etc)
# before calling this function.
realm_playground.full_clean()
realm_playground.save()
notify_realm_playgrounds(realm)
return realm_playground.id
def do_remove_realm_playground(realm: Realm, realm_playground: RealmPlayground) -> None:
realm_playground.delete()
notify_realm_playgrounds(realm)
def get_occupied_streams(realm: Realm) -> QuerySet:
# TODO: Make a generic stub for QuerySet
"""Get streams with subscribers"""
exists_expression = Exists(
Subscription.objects.filter(
active=True,
is_user_active=True,
user_profile__realm=realm,
recipient_id=OuterRef("recipient_id"),
),
)
occupied_streams = (
Stream.objects.filter(realm=realm, deactivated=False)
.annotate(occupied=exists_expression)
.filter(occupied=True)
)
return occupied_streams
def get_web_public_streams(realm: Realm) -> List[Dict[str, Any]]: # nocoverage
query = Stream.objects.filter(realm=realm, deactivated=False, is_web_public=True)
streams = Stream.get_client_data(query)
return streams
def do_get_streams(
user_profile: UserProfile,
include_public: bool = True,
include_web_public: bool = False,
include_subscribed: bool = True,
include_all_active: bool = False,
include_default: bool = False,
include_owner_subscribed: bool = False,
) -> List[Dict[str, Any]]:
# This function is only used by API clients now.
if include_all_active and not user_profile.is_realm_admin:
raise JsonableError(_("User not authorized for this query"))
include_public = include_public and user_profile.can_access_public_streams()
# Start out with all active streams in the realm.
query = Stream.objects.filter(realm=user_profile.realm, deactivated=False)
if include_all_active:
streams = Stream.get_client_data(query)
else:
# We construct a query as the or (|) of the various sources
# this user requested streams from.
query_filter: Optional[Q] = None
def add_filter_option(option: Q) -> None:
nonlocal query_filter
if query_filter is None:
query_filter = option
else:
query_filter |= option
if include_subscribed:
subscribed_stream_ids = get_subscribed_stream_ids_for_user(user_profile)
recipient_check = Q(id__in=set(subscribed_stream_ids))
add_filter_option(recipient_check)
if include_public:
invite_only_check = Q(invite_only=False)
add_filter_option(invite_only_check)
if include_web_public:
web_public_check = Q(is_web_public=True)
add_filter_option(web_public_check)
if include_owner_subscribed and user_profile.is_bot:
bot_owner = user_profile.bot_owner
assert bot_owner is not None
owner_stream_ids = get_subscribed_stream_ids_for_user(bot_owner)
owner_subscribed_check = Q(id__in=set(owner_stream_ids))
add_filter_option(owner_subscribed_check)
if query_filter is not None:
query = query.filter(query_filter)
streams = Stream.get_client_data(query)
else:
# Don't bother going to the database with no valid sources
streams = []
streams.sort(key=lambda elt: elt["name"])
if include_default:
is_default = {}
default_streams = get_default_streams_for_realm(user_profile.realm_id)
for default_stream in default_streams:
is_default[default_stream.id] = True
for stream in streams:
stream["is_default"] = is_default.get(stream["stream_id"], False)
return streams
def notify_attachment_update(
user_profile: UserProfile, op: str, attachment_dict: Dict[str, Any]
) -> None:
event = {
"type": "attachment",
"op": op,
"attachment": attachment_dict,
"upload_space_used": user_profile.realm.currently_used_upload_space_bytes(),
}
send_event(user_profile.realm, event, [user_profile.id])
def do_claim_attachments(message: Message, potential_path_ids: List[str]) -> bool:
claimed = False
for path_id in potential_path_ids:
user_profile = message.sender
is_message_realm_public = False
is_message_web_public = False
if message.is_stream_message():
stream = Stream.objects.get(id=message.recipient.type_id)
is_message_realm_public = stream.is_public()
is_message_web_public = stream.is_web_public
if not validate_attachment_request(user_profile, path_id):
# Technically, there are 2 cases here:
# * The user put something in their message that has the form
            # of an upload, but doesn't correspond to a file that actually
            # exists. validate_attachment_request will return None.
# * The user is trying to send a link to a file they don't have permission to
# access themselves. validate_attachment_request will return False.
#
# Either case is unusual and suggests a UI bug that got
# the user in this situation, so we log in these cases.
logging.warning(
"User %s tried to share upload %s in message %s, but lacks permission",
user_profile.id,
path_id,
message.id,
)
continue
claimed = True
attachment = claim_attachment(
user_profile, path_id, message, is_message_realm_public, is_message_web_public
)
notify_attachment_update(user_profile, "update", attachment.to_dict())
return claimed
def do_delete_old_unclaimed_attachments(weeks_ago: int) -> None:
old_unclaimed_attachments = get_old_unclaimed_attachments(weeks_ago)
for attachment in old_unclaimed_attachments:
delete_message_image(attachment.path_id)
attachment.delete()
def check_attachment_reference_change(
message: Message, rendering_result: MessageRenderingResult
) -> bool:
    # For an unsaved message edit (message.* has been updated, but not
# saved to the database), adjusts Attachment data to correspond to
# the new content.
prev_attachments = {a.path_id for a in message.attachment_set.all()}
new_attachments = set(rendering_result.potential_attachment_path_ids)
if new_attachments == prev_attachments:
return bool(prev_attachments)
to_remove = list(prev_attachments - new_attachments)
if len(to_remove) > 0:
attachments_to_update = Attachment.objects.filter(path_id__in=to_remove).select_for_update()
message.attachment_set.remove(*attachments_to_update)
to_add = list(new_attachments - prev_attachments)
if len(to_add) > 0:
do_claim_attachments(message, to_add)
return message.attachment_set.exists()
def notify_realm_custom_profile_fields(realm: Realm) -> None:
fields = custom_profile_fields_for_realm(realm.id)
event = dict(type="custom_profile_fields", fields=[f.as_dict() for f in fields])
send_event(realm, event, active_user_ids(realm.id))
def try_add_realm_default_custom_profile_field(
realm: Realm, field_subtype: str
) -> CustomProfileField:
field_data = DEFAULT_EXTERNAL_ACCOUNTS[field_subtype]
custom_profile_field = CustomProfileField(
realm=realm,
name=field_data["name"],
field_type=CustomProfileField.EXTERNAL_ACCOUNT,
hint=field_data["hint"],
field_data=orjson.dumps(dict(subtype=field_subtype)).decode(),
)
custom_profile_field.save()
custom_profile_field.order = custom_profile_field.id
custom_profile_field.save(update_fields=["order"])
notify_realm_custom_profile_fields(realm)
return custom_profile_field
def try_add_realm_custom_profile_field(
realm: Realm,
name: str,
field_type: int,
hint: str = "",
field_data: Optional[ProfileFieldData] = None,
) -> CustomProfileField:
custom_profile_field = CustomProfileField(realm=realm, name=name, field_type=field_type)
custom_profile_field.hint = hint
if (
custom_profile_field.field_type == CustomProfileField.SELECT
or custom_profile_field.field_type == CustomProfileField.EXTERNAL_ACCOUNT
):
custom_profile_field.field_data = orjson.dumps(field_data or {}).decode()
custom_profile_field.save()
custom_profile_field.order = custom_profile_field.id
custom_profile_field.save(update_fields=["order"])
notify_realm_custom_profile_fields(realm)
return custom_profile_field
def do_remove_realm_custom_profile_field(realm: Realm, field: CustomProfileField) -> None:
"""
Deleting a field will also delete the user profile data
associated with it in CustomProfileFieldValue model.
"""
field.delete()
notify_realm_custom_profile_fields(realm)
def do_remove_realm_custom_profile_fields(realm: Realm) -> None:
CustomProfileField.objects.filter(realm=realm).delete()
def try_update_realm_custom_profile_field(
realm: Realm,
field: CustomProfileField,
name: str,
hint: str = "",
field_data: Optional[ProfileFieldData] = None,
) -> None:
field.name = name
field.hint = hint
if (
field.field_type == CustomProfileField.SELECT
or field.field_type == CustomProfileField.EXTERNAL_ACCOUNT
):
field.field_data = orjson.dumps(field_data or {}).decode()
field.save()
notify_realm_custom_profile_fields(realm)
def try_reorder_realm_custom_profile_fields(realm: Realm, order: List[int]) -> None:
    order_mapping = {field_id: index for index, field_id in enumerate(order)}
custom_profile_fields = CustomProfileField.objects.filter(realm=realm)
for custom_profile_field in custom_profile_fields:
if custom_profile_field.id not in order_mapping:
raise JsonableError(_("Invalid order mapping."))
for custom_profile_field in custom_profile_fields:
custom_profile_field.order = order_mapping[custom_profile_field.id]
custom_profile_field.save(update_fields=["order"])
notify_realm_custom_profile_fields(realm)
def notify_user_update_custom_profile_data(
user_profile: UserProfile, field: Dict[str, Union[int, str, List[int], None]]
) -> None:
data = dict(id=field["id"], value=field["value"])
if field["rendered_value"]:
data["rendered_value"] = field["rendered_value"]
payload = dict(user_id=user_profile.id, custom_profile_field=data)
event = dict(type="realm_user", op="update", person=payload)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm.id))
def do_update_user_custom_profile_data_if_changed(
user_profile: UserProfile,
data: List[Dict[str, Union[int, ProfileDataElementValue]]],
) -> None:
with transaction.atomic():
for custom_profile_field in data:
field_value, created = CustomProfileFieldValue.objects.get_or_create(
user_profile=user_profile, field_id=custom_profile_field["id"]
)
# field_value.value is a TextField() so we need to have field["value"]
# in string form to correctly make comparisons and assignments.
if isinstance(custom_profile_field["value"], str):
custom_profile_field_value_string = custom_profile_field["value"]
else:
custom_profile_field_value_string = orjson.dumps(
custom_profile_field["value"]
).decode()
if not created and field_value.value == custom_profile_field_value_string:
# If the field value isn't actually being changed to a different one,
# we have nothing to do here for this field.
continue
field_value.value = custom_profile_field_value_string
if field_value.field.is_renderable():
field_value.rendered_value = render_stream_description(
custom_profile_field_value_string
)
field_value.save(update_fields=["value", "rendered_value"])
else:
field_value.save(update_fields=["value"])
notify_user_update_custom_profile_data(
user_profile,
{
"id": field_value.field_id,
"value": field_value.value,
"rendered_value": field_value.rendered_value,
"type": field_value.field.field_type,
},
)
def check_remove_custom_profile_field_value(user_profile: UserProfile, field_id: int) -> None:
try:
custom_profile_field = CustomProfileField.objects.get(realm=user_profile.realm, id=field_id)
field_value = CustomProfileFieldValue.objects.get(
field=custom_profile_field, user_profile=user_profile
)
field_value.delete()
notify_user_update_custom_profile_data(
user_profile,
{
"id": field_id,
"value": None,
"rendered_value": None,
"type": custom_profile_field.field_type,
},
)
except CustomProfileField.DoesNotExist:
raise JsonableError(_("Field id {id} not found.").format(id=field_id))
except CustomProfileFieldValue.DoesNotExist:
pass
def do_send_create_user_group_event(user_group: UserGroup, members: List[UserProfile]) -> None:
event = dict(
type="user_group",
op="add",
group=dict(
name=user_group.name,
members=[member.id for member in members],
description=user_group.description,
id=user_group.id,
is_system_group=user_group.is_system_group,
),
)
send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def check_add_user_group(
realm: Realm, name: str, initial_members: List[UserProfile], description: str
) -> None:
try:
user_group = create_user_group(name, initial_members, realm, description=description)
do_send_create_user_group_event(user_group, initial_members)
except django.db.utils.IntegrityError:
raise JsonableError(_("User group '{}' already exists.").format(name))
def do_send_user_group_update_event(user_group: UserGroup, data: Dict[str, str]) -> None:
event = dict(type="user_group", op="update", group_id=user_group.id, data=data)
send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def do_update_user_group_name(user_group: UserGroup, name: str) -> None:
try:
user_group.name = name
user_group.save(update_fields=["name"])
except django.db.utils.IntegrityError:
raise JsonableError(_("User group '{}' already exists.").format(name))
do_send_user_group_update_event(user_group, dict(name=name))
def do_update_user_group_description(user_group: UserGroup, description: str) -> None:
user_group.description = description
user_group.save(update_fields=["description"])
do_send_user_group_update_event(user_group, dict(description=description))
def do_update_outgoing_webhook_service(
bot_profile: UserProfile, service_interface: int, service_payload_url: str
) -> None:
# TODO: First service is chosen because currently one bot can only have one service.
# Update this once multiple services are supported.
service = get_bot_services(bot_profile.id)[0]
service.base_url = service_payload_url
service.interface = service_interface
service.save()
send_event(
bot_profile.realm,
dict(
type="realm_bot",
op="update",
bot=dict(
user_id=bot_profile.id,
services=[
dict(
base_url=service.base_url, interface=service.interface, token=service.token
)
],
),
),
bot_owner_user_ids(bot_profile),
)
def do_update_bot_config_data(bot_profile: UserProfile, config_data: Dict[str, str]) -> None:
for key, value in config_data.items():
set_bot_config(bot_profile, key, value)
updated_config_data = get_bot_config(bot_profile)
send_event(
bot_profile.realm,
dict(
type="realm_bot",
op="update",
bot=dict(
user_id=bot_profile.id,
services=[dict(config_data=updated_config_data)],
),
),
bot_owner_user_ids(bot_profile),
)
def get_service_dicts_for_bot(user_profile_id: int) -> List[Dict[str, Any]]:
user_profile = get_user_profile_by_id(user_profile_id)
services = get_bot_services(user_profile_id)
service_dicts: List[Dict[str, Any]] = []
if user_profile.bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
service_dicts = [
{
"base_url": service.base_url,
"interface": service.interface,
"token": service.token,
}
for service in services
]
elif user_profile.bot_type == UserProfile.EMBEDDED_BOT:
try:
service_dicts = [
{
"config_data": get_bot_config(user_profile),
"service_name": services[0].name,
}
]
# A ConfigError just means that there are no config entries for user_profile.
except ConfigError:
pass
return service_dicts
def get_service_dicts_for_bots(
bot_dicts: List[Dict[str, Any]], realm: Realm
) -> Dict[int, List[Dict[str, Any]]]:
bot_profile_ids = [bot_dict["id"] for bot_dict in bot_dicts]
bot_services_by_uid: Dict[int, List[Service]] = defaultdict(list)
for service in Service.objects.filter(user_profile_id__in=bot_profile_ids):
bot_services_by_uid[service.user_profile_id].append(service)
embedded_bot_ids = [
bot_dict["id"] for bot_dict in bot_dicts if bot_dict["bot_type"] == UserProfile.EMBEDDED_BOT
]
embedded_bot_configs = get_bot_configs(embedded_bot_ids)
service_dicts_by_uid: Dict[int, List[Dict[str, Any]]] = {}
for bot_dict in bot_dicts:
bot_profile_id = bot_dict["id"]
bot_type = bot_dict["bot_type"]
services = bot_services_by_uid[bot_profile_id]
service_dicts: List[Dict[str, Any]] = []
if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
service_dicts = [
{
"base_url": service.base_url,
"interface": service.interface,
"token": service.token,
}
for service in services
]
elif bot_type == UserProfile.EMBEDDED_BOT:
            if bot_profile_id in embedded_bot_configs:
bot_config = embedded_bot_configs[bot_profile_id]
service_dicts = [
{
"config_data": bot_config,
"service_name": services[0].name,
}
]
service_dicts_by_uid[bot_profile_id] = service_dicts
return service_dicts_by_uid
def get_owned_bot_dicts(
user_profile: UserProfile, include_all_realm_bots_if_admin: bool = True
) -> List[Dict[str, Any]]:
if user_profile.is_realm_admin and include_all_realm_bots_if_admin:
result = get_bot_dicts_in_realm(user_profile.realm)
else:
result = UserProfile.objects.filter(
realm=user_profile.realm, is_bot=True, bot_owner=user_profile
).values(*bot_dict_fields)
services_by_ids = get_service_dicts_for_bots(result, user_profile.realm)
return [
{
"email": botdict["email"],
"user_id": botdict["id"],
"full_name": botdict["full_name"],
"bot_type": botdict["bot_type"],
"is_active": botdict["is_active"],
"api_key": botdict["api_key"],
"default_sending_stream": botdict["default_sending_stream__name"],
"default_events_register_stream": botdict["default_events_register_stream__name"],
"default_all_public_streams": botdict["default_all_public_streams"],
"owner_id": botdict["bot_owner_id"],
"avatar_url": avatar_url_from_dict(botdict),
"services": services_by_ids[botdict["id"]],
}
for botdict in result
]
def do_send_user_group_members_update_event(
event_name: str, user_group: UserGroup, user_ids: List[int]
) -> None:
event = dict(type="user_group", op=event_name, group_id=user_group.id, user_ids=user_ids)
send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def bulk_add_members_to_user_group(user_group: UserGroup, user_profiles: List[UserProfile]) -> None:
memberships = [
UserGroupMembership(user_group_id=user_group.id, user_profile=user_profile)
for user_profile in user_profiles
]
UserGroupMembership.objects.bulk_create(memberships)
user_ids = [up.id for up in user_profiles]
do_send_user_group_members_update_event("add_members", user_group, user_ids)
def remove_members_from_user_group(user_group: UserGroup, user_profiles: List[UserProfile]) -> None:
UserGroupMembership.objects.filter(
user_group_id=user_group.id, user_profile__in=user_profiles
).delete()
user_ids = [up.id for up in user_profiles]
do_send_user_group_members_update_event("remove_members", user_group, user_ids)
def do_send_delete_user_group_event(realm: Realm, user_group_id: int, realm_id: int) -> None:
event = dict(type="user_group", op="remove", group_id=user_group_id)
send_event(realm, event, active_user_ids(realm_id))
def check_delete_user_group(user_group_id: int, user_profile: UserProfile) -> None:
user_group = access_user_group_by_id(user_group_id, user_profile)
user_group.delete()
do_send_delete_user_group_event(user_profile.realm, user_group_id, user_profile.realm.id)
def do_send_realm_reactivation_email(realm: Realm, *, acting_user: Optional[UserProfile]) -> None:
url = create_confirmation_link(realm, Confirmation.REALM_REACTIVATION)
RealmAuditLog.objects.create(
realm=realm,
acting_user=acting_user,
event_type=RealmAuditLog.REALM_REACTIVATION_EMAIL_SENT,
event_time=timezone_now(),
)
context = {"confirmation_url": url, "realm_uri": realm.uri, "realm_name": realm.name}
language = realm.default_language
send_email_to_admins(
"zerver/emails/realm_reactivation",
realm,
from_address=FromAddress.tokenized_no_reply_address(),
from_name=FromAddress.security_email_from_name(language=language),
language=language,
context=context,
)
def do_set_zoom_token(user: UserProfile, token: Optional[Dict[str, object]]) -> None:
user.zoom_token = token
user.save(update_fields=["zoom_token"])
send_event(
user.realm,
dict(type="has_zoom_token", value=token is not None),
[user.id],
)
def notify_realm_export(user_profile: UserProfile) -> None:
# In the future, we may want to send this event to all realm admins.
event = dict(type="realm_export", exports=get_realm_exports_serialized(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
def do_delete_realm_export(user_profile: UserProfile, export: RealmAuditLog) -> None:
# Give mypy a hint so it knows `orjson.loads`
# isn't being passed an `Optional[str]`.
export_extra_data = export.extra_data
assert export_extra_data is not None
export_data = orjson.loads(export_extra_data)
export_path = export_data.get("export_path")
if export_path:
# Allow removal even if the export failed.
delete_export_tarball(export_path)
export_data.update(deleted_timestamp=timezone_now().timestamp())
export.extra_data = orjson.dumps(export_data).decode()
export.save(update_fields=["extra_data"])
notify_realm_export(user_profile)
def get_topic_messages(user_profile: UserProfile, stream: Stream, topic_name: str) -> List[Message]:
query = UserMessage.objects.filter(
user_profile=user_profile,
message__recipient=stream.recipient,
).order_by("id")
return [um.message for um in filter_by_topic_name_via_message(query, topic_name)]
| rht/zulip | zerver/lib/actions.py | Python | apache-2.0 | 296,769 | [
"Octopus"
] | 832e93507d1e8bd3102d83231e5e41a918e3cf0b08df13d2b19f95b7a5bbb730 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
# Copyright (C) 2007 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Package providing filter rules for Gramps.
"""
from ._allnotes import AllNotes
from ._hasidof import HasIdOf
from ._regexpidof import RegExpIdOf
from ._matchesregexpof import MatchesRegexpOf
from ._matchessubstringof import MatchesSubstringOf
from ._hasreferencecountof import HasReferenceCountOf
from ._noteprivate import NotePrivate
from ._matchesfilter import MatchesFilter
from ._hasnote import HasNote
from ._changedsince import ChangedSince
from ._hastag import HasTag
from ._hastype import HasType
editor_rule_list = [
AllNotes,
HasIdOf,
RegExpIdOf,
HasNote,
MatchesRegexpOf,
HasReferenceCountOf,
NotePrivate,
MatchesFilter,
ChangedSince,
HasTag,
HasType,
]
| prculley/gramps | gramps/gen/filters/rules/note/__init__.py | Python | gpl-2.0 | 1,568 | [
"Brian"
] | ccbd12ccec0833c5b7ddb1dd69342e544b529b7e182e769dca0b9191c333fd97 |
"""
Module for running simulations
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import round
from builtins import range
from builtins import str
from future import standard_library
standard_library.install_aliases()
import numpy as np
from neuron import h, init # Import NEURON
from . import utils
#------------------------------------------------------------------------------
# Commands required just before running simulation
#------------------------------------------------------------------------------
def preRun():
"""
    Function to execute the commands required just before running the simulation
    (set initial voltages, CVode options, h globals, random seeds, and event handlers).
"""
from .. import sim
# set initial v of cells
for cell in sim.net.cells:
sim.fih.append(h.FInitializeHandler(0, cell.initV))
# cvode variables
sim.cvode.active(int(sim.cfg.cvode_active))
sim.cvode.cache_efficient(int(sim.cfg.cache_efficient))
sim.cvode.atol(sim.cfg.cvode_atol)
sim.cvode.use_fast_imem(sim.cfg.use_fast_imem)
# set h global params
sim.setGlobals()
# set h.dt
h.dt = sim.cfg.dt
if sim.cfg.coreneuron:
sim.cfg.random123 = True
# set v_init if doesn't exist
if 'v_init' not in sim.cfg.hParams: sim.cfg.hParams['v_init'] = -65.0
# parallelcontext vars
sim.pc.set_maxstep(10)
mindelay = sim.pc.allreduce(sim.pc.set_maxstep(10), 2) # flag 2 returns minimum value
if sim.rank==0 and sim.cfg.verbose: print(('Minimum delay (time-step for queue exchange) is %.2f'%(mindelay)))
sim.pc.setup_transfer() # setup transfer of source_var to target_var
# handler for printing out time during simulation run
if sim.rank == 0 and sim.cfg.printRunTime:
def printRunTime():
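            # Print the elapsed simulated time, then re-register this handler so it
            # fires again cfg.printRunTime seconds (of simulated time) later.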
print('%.1fs' % (h.t/1000.0))
sim.cvode.event(h.t + int(sim.cfg.printRunTime*1000.0), sim.printRunTime)
sim.printRunTime = printRunTime
sim.fih.append(h.FInitializeHandler(1, sim.printRunTime))
# set global index used by all instances of the Random123 instances of Random
if sim.cfg.rand123GlobalIndex is not None:
rand = h.Random()
rand.Random123_globalindex(int(sim.cfg.rand123GlobalIndex))
# reset all netstim randomizers so runs are always equivalent
for cell in sim.net.cells:
if cell.tags.get('cellModel') == 'NetStim':
#cell.hRandom.Random123(sim.hashStr('NetStim'), cell.gid, cell.params['seed'])
if sim.cfg.random123:
cell.hPointp.noiseFromRandom123(utils.hashStr('NetStim'), cell.gid, cell.params['seed'])
else:
utils._init_stim_randomizer(cell.hRandom, 'NetStim', cell.gid, cell.params['seed'])
cell.hRandom.negexp(1)
cell.hPointp.noiseFromRandom(cell.hRandom)
pop = sim.net.pops[cell.tags['pop']]
if 'originalFormat' in pop.tags and pop.tags['originalFormat'] == 'NeuroML2_SpikeSource':
if sim.cfg.verbose: print("== Setting random generator in NeuroML spike generator")
cell.initRandom()
else:
for stim in cell.stims:
if 'hRandom' in stim:
#stim['hRandom'].Random123(sim.hashStr(stim['source']), cell.gid, stim['seed'])
if not sim.cfg.random123:
utils._init_stim_randomizer(stim['hRandom'], stim['type'], cell.gid, stim['seed'])
stim['hRandom'].negexp(1)
# Check if noiseFromRandom is in stim['hObj']; see https://github.com/Neurosim-lab/netpyne/issues/219
if not isinstance(stim['hObj'].noiseFromRandom, dict):
if sim.cfg.random123:
stim['hObj'].noiseFromRandom123(sim.hashStr(stim['type']), cell.gid, stim['seed'])
else:
stim['hObj'].noiseFromRandom(stim['hRandom'])
# handler for recording LFP
if sim.cfg.recordLFP:
def recordLFPHandler():
sim.cvode.event(h.t + float(sim.cfg.recordStep), sim.calculateLFP)
sim.cvode.event(h.t + float(sim.cfg.recordStep), recordLFPHandler)
sim.recordLFPHandler = recordLFPHandler
sim.fih.append(h.FInitializeHandler(0, sim.recordLFPHandler)) # initialize imemb
#------------------------------------------------------------------------------
# Run Simulation
#------------------------------------------------------------------------------
def runSim(skipPreRun=False):
"""
    Function to run the simulation for the configured duration, using NEURON or CoreNEURON.
Parameters
----------
skipPreRun : bool
        Whether to skip the :func:`preRun` setup step (useful if it has already been executed).
        **Default:** ``False``
"""
from .. import sim
sim.pc.barrier()
if hasattr(sim.cfg,'use_local_dt') and sim.cfg.use_local_dt:
try:
sim.cvode.use_local_dt(1)
if sim.cfg.verbose: print('Using local dt.')
except:
            if sim.cfg.verbose: print('Error: failed to use local dt.')
sim.pc.barrier()
sim.timing('start', 'runTime')
if not skipPreRun:
preRun()
h.finitialize(float(sim.cfg.hParams['v_init']))
if sim.cfg.coreneuron == True:
if sim.rank == 0: print('\nRunning simulation using CoreNEURON for %s ms...'%sim.cfg.duration)
from neuron import coreneuron
coreneuron.enable = True
if sim.cfg.gpu == True:
coreneuron.gpu = True
coreneuron.cell_permute = 2
else:
if sim.rank == 0: print('\nRunning simulation using NEURON for %s ms...'%sim.cfg.duration)
sim.pc.psolve(sim.cfg.duration)
sim.pc.barrier() # Wait for all hosts to get to this point
sim.timing('stop', 'runTime')
if sim.rank==0:
print(' Done; run time = %0.2f s; real-time ratio: %0.2f.' %
(sim.timingData['runTime'], sim.cfg.duration/1000/sim.timingData['runTime']))
#------------------------------------------------------------------------------
# Run Simulation with a function executed at intervals
#------------------------------------------------------------------------------
def runSimWithIntervalFunc(interval, func, timeRange=None, funcArgs=None):
"""
Function to run a simulation while executing a function at intervals
Parameters
----------
interval : float
Time interval (ms) at which to execute the function
**Default:** *required*
func : function
The function to be executed at intervals
**Default:** *required*
timeRange : list
Time range during which to execute the function [intervalStart, intervalStop]
**Default:** `None` uses the entire simulation duration
funcArgs: dict
A dictionary of keyword arguments to feed into the function.
**Default:** `None`
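    Examples
    --------
    A minimal sketch (``saveWeights`` is a hypothetical user-defined callback that
    accepts ``simTime`` plus any keyword arguments supplied via ``funcArgs``)::

        sim.runSimWithIntervalFunc(1000.0, saveWeights, timeRange=[0, 5000],
                                   funcArgs={'label': 'run1'})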
"""
from .. import sim
sim.pc.barrier()
sim.timing('start', 'runTime')
preRun()
h.finitialize(float(sim.cfg.hParams['v_init']))
startTime = 0
stopTime = sim.cfg.duration
if timeRange is not None:
startTime = timeRange[0]
stopTime = timeRange[1]
#kwargs = {'simTime': h.t}
kwargs = {}
if type(funcArgs) == dict:
kwargs.update(funcArgs)
if sim.rank == 0:
print('\nRunning with interval func ...')
if int(startTime) != 0:
sim.pc.psolve(startTime)
sim.pc.barrier()
while round(h.t) < stopTime:
sim.pc.psolve(min(sim.cfg.duration, h.t+interval))
func(simTime=h.t, **kwargs) # function to be called at intervals
if stopTime != sim.cfg.duration:
sim.pc.psolve(sim.cfg.duration)
sim.pc.barrier() # Wait for all hosts to get to this point
sim.timing('stop', 'runTime')
if sim.rank==0:
print((' Done; run time = %0.2f s; real-time ratio: %0.2f.' %
(sim.timingData['runTime'], sim.cfg.duration/1000/sim.timingData['runTime'])))
#------------------------------------------------------------------------------
# Calculate LFP (function called at every time step)
#------------------------------------------------------------------------------
def calculateLFP():
"""
    Function to calculate the extracellular LFP contributions of all compartmental
    cells at the current time step and accumulate them in sim.simData.
"""
from .. import sim
    # Set pointers to i_membrane in each cell (required for LFP calc)
for cell in sim.net.compartCells:
cell.setImembPtr()
# compute
saveStep = int(np.floor(h.t / sim.cfg.recordStep))
for cell in sim.net.compartCells: # compute ecp only from the biophysical cells
gid = cell.gid
im = cell.getImemb() # in nA
tr = sim.net.recXElectrode.getTransferResistance(gid) # in MOhm
ecp = np.dot(tr, im) # in mV (= R * I = MOhm * nA)
if sim.cfg.saveLFPPops:
if cell.gid in sim.net.popForEachGid:
pop = sim.net.popForEachGid[cell.gid]
sim.simData['LFPPops'][pop][saveStep - 1,:] += ecp # contribution of individual cells (stored optionally)
if sim.cfg.saveLFPCells and gid in sim.simData['LFPCells']:
sim.simData['LFPCells'][gid][saveStep - 1,:] = ecp # contribution of individual cells (stored optionally)
sim.simData['LFP'][saveStep - 1,:] += ecp # sum of all cells
#------------------------------------------------------------------------------
# Calculate and print load balance
#------------------------------------------------------------------------------
def loadBalance(printNodeTimes = False):
"""
    Function to calculate and print the parallel load balance (max/min/avg computation time across nodes).
Parameters
----------
printNodeTimes : bool
        Whether to also print each node's individual computation time.
        **Default:** ``False``
"""
from .. import sim
computation_time = sim.pc.step_time()
max_comp_time = sim.pc.allreduce(computation_time, 2)
min_comp_time = sim.pc.allreduce(computation_time, 3)
avg_comp_time = sim.pc.allreduce(computation_time, 1)/sim.nhosts
load_balance = avg_comp_time/max_comp_time
if printNodeTimes:
print('node:',sim.rank,' comp_time:',computation_time)
if sim.rank==0:
print('max_comp_time:', max_comp_time)
print('min_comp_time:', min_comp_time)
print('avg_comp_time:', avg_comp_time)
print('load_balance:',load_balance)
print('\nspike exchange time (run_time-comp_time): ', sim.timingData['runTime'] - max_comp_time)
return [max_comp_time, min_comp_time, avg_comp_time, load_balance]
| Neurosim-lab/netpyne | netpyne/sim/run.py | Python | mit | 10,719 | [
"NEURON"
] | d4c1ce1648241d3df2253482605143c0e43019fab3f021182b2dfcd28612a58d |
"""Base class for mixture models."""
# Author: Wei Xue <xuewei4d@gmail.com>
# Modified by Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
import warnings
from abc import ABCMeta, abstractmethod
from time import time
import numpy as np
from .. import cluster
from ..base import BaseEstimator
from ..base import DensityMixin
from ..exceptions import ConvergenceWarning
from ..utils import check_array, check_random_state
from ..utils.fixes import logsumexp
def _check_shape(param, param_shape, name):
"""Validate the shape of the input parameter 'param'.
Parameters
----------
param : array
param_shape : tuple
name : string
"""
param = np.array(param)
if param.shape != param_shape:
raise ValueError("The parameter '%s' should have the shape of %s, "
"but got %s" % (name, param_shape, param.shape))
def _check_X(X, n_components=None, n_features=None, ensure_min_samples=1):
"""Check the input data X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
n_components : int
Returns
-------
X : array, shape (n_samples, n_features)
"""
X = check_array(X, dtype=[np.float64, np.float32],
ensure_min_samples=ensure_min_samples)
if n_components is not None and X.shape[0] < n_components:
raise ValueError('Expected n_samples >= n_components '
'but got n_components = %d, n_samples = %d'
% (n_components, X.shape[0]))
if n_features is not None and X.shape[1] != n_features:
raise ValueError("Expected the input data X have %d features, "
"but got %d features"
% (n_features, X.shape[1]))
return X
class BaseMixture(DensityMixin, BaseEstimator, metaclass=ABCMeta):
"""Base class for mixture models.
This abstract class specifies an interface for all mixture classes and
provides basic common methods for mixture models.
"""
def __init__(self, n_components, tol, reg_covar,
max_iter, n_init, init_params, random_state, warm_start,
verbose, verbose_interval):
self.n_components = n_components
self.tol = tol
self.reg_covar = reg_covar
self.max_iter = max_iter
self.n_init = n_init
self.init_params = init_params
self.random_state = random_state
self.warm_start = warm_start
self.verbose = verbose
self.verbose_interval = verbose_interval
def _check_initial_parameters(self, X):
"""Check values of the basic parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
if self.n_components < 1:
raise ValueError("Invalid value for 'n_components': %d "
"Estimation requires at least one component"
% self.n_components)
if self.tol < 0.:
raise ValueError("Invalid value for 'tol': %.5f "
"Tolerance used by the EM must be non-negative"
% self.tol)
if self.n_init < 1:
raise ValueError("Invalid value for 'n_init': %d "
"Estimation requires at least one run"
% self.n_init)
if self.max_iter < 1:
raise ValueError("Invalid value for 'max_iter': %d "
"Estimation requires at least one iteration"
% self.max_iter)
if self.reg_covar < 0.:
raise ValueError("Invalid value for 'reg_covar': %.5f "
"regularization on covariance must be "
"non-negative"
% self.reg_covar)
# Check all the parameters values of the derived class
self._check_parameters(X)
@abstractmethod
def _check_parameters(self, X):
"""Check initial parameters of the derived class.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
pass
def _initialize_parameters(self, X, random_state):
"""Initialize the model parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
random_state : RandomState
A random number generator instance.
"""
n_samples, _ = X.shape
if self.init_params == 'kmeans':
resp = np.zeros((n_samples, self.n_components))
label = cluster.KMeans(n_clusters=self.n_components, n_init=1,
random_state=random_state).fit(X).labels_
resp[np.arange(n_samples), label] = 1
elif self.init_params == 'random':
resp = random_state.rand(n_samples, self.n_components)
resp /= resp.sum(axis=1)[:, np.newaxis]
else:
raise ValueError("Unimplemented initialization method '%s'"
% self.init_params)
self._initialize(X, resp)
@abstractmethod
def _initialize(self, X, resp):
"""Initialize the model parameters of the derived class.
Parameters
----------
X : array-like, shape (n_samples, n_features)
resp : array-like, shape (n_samples, n_components)
"""
pass
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
The method fits the model ``n_init`` times and sets the parameters with
which the model has the largest likelihood or lower bound. Within each
trial, the method iterates between E-step and M-step for ``max_iter``
times until the change of likelihood or lower bound is less than
``tol``, otherwise, a ``ConvergenceWarning`` is raised.
If ``warm_start`` is ``True``, then ``n_init`` is ignored and a single
initialization is performed upon the first call. Upon consecutive
calls, training starts where it left off.
Parameters
----------
X : array-like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self.fit_predict(X, y)
return self
def fit_predict(self, X, y=None):
"""Estimate model parameters using X and predict the labels for X.
The method fits the model n_init times and sets the parameters with
which the model has the largest likelihood or lower bound. Within each
trial, the method iterates between E-step and M-step for `max_iter`
times until the change of likelihood or lower bound is less than
`tol`, otherwise, a `ConvergenceWarning` is raised. After fitting, it
predicts the most probable label for the input data points.
.. versionadded:: 0.20
Parameters
----------
X : array-like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
labels : array, shape (n_samples,)
Component labels.
"""
X = _check_X(X, self.n_components, ensure_min_samples=2)
self._check_initial_parameters(X)
# if we enable warm_start, we will have a unique initialisation
do_init = not(self.warm_start and hasattr(self, 'converged_'))
n_init = self.n_init if do_init else 1
max_lower_bound = -np.infty
self.converged_ = False
random_state = check_random_state(self.random_state)
n_samples, _ = X.shape
for init in range(n_init):
self._print_verbose_msg_init_beg(init)
if do_init:
self._initialize_parameters(X, random_state)
lower_bound = (-np.infty if do_init else self.lower_bound_)
for n_iter in range(1, self.max_iter + 1):
prev_lower_bound = lower_bound
log_prob_norm, log_resp = self._e_step(X)
self._m_step(X, log_resp)
lower_bound = self._compute_lower_bound(
log_resp, log_prob_norm)
change = lower_bound - prev_lower_bound
self._print_verbose_msg_iter_end(n_iter, change)
if abs(change) < self.tol:
self.converged_ = True
break
self._print_verbose_msg_init_end(lower_bound)
if lower_bound > max_lower_bound:
max_lower_bound = lower_bound
best_params = self._get_parameters()
best_n_iter = n_iter
if not self.converged_:
warnings.warn('Initialization %d did not converge. '
'Try different init parameters, '
'or increase max_iter, tol '
'or check for degenerate data.'
% (init + 1), ConvergenceWarning)
self._set_parameters(best_params)
self.n_iter_ = best_n_iter
self.lower_bound_ = max_lower_bound
# Always do a final e-step to guarantee that the labels returned by
# fit_predict(X) are always consistent with fit(X).predict(X)
# for any value of max_iter and tol (and any random_state).
_, log_resp = self._e_step(X)
return log_resp.argmax(axis=1)
def _e_step(self, X):
"""E step.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
log_prob_norm : float
Mean of the logarithms of the probabilities of each sample in X
log_responsibility : array, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in X.
"""
log_prob_norm, log_resp = self._estimate_log_prob_resp(X)
return np.mean(log_prob_norm), log_resp
@abstractmethod
def _m_step(self, X, log_resp):
"""M step.
Parameters
----------
X : array-like, shape (n_samples, n_features)
log_resp : array-like, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in X.
"""
pass
@abstractmethod
def _check_is_fitted(self):
pass
@abstractmethod
def _get_parameters(self):
pass
@abstractmethod
def _set_parameters(self, params):
pass
def score_samples(self, X):
"""Compute the weighted log probabilities for each sample.
Parameters
----------
X : array-like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
log_prob : array, shape (n_samples,)
Log probabilities of each data point in X.
"""
self._check_is_fitted()
X = _check_X(X, None, self.means_.shape[1])
return logsumexp(self._estimate_weighted_log_prob(X), axis=1)
def score(self, X, y=None):
"""Compute the per-sample average log-likelihood of the given data X.
Parameters
----------
X : array-like, shape (n_samples, n_dimensions)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
log_likelihood : float
Log likelihood of the Gaussian mixture given X.
"""
return self.score_samples(X).mean()
def predict(self, X):
"""Predict the labels for the data samples in X using trained model.
Parameters
----------
X : array-like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
labels : array, shape (n_samples,)
Component labels.
"""
self._check_is_fitted()
X = _check_X(X, None, self.means_.shape[1])
return self._estimate_weighted_log_prob(X).argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of each component given the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
resp : array, shape (n_samples, n_components)
Returns the probability each Gaussian (state) in
the model given each sample.
"""
self._check_is_fitted()
X = _check_X(X, None, self.means_.shape[1])
_, log_resp = self._estimate_log_prob_resp(X)
return np.exp(log_resp)
def sample(self, n_samples=1):
"""Generate random samples from the fitted Gaussian distribution.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_samples, n_features)
Randomly generated sample
y : array, shape (nsamples,)
Component labels
"""
self._check_is_fitted()
if n_samples < 1:
raise ValueError(
"Invalid value for 'n_samples': %d . The sampling requires at "
"least one sample." % (self.n_components))
_, n_features = self.means_.shape
rng = check_random_state(self.random_state)
n_samples_comp = rng.multinomial(n_samples, self.weights_)
if self.covariance_type == 'full':
X = np.vstack([
rng.multivariate_normal(mean, covariance, int(sample))
for (mean, covariance, sample) in zip(
self.means_, self.covariances_, n_samples_comp)])
elif self.covariance_type == "tied":
X = np.vstack([
rng.multivariate_normal(mean, self.covariances_, int(sample))
for (mean, sample) in zip(
self.means_, n_samples_comp)])
else:
X = np.vstack([
mean + rng.randn(sample, n_features) * np.sqrt(covariance)
for (mean, covariance, sample) in zip(
self.means_, self.covariances_, n_samples_comp)])
y = np.concatenate([np.full(sample, j, dtype=int)
for j, sample in enumerate(n_samples_comp)])
return (X, y)
def _estimate_weighted_log_prob(self, X):
"""Estimate the weighted log-probabilities, log P(X | Z) + log weights.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
        weighted_log_prob : array, shape (n_samples, n_components)
"""
return self._estimate_log_prob(X) + self._estimate_log_weights()
@abstractmethod
def _estimate_log_weights(self):
"""Estimate log-weights in EM algorithm, E[ log pi ] in VB algorithm.
Returns
-------
log_weight : array, shape (n_components, )
"""
pass
@abstractmethod
def _estimate_log_prob(self, X):
"""Estimate the log-probabilities log P(X | Z).
        Compute the log-probabilities per component for each sample.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
        log_prob : array, shape (n_samples, n_components)
"""
pass
def _estimate_log_prob_resp(self, X):
"""Estimate log probabilities and responsibilities for each sample.
Compute the log probabilities, weighted log probabilities per
component and responsibilities for each sample in X with respect to
the current state of the model.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
log_prob_norm : array, shape (n_samples,)
log p(X)
log_responsibilities : array, shape (n_samples, n_components)
logarithm of the responsibilities
"""
weighted_log_prob = self._estimate_weighted_log_prob(X)
log_prob_norm = logsumexp(weighted_log_prob, axis=1)
with np.errstate(under='ignore'):
# ignore underflow
log_resp = weighted_log_prob - log_prob_norm[:, np.newaxis]
return log_prob_norm, log_resp
def _print_verbose_msg_init_beg(self, n_init):
"""Print verbose message on initialization."""
if self.verbose == 1:
print("Initialization %d" % n_init)
elif self.verbose >= 2:
print("Initialization %d" % n_init)
self._init_prev_time = time()
self._iter_prev_time = self._init_prev_time
def _print_verbose_msg_iter_end(self, n_iter, diff_ll):
"""Print verbose message on initialization."""
if n_iter % self.verbose_interval == 0:
if self.verbose == 1:
print(" Iteration %d" % n_iter)
elif self.verbose >= 2:
cur_time = time()
print(" Iteration %d\t time lapse %.5fs\t ll change %.5f" % (
n_iter, cur_time - self._iter_prev_time, diff_ll))
self._iter_prev_time = cur_time
def _print_verbose_msg_init_end(self, ll):
"""Print verbose message on the end of iteration."""
if self.verbose == 1:
print("Initialization converged: %s" % self.converged_)
elif self.verbose >= 2:
print("Initialization converged: %s\t time lapse %.5fs\t ll %.5f" %
(self.converged_, time() - self._init_prev_time, ll))
| chrsrds/scikit-learn | sklearn/mixture/base.py | Python | bsd-3-clause | 18,167 | [
"Gaussian"
] | 8b154c1662b6fb0f80345a71bdaea1a142ee680cb452e9b3888550424b49257e |
#!/usr/bin/env python
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import sys
import unittest
import glob
import moosesqa
import mooseutils
class TestSQA(unittest.TestCase):
"""Tests that make sure that SQA templates and SQA collections are documented."""
@classmethod
def setUpClass(cls):
cls.ROOT_DIR = mooseutils.git_root_dir(os.path.dirname(__file__))
cls.TEMPLATE_DIR = os.path.join(cls.ROOT_DIR, 'framework', 'doc', 'content', 'templates', 'sqa')
cls.TEMPLATE_NAMES =['far.md.template', 'rtm.md.template', 'sdd.md.template', 'srs.md.template',
'stp.md.template', 'vvr.md.template', 'cci.md.template', 'scs.md.template',
'sll.md.template', 'app_index.md.template',
'app_far.md.template', 'app_rtm.md.template', 'app_sdd.md.template',
'app_srs.md.template', 'app_stp.md.template', 'app_vvr.md.template',
'app_cci.md.template', 'app_scs.md.template', 'app_sll.md.template']
cls.DOC_FILE = os.path.join(cls.ROOT_DIR, 'modules', 'doc', 'content', 'python', 'MooseDocs', 'extensions', 'sqa.md')
cls.COLLECTIONS = {'FUNCTIONAL', 'USABILITY', 'PERFORMANCE', 'SYSTEM', 'FAILURE_ANALYSIS'}
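    # The tests below verify that each expected template file exists (with no
    # extras on disk), that moosesqa's collection names match the expected set,
    # and that both templates and collections are documented in sqa.md.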
def testTemplatesFiles(self):
for tname in TestSQA.TEMPLATE_NAMES:
fname = os.path.join(TestSQA.TEMPLATE_DIR, tname)
self.assertTrue(os.path.isfile(fname))
for filename in glob.glob(os.path.join(TestSQA.TEMPLATE_DIR, '*.md.template')):
self.assertIn(os.path.basename(filename), TestSQA.TEMPLATE_NAMES)
def testTemplateDocs(self):
self.assertTrue(os.path.isfile(TestSQA.DOC_FILE))
with open(TestSQA.DOC_FILE, 'r') as fid:
content = fid.read()
for tname in TestSQA.TEMPLATE_NAMES:
self.assertIn('+{}+\\'.format(tname), content)
def testCollectionNames(self):
self.assertEqual(moosesqa.MOOSESQA_COLLECTIONS, TestSQA.COLLECTIONS)
def testCollectionDocs(self):
self.assertTrue(os.path.isfile(TestSQA.DOC_FILE))
with open(TestSQA.DOC_FILE, 'r') as fid:
content = fid.read()
for cname in TestSQA.COLLECTIONS:
self.assertIn('+{}+:'.format(cname), content)
if __name__ == '__main__':
unittest.main(verbosity=2)
| harterj/moose | python/moosesqa/test/test_documents.py | Python | lgpl-2.1 | 2,645 | [
"MOOSE"
] | 866cb4c5fb8f55a9f2395fcb9877473fc0001ffc38e7c1e842ffdf1d1b4f8308 |