text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
from django.db import connection
from django.http import HttpResponse
from catmaid.control.authentication import requires_user_role
from catmaid.models import UserRole
from rest_framework.decorators import api_view
from rest_framework.response import Response
class LocationLookupError(Exception):
    """Raised when no representative location can be found for a transaction."""
    pass
@api_view(["GET"])
@requires_user_role([UserRole.Browse])
def transaction_collection(request, project_id):
    """Get a collection of all available transactions in the passed in project.
    ---
    parameters:
      - name: range_start
        description: The first result element index.
        type: integer
        paramType: form
        required: false
      - name: range_length
        description: The maximum number result elements.
        type: integer
        paramType: form
        required: false
    models:
      transaction_entity:
        id: transaction_entity
        description: A result transaction.
        properties:
          change_type:
            type: string
            description: The type of change, either Backend, Migration or External.
            required: true
          execution_time:
            type: string
            description: The time point of the transaction.
            required: true
          label:
            type: string
            description: A reference to the creator of the transaction, the caller. Can be null.
            required: true
          user_id:
            type: integer
            description: User ID of transaction creator. Can be null.
            required: true
          project_id:
            type: integer
            description: Project ID of data changed in transaction. Can be null.
            required: true
          transaction_id:
            type: integer
            description: Transaction ID, only in combination with timestamp unique.
            required: true
    type:
      transactions:
        type: array
        items:
          $ref: transaction_entity
        description: Matching transactions
        required: true
      total_count:
        type: integer
        description: The total number of elements
        required: true
    """
    if request.method == 'GET':
        # Optional pagination; both values are bound as SQL parameters, so
        # they are safe from injection.
        range_start = request.GET.get('range_start', None)
        range_length = request.GET.get('range_length', None)
        params = [project_id]
        constraints = []
        if range_start:
            constraints.append("OFFSET %s")
            params.append(range_start)
        if range_length:
            constraints.append("LIMIT %s")
            params.append(range_length)
        cursor = connection.cursor()
        # Each row carries the whole record as JSON plus the total
        # (un-paginated) match count via a window function. PostgreSQL
        # accepts OFFSET/LIMIT in either order, so appending OFFSET before
        # LIMIT here is valid.
        cursor.execute("""
            SELECT row_to_json(cti), COUNT(*) OVER() AS full_count
            FROM catmaid_transaction_info cti
            WHERE project_id = %s
            ORDER BY execution_time DESC {}
        """.format(" ".join(constraints)), params)
        result = cursor.fetchall()
        json_data = [row[0] for row in result]
        # The window count is identical on every row; read it off the first.
        total_count = result[0][1] if len(json_data) > 0 else 0
        return Response({
            "transactions": json_data,
            "total_count": total_count
        })
@api_view(["GET"])
@requires_user_role([UserRole.Browse])
def get_location(request, project_id):
    """Try to associate a location in the passed in project for a particular
    transaction.
    ---
    parameters:
      transaction_id:
        type: integer
        required: true
        description: Transaction ID in question
        paramType: form
      execution_time:
        type: string
        required: true
        description: Execution time of the transaction
        paramType: form
      label:
        type: string
        required: false
        description: Optional label of the transaction to avoid extra lookup
        paramType: form
    type:
      x:
        type: integer
        required: true
      y:
        type: integer
        required: true
      z:
        type: integer
        required: true
    """
    if request.method == 'GET':
        transaction_id = request.GET.get('transaction_id', None)
        if not transaction_id:
            raise ValueError("Need transaction ID")
        transaction_id = int(transaction_id)
        execution_time = request.GET.get('execution_time', None)
        if not execution_time:
            raise ValueError("Need execution time")
        cursor = connection.cursor()
        # Without an explicit label we must look it up, because the label
        # selects which location query provider to use below.
        label = request.GET.get('label', None)
        if not label:
            cursor.execute("""
                SELECT label FROM catmaid_transaction_info
                WHERE transaction_id = %s AND execution_time = %s
            """, (transaction_id, execution_time))
            result = cursor.fetchone()
            if not result:
                raise ValueError("Couldn't find label for transaction {} and "
                        "execution time {}".format(transaction_id, execution_time))
            label = result[0]
        # Look first in live table and then in history table. Use only
        # transaction ID for lookup
        location = None
        provider = location_queries.get(label)
        if not provider:
            raise LocationLookupError("A representative location for this change was not found")
        query = provider.get()
        # NOTE(review): the loop body unconditionally clears `query`, so this
        # while-loop runs at most once; it reads like the leftover of a retry
        # loop — confirm before simplifying.
        while query:
            cursor.execute(query, (transaction_id, ))
            query = None
            result = cursor.fetchall()
            if result and len(result) == 1:
                loc = result[0]
                if len(loc) == 3:
                    location = (loc[0], loc[1], loc[2])
                    # Redundant: `query` was already cleared above.
                    query = None
                else:
                    raise ValueError("Couldn't read location information, "
                            "expected 3 columns, got {}".format(len(loc)))
        if not location or len(location) != 3:
            raise ValueError("Couldn't find location for transaction {}".format(transaction_id))
        return Response({
            'x': location[0],
            'y': location[1],
            'z': location[2]
        })
class LocationQuery(object):
    """A pre-rendered SQL query that selects an (X, Y, Z) location tuple.

    Any "{history}" placeholder in the template is replaced by the history
    suffix (which redirects a table name to a view combining the live table
    with its history), and any "{txid}" placeholder by the name of the
    transaction-ID column.
    """

    def __init__(self, query, history_suffix='__with_history', txid_column='txid'):
        # Render the template exactly once and remember the parameters used.
        substitutions = {'history': history_suffix, 'txid': txid_column}
        self.history_suffix = history_suffix
        self.txid_column = txid_column
        self.query = query.format(**substitutions)

    def get(self):
        """Return the rendered SQL query string."""
        return self.query
class LocationRef(object):
    """Lazy alias to another entry of a query dictionary.

    The referenced entry is resolved at ``get()`` time, so the target may be
    registered after this reference is created.
    """

    def __init__(self, d, key):
        self.d = d
        self.key = key

    def get(self):
        """Return the query of the entry this reference points at."""
        return self.d[self.key].get()
location_queries = {}
location_queries.update({
# For annotations, select the root of the annotated neuron
'annotations.add': LocationQuery("""
SELECT location_x, location_y, location_z
FROM treenode{history} t
JOIN class_instance_class_instance{history} cici_s
ON (cici_s.class_instance_a = t.skeleton_id
AND t.parent_id IS NULL)
JOIN class_instance_class_instance{history} cici_e
ON (cici_s.class_instance_b = cici_e.class_instance_a
AND cici_e.{txid} = %s)
LIMIT 1
"""),
'annotations.remove': LocationQuery("""
SELECT location_x, location_y, location_z
FROM treenode{history} t
JOIN class_instance_class_instance{history} cici_s
ON (cici_s.class_instance_a = t.skeleton_id
AND t.parent_id IS NULL)
JOIN class_instance_class_instance__history cici_e
ON (cici_s.class_instance_b = cici_e.class_instance_a
AND cici_e.exec_transaction_id = %s)
LIMIT 1
"""),
'connectors.create': LocationRef(location_queries, "nodes.update_location"),
'connectors.remove': LocationQuery("""
SELECT c.location_x, c.location_y, c.location_z
FROM location__history c
WHERE c.exec_transaction_id = %s
LIMIT 1
"""),
'labels.remove': LocationQuery("""
SELECT t.location_x, t.location_y, t.location_z
FROM treenode_class_instance__history tci
JOIN treenode{history} t
ON t.id = tci.treenode_id
WHERE tci.exec_transaction_id = %s
LIMIT 1
"""),
'labels.update': LocationQuery("""
SELECT t.location_x, t.location_y, t.location_z
FROM treenode_class_instance{history} tci
JOIN treenode{history} t
ON t.id = tci.treenode_id
WHERE tci.{txid} = %s
LIMIT 1
"""),
'links.create': LocationQuery("""
SELECT t.location_x, t.location_y, t.location_z
FROM treenode_connector{history} tc
JOIN treenode{history} t
ON t.id = tc.treenode_id
WHERE tc.{txid} = %s
LIMIT 1
"""),
'links.remove': LocationQuery("""
SELECT t.location_x, t.location_y, t.location_z
FROM treenode_connector__history tc
JOIN treenode{history} t
ON t.id = tc.treenode_id
WHERE tc.{txid} = %s
"""),
'neurons.remove': LocationQuery("""
SELECT location_x, location_y, location_z
FROM treenode{history} t
JOIN class_instance_class_instance{history} cici_s
ON (cici_s.class_instance_a = t.skeleton_id
AND t.parent_id IS NULL)
JOIN class_instance_class_instance__history cici_e
ON (cici_s.class_instance_b = cici_e.class_instance_a
AND cici_e.{txid} = %s)
LIMIT 1
"""),
'neurons.rename': LocationQuery("""
SELECT location_x, location_y, location_z
FROM treenode{history} t
JOIN class_instance_class_instance{history} cici_s
ON (cici_s.class_instance_a = t.skeleton_id
AND t.parent_id IS NULL)
JOIN class_instance_class_instance__history{history} cici_e
ON (cici_s.class_instance_b = cici_e.class_instance_a
AND cici_e.{txid} = %s)
LIMIT 1
"""),
'nodes.add_or_update_review': LocationQuery("""
SELECT t.location_x, t.location_y, t.location_z
FROM review{history} r
JOIN treenode{history} t
ON t.id = r.treenode_id
WHERE r.{txid} = %s
LIMIT 1
"""),
'nodes.update_location': LocationQuery("""
SELECT location_x, location_y, location_z
FROM location{history}
WHERE {txid} = %s
LIMIT 1
"""),
'textlabels.create': LocationQuery("""
SELECT t.location_x, t.location_y, t.location_z
FROM textlabel{history} t
JOIN textlabel_location{history} tl
ON t.id = tl.textlabel_id
WHERE t.{txid} = %s
LIMIT 1
"""),
'textlabels.update': LocationRef(location_queries, "textlabels.create"),
'textlabels.delete': LocationQuery("""
SELECT t.location_x, t.location_y, t.location_z
FROM textlabel__history t
JOIN textlabel_location{history} tl
ON t.id = tl.textlabel_id
WHERE t.{txid} = %s
LIMIT 1
"""),
# Look transaction and edition time up in treenode table and return node
# location.
'treenodes.create': LocationRef(location_queries, "nodes.update_location"),
'treenodes.insert': LocationRef(location_queries, "nodes.update_location"),
'treenodes.remove': LocationRef(location_queries, "connectors.remove"),
'treenodes.update_confidence': LocationRef(location_queries, "nodes.update_location"),
'treenodes.update_parent': LocationRef(location_queries, "nodes.update_location"),
'treenodes.update_radius': LocationRef(location_queries, "nodes.update_location"),
'treenodes.suppress_virtual_node': LocationQuery("""
SELECT t.location_x, t.location_y, t.location_z
FROM suppressed_virtual_treenode{history} svt
JOIN treenode{history} t
ON t.id = svt.child_id
WHERE svt.{txid} = %s
LIMIT 1
"""),
'treenodes.unsuppress_virtual_node': LocationRef(location_queries,
"treenodes.suppress_virtual_node"),
})
|
catsop/CATMAID
|
django/applications/catmaid/control/transaction.py
|
Python
|
gpl-3.0
| 12,262
|
[
"NEURON"
] |
4fe307a1ce2c206f23a2b06efdbf8256f40f2b2ab468eaa8864696d52820afb9
|
#===============================================================================
# LICENSE XOT-Framework - CC BY-NC-ND
#===============================================================================
# This work is licenced under the Creative Commons
# Attribution-Non-Commercial-No Derivative Works 3.0 Unported License. To view a
# copy of this licence, visit http://creativecommons.org/licenses/by-nc-nd/3.0/
# or send a letter to Creative Commons, 171 Second Street, Suite 300,
# San Francisco, California 94105, USA.
#===============================================================================
import sys
import xbmc
class LockWithDialog(object):
    """ Decorator Class that locks a method using a busy dialog """

    def __init__(self, logger = None):
        """ Initializes the decorator with a specific method.
        We need to use the Decorator as a function @LockWithDialog() to get the
        'self' parameter passed on.

        :param logger: optional logger object with a Debug(msg) method.
        """
        self.logger = logger
        return

    def __call__(self, wrappedFunction):
        """ When the method is called this is executed.

        Returns a wrapper that shows Kodi's busy dialog while the wrapped
        function runs, and hides it again on both the success and the
        exception path.
        """
        def __InnerWrappedFunction(*args, **kwargs):
            """ Function that get's called instead of the decorated function """
            # show the busy dialog
            if self.logger:
                self.logger.Debug("Locking interface and showing BusyDialog")
            xbmc.executebuiltin("ActivateWindow(busydialog)")
            try:
                response = wrappedFunction(*args, **kwargs)
                #time.sleep(2)
            except Exception:
                # Hide the busy Dialog
                if self.logger:
                    self.logger.Debug("Un-locking interface and hiding BusyDialog")
                xbmc.executebuiltin("Dialog.Close(busydialog)")
                # re-raise the exception with the original traceback info
                # (Python 2 three-expression raise; this file is Python 2 only)
                # see http://nedbatchelder.com/blog/200711/rethrowing_exceptions_in_python.html
                errorInfo = sys.exc_info()
                raise errorInfo[1], None, errorInfo[2]
            # Hide the busy Dialog
            if self.logger:
                self.logger.Debug("Un-locking interface and hiding BusyDialog")
            xbmc.executebuiltin("Dialog.Close(busydialog)")
            return response
        return __InnerWrappedFunction
|
SMALLplayer/smallplayer-image-creator
|
storage/.xbmc/addons/net.rieter.xot.smallplayer/resources/libs/locker.py
|
Python
|
gpl-2.0
| 2,507
|
[
"VisIt"
] |
2a7f4d4527ebc7cf7559660793f4fc4ce08e097be9d16c22eff6c3e73c330e01
|
# Copyright (c) 2017, Henrique Miranda
# All rights reserved.
#
# This file is part of the yambopy project
#
from yambopy import *
from netCDF4 import Dataset
from math import sqrt
import numpy as np
from cmath import exp
# Imaginary unit.
I = complex(0,1)
# Hartree -> electron-volt conversion factor.
ha2ev = 27.211396132
# electron-volt -> cm^-1 conversion factor.
ev2cm1 = 8065.54429
# Squared modulus of a complex number.
abs2 = lambda x: x.real**2 + x.imag**2
class YamboElectronPhononDB():
    """
    Python class to read the electron-phonon matrix elements from yambo

    Dimensions are read from the main ndb.elph_gkkp database; per-q-point
    frequencies, polarization vectors and gkkp matrix elements are read
    from its fragment files by readDB(). (Python 2 code: print statements,
    xrange, integer division.)
    """
    def __init__(self,lattice,filename='ndb.elph_gkkp',save='SAVE',only_freqs=False):
        self.lattice = lattice
        self.save = save
        self.filename = "%s/%s"%(save,filename)
        self.ph_eigenvalues = None
        self.car_kpoints = lattice.car_kpoints
        self.red_kpoints = lattice.red_kpoints
        #read dimensions of electron phonon parameters
        try:
            database = Dataset(self.filename)
        except:
            print "error opening %s in YamboElectronPhononDB"%self.filename
            exit()
        # q-points are stored transposed in the netCDF file
        self.qpoints = database.variables['PH_Q'][:].T
        self.nmodes, self.nqpoints, self.nkpoints, self.nbands = database.variables['PARS'][:4].astype(int)
        # three phonon modes per atom (Python 2 integer division)
        self.natoms = self.nmodes/3
        database.close()
        self.readDB()

    def get_elphon(self,dir=0):
        # NOTE(review): `expand_kpts` is not defined in this class and
        # `get_elphon_databases` does not exist either (readDB() is probably
        # meant); nks/nss are unused. This method looks stale — verify.
        if self.gkkp is None:
            self.get_elphon_databases()
        kpts, nks, nss = self.expand_kpts()
        gkkp = self.gkkp
        return gkkp, kpts

    def readDB(self,only_freqs=False):
        """
        Load all the gkkp databases to memory
        """
        self.ph_eigenvalues = np.zeros([self.nqpoints,self.nmodes])
        self.ph_eigenvectors = np.zeros([self.nqpoints,self.nmodes,self.nmodes/3,3],dtype=np.complex64)
        if not only_freqs:
            self.gkkp = np.zeros([self.nqpoints,self.nkpoints,self.nmodes,self.nbands,self.nbands],dtype=np.complex64)
        for nq in xrange(self.nqpoints):
            # One fragment file per q-point, 1-based numbering.
            filename = '%s_fragment_%d'%(self.filename,nq+1)
            database = Dataset(filename)
            self.ph_eigenvalues[nq] = np.sqrt(database.variables['PH_FREQS%d'%(nq+1)][:])
            p_re = database.variables['POLARIZATION_VECTORS_REAL'][:].T
            p_im = database.variables['POLARIZATION_VECTORS_IMAG'][:].T
            self.ph_eigenvectors[nq] = p_re + p_im*I
            if not only_freqs:
                # Stored as separate real/imag parts along axis 1.
                gkkp = database.variables['ELPH_GKKP_Q%d'%(nq+1)][:]
                self.gkkp[nq] = (gkkp[:,0,:,:] + I*gkkp[:,1,:,:]).reshape([self.nkpoints,self.nmodes,self.nbands,self.nbands])
            database.close()
        if not only_freqs:
            return self.gkkp

    def __str__(self):
        # NOTE(review): `get_elphon_databases` is not defined in this class;
        # readDB() is probably the method that was meant — confirm.
        if self.ph_eigenvalues is None:
            self.get_elphon_databases()
        s = 'nqpoints: %d\n'%self.nqpoints
        s+= 'nkpoints: %d\n'%self.nkpoints
        s+= 'nmodes: %d\n'%self.nmodes
        s+= 'natoms: %d\n'%self.natoms
        s+= 'nbands: %d\n'%self.nbands
        for nq in xrange(self.nqpoints):
            s+= 'nqpoint %d\n'%nq
            for n,mode in enumerate(self.ph_eigenvectors[nq]):
                s+= 'mode %d freq: %lf cm-1\n'%(n,self.ph_eigenvalues[nq][n]*ha2ev*ev2cm1)
                for a in xrange(self.natoms):
                    s += ("%12.8lf "*3+'\n')%tuple(mode[a].real)
        return s
if __name__ == '__main__':
    # NOTE(review): this demo block is broken — `ElectronPhononDB` is not
    # defined in this module (the class above is `YamboElectronPhononDB`,
    # whose constructor also requires a `lattice` argument), and
    # `get_databases` does not exist. It will raise NameError if run.
    elph = ElectronPhononDB()
    print elph
    elph.get_databases()
|
henriquemiranda/yambopy
|
yambopy/dbs/elphondb.py
|
Python
|
bsd-3-clause
| 3,462
|
[
"Yambo"
] |
477399e5df447fdf920e4a490185ad4f557c3333318f4444ba6ef03494ed68f4
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import print_function, absolute_import
import logging
import os
import posixpath
import re
import shutil
from commoncode import fileutils
# Module-level logger; debug wiring kept below for quick local enabling.
logger = logging.getLogger(__name__)
DEBUG = False
# import sys
# logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
# logger.setLevel(logging.DEBUG)

# Directory containing bundled binaries shipped with extractcode.
root_dir = os.path.join(os.path.dirname(__file__), 'bin')

# Suffix added to extracted target_dir paths
EXTRACT_SUFFIX = r'-extract'

# high level archive "kinds"
docs = 1
regular = 2
regular_nested = 3
package = 4
file_system = 5
patches = 6
special_package = 7

# Human-readable labels for each kind code.
# NOTE(review): kind 3 (regular_nested) is labeled 'regular', same as kind 2;
# confirm whether this is intentional.
kind_labels = {
    1: 'docs',
    2: 'regular',
    3: 'regular',
    4: 'package',
    5: 'file_system',
    6: 'patches',
    7: 'special_package',
}

# note: do not include special_package in all by default
all_kinds = (regular, regular_nested, package, file_system, docs, patches, special_package)
default_kinds = (regular, regular_nested, package)

# map user-visible extract types to tuples of "kinds"
extract_types = {
    'default': (regular, regular_nested, package,),
    'all': all_kinds,
    'package': (package,),
    'filesystem': (file_system,),
    'doc': (docs,),
    'patch': (patches,),
    'special_package': (special_package,),
}
def is_extraction_path(path):
    """
    Return True if the path points to an extraction path (i.e. ends with the
    extraction suffix, ignoring trailing slashes). A falsy path is returned
    unchanged.
    """
    if not path:
        return path
    return path.rstrip('\\/').endswith(EXTRACT_SUFFIX)
def is_extracted(location):
    """
    Return True if the location was already extracted to the corresponding
    extraction location. A falsy location is returned unchanged.
    """
    if not location:
        return location
    return os.path.exists(get_extraction_path(location))
def get_extraction_path(path):
    """
    Return the path where to extract: the input path, stripped of trailing
    slashes/backslashes, with the extraction suffix appended.
    """
    trimmed = path.rstrip('\\/')
    return trimmed + EXTRACT_SUFFIX
def remove_archive_suffix(path):
    """
    Remove all the extracted suffix from a path.

    Note: re.sub removes every occurrence of the suffix anywhere in the
    path (e.g. in intermediate directory names), not only at the end.
    """
    return re.sub(EXTRACT_SUFFIX, '', path)
def remove_backslashes(directory):
    """
    Walk a directory and rename the files if their names contain backslashes.
    Return a list of errors if any.

    Files whose names contain backslashes or '..' segments are normalized to
    POSIX-style relative paths and moved to the resulting subdirectory of
    their containing directory; files that cannot be moved are collected in
    the returned error list.
    """
    errors = []
    for top, _, files in os.walk(str(directory)):
        for filename in files:
            if '\\' in filename or '..' in filename:
                try:
                    # Normalize the embedded path: POSIX separators, no
                    # leading slash, no '..' segments.
                    new_path = fileutils.as_posixpath(filename)
                    new_path = new_path.strip('/')
                    new_path = posixpath.normpath(new_path)
                    new_path = new_path.replace('..', '/')
                    new_path = new_path.strip('/')
                    new_path = posixpath.normpath(new_path)
                    segments = new_path.split('/')
                    # NOTE(review): this rebinds the `directory` parameter;
                    # harmless here because os.walk() was already started,
                    # but confusing — consider a different local name.
                    directory = os.path.join(top, *segments[:-1])
                    fileutils.create_dir(directory)
                    shutil.move(os.path.join(top, filename), os.path.join(top, *segments))
                except Exception:
                    # Best-effort: record the failing file and keep walking.
                    errors.append(os.path.join(top, filename))
    return errors
def extracted_files(location):
    """
    Yield the locations of extracted files in a directory location.
    """
    assert location
    logger.debug('extracted_files for: %(location)r' % locals())
    # Delegates iteration to the commoncode fileutils walker.
    return fileutils.file_iter(location)
def new_name(location, is_dir=False):
    """
    Return a new non-existing location usable to write a file or create
    directory without overwriting existing files or directories in the same
    parent directory, ignoring the case of the name.
    The case of the name is ignored to ensure that similar results are returned
    across case sensitive (*nix) and case insensitive file systems.
    To find a new unique name:
    * pad a directory name with _X where X is an incremented number.
    * pad a file base name with _X where X is an incremented number and keep
      the extension unchanged.
    """
    assert location
    location = location.rstrip('\\/')
    name = fileutils.file_name(location).strip()
    if (not name or name == '.'
        # windows bare drive path as in c: or z:
        or (name and len(name) == 2 and name.endswith(':'))):
        name = 'file'
    parent = fileutils.parent_directory(location)
    # all existing files or directories, lowercased for case-insensitive tests
    siblings_lower = set(s.lower() for s in os.listdir(parent))
    if name.lower() not in siblings_lower:
        # FIX: use os.path.join consistently. The original used posixpath.join
        # here but os.path.join in the padded-name branch below, which would
        # yield mixed separators on Windows.
        return os.path.join(parent, name)
    ext = fileutils.file_extension(name)
    base_name = fileutils.file_base_name(name)
    if is_dir:
        # directories have no extension
        ext = ''
        base_name = name
    counter = 1
    while True:
        candidate = base_name + '_' + str(counter) + ext
        if candidate.lower() not in siblings_lower:
            break
        counter += 1
    return os.path.join(parent, candidate)
class Entry(object):
    """
    An archive entry presenting the common data that exists in all entries
    handled by the various underlying extraction libraries.
    This class interface is similar to the TypeCode Type class.
    """
    # the actual posix path as present in the archive (relative, absolute, ...)
    path = None
    # path to use for links, typically a normalized target
    actual_path = None
    # where we will really extract, relative to the archive root
    extraction_path = None
    size = 0
    date = None
    is_file = True
    is_dir = False
    is_special = False
    is_hardlink = False
    is_symlink = False
    is_broken_link = False
    link_target = None
    should_extract = False

    def fix_path(self):
        """
        Fix paths that are absolute, relative, backslashes and other
        shenanigans. Update the extraction path.
        """

    def __repr__(self):
        msg = (
            '%(__name__)s(path=%(path)r, size=%(size)r, '
            'is_file=%(is_file)r, is_dir=%(is_dir)r, '
            'is_hardlink=%(is_hardlink)r, is_symlink=%(is_symlink)r, '
            'link_target=%(link_target)r, is_broken_link=%(is_broken_link)r, '
            'is_special=%(is_special)r)'
        )
        # Merge class-level defaults with instance overrides, then add the
        # class name so the %-format lookup above resolves every field.
        attrs = dict(self.__class__.__dict__)
        attrs.update(self.__dict__)
        attrs['__name__'] = self.__class__.__name__
        return msg % attrs

    def asdict(self):
        """Return the entry data as a plain dict."""
        return {
            'path': self.path,
            'size': self.size,
            'is_file': self.is_file,
            'is_dir': self.is_dir,
            'is_hardlink': self.is_hardlink,
            'is_symlink': self.is_symlink,
            'link_target': self.link_target,
            'is_broken_link': self.is_broken_link,
            'is_special': self.is_special
        }
class ExtractError(Exception):
    """Base class for all extraction errors."""
    pass

class ExtractErrorPasswordProtected(ExtractError):
    """The archive is password-protected and cannot be extracted."""
    pass

class ExtractErrorFailedToExtract(ExtractError):
    """The extraction failed outright."""
    pass

class ExtractWarningIncorrectEntry(ExtractError):
    """Non-fatal: an archive entry was malformed."""
    pass

class ExtractWarningTrailingGarbage(ExtractError):
    """Non-fatal: the archive has trailing garbage data."""
    pass
|
retrography/scancode-toolkit
|
src/extractcode/__init__.py
|
Python
|
apache-2.0
| 8,181
|
[
"VisIt"
] |
c6fa9724283c488da454207cf3c4b2e526c51aecff80714d485ef7d299d5e5be
|
from __future__ import print_function
from builtins import range
import numpy as np
from numpy import cos, sin, pi
from enthought.tvtk.api import tvtk
from enthought.mayavi.scripts import mayavi2
def aligned_points(grid, nz=1, period=1.0, maxshift=0.4):
    """Build field-aligned cartesian points from a BOUT++ grid.

    grid is a dict-like object providing nx, ny, zShift, Rxy and Zxy.
    Returns an (nx*ny*nz, 3) array of (X, Y, Z) points, or None when a
    required grid variable is missing.
    """
    try:
        nx = grid["nx"]
        ny = grid["ny"]
        zshift = grid["zShift"]
        Rxy = grid["Rxy"]
        Zxy = grid["Zxy"]
    except:
        # NOTE(review): bare except hides everything, not just missing keys;
        # KeyError is probably what was meant.
        print("Missing required data")
        return None
    # NOTE(review): dz and the maxshift parameter are currently unused.
    dz = 2.*pi / (period * (nz-1))
    phi0 = np.linspace(0,2.*pi / period, nz)
    # Need to insert additional points in Y so mesh looks smooth
    #for y in range(1,ny):
    #    ms = np.max(np.abs(zshift[:,y] - zshift[:,y-1]))
    #    if(
    # Create array of points, structured
    points = np.zeros([nx*ny*nz, 3])
    start = 0
    for y in range(ny):
        end = start + nx*nz
        # Toroidal angle for each (z, radial) sample at this y slice;
        # broadcasting yields an (nz, nx) array.
        phi = zshift[:,y] + phi0[:,None]
        r = Rxy[:,y] + (np.zeros([nz]))[:,None]
        xz_points = points[start:end]
        xz_points[:,0] = (r*cos(phi)).ravel() # X
        xz_points[:,1] = (r*sin(phi)).ravel() # Y
        xz_points[:,2] = (Zxy[:,y]+(np.zeros([nz]))[:,None]).ravel() # Z
        start = end
    return points
def create_grid(grid, data, period=1):
    """Build a tvtk StructuredGrid from grid geometry and a 3-D data array.

    data is indexed as (x, y, z); the point/scalar ordering below flattens
    in (x, z, y) order to match the dims passed to StructuredGrid.
    """
    s = np.shape(data)
    nx = grid["nx"]
    ny = grid["ny"]
    nz = s[2]
    print("data: %d,%d,%d grid: %d,%d\n" % (s[0],s[1],s[2], nx,ny))
    dims = (nx, nz, ny)
    sgrid = tvtk.StructuredGrid(dimensions=dims)
    pts = aligned_points(grid, nz, period)
    print(np.shape(pts))
    sgrid.points = pts
    # Flatten the scalar data in the same order as the points above.
    scalar = np.zeros([nx*ny*nz])
    start = 0
    for y in range(ny):
        end = start + nx*nz
        scalar[start:end] = (data[:,y,:]).transpose().ravel()
        print(y, " = " , np.max(scalar[start:end]))
        start = end
    sgrid.point_data.scalars = np.ravel(scalar.copy())
    sgrid.point_data.scalars.name = "data"
    return sgrid
@mayavi2.standalone
def view3d(sgrid):
    """Display a structured grid in Mayavi with an outline and an x grid plane."""
    from enthought.mayavi.sources.vtk_data_source import VTKDataSource
    from enthought.mayavi.modules.api import Outline, GridPlane
    # `mayavi` is not defined in this module — presumably it is provided by
    # the @mayavi2.standalone decorator; confirm against the mayavi2 docs.
    mayavi.new_scene()
    src = VTKDataSource(data=sgrid)
    mayavi.add_source(src)
    mayavi.add_module(Outline())
    g = GridPlane()
    g.grid_plane.axis = 'x'
    mayavi.add_module(g)
if __name__ == '__main__':
    from boutdata import collect
    from boututils import file_import
    # Hard-coded local paths: this demo expects a specific dataset layout.
    path = "/media/449db594-b2fe-4171-9e79-2d9b76ac69b6/runs/data_33/"
    #path="/home/ben/run4"
    #g = file_import("../cbm18_dens8.grid_nx68ny64.nc")
    g = file_import("data/cbm18_8_y064_x516_090309.nc")
    #g = file_import("/home/ben/run4/reduced_y064_x256.nc")
    # Pressure perturbation at a single time index.
    data = collect("P", tind=50, path=path)
    data = data[0,:,:,:]
    s = np.shape(data)
    nz = s[2]
    # Add the background profile to every z slice.
    bkgd = collect("P0", path=path)
    for z in range(nz):
        data[:,:,z] += bkgd
    # Create a structured grid
    sgrid = create_grid(g, data, 10)
    w = tvtk.XMLStructuredGridWriter(input=sgrid, file_name='sgrid.vts')
    w.write()
    # View the structured grid
    view3d(sgrid)
|
kevinpetersavage/BOUT-dev
|
tools/pylib/boutdata/mayavi2.py
|
Python
|
gpl-3.0
| 3,148
|
[
"Mayavi"
] |
f214ce2d8d8238093cbc33b11219471f13cb3056996a9c24166057fdf82418fa
|
#!/usr/bin/python
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2012-2013 Jonathan Topf, Jupiter Jazz Limited
# Copyright (c) 2014-2017 Jonathan Topf, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import argparse
import datetime
import os
import random
import shutil
import socket
import string
import subprocess
import sys
import time
import traceback
import xml.dom.minidom as xml
#--------------------------------------------------------------------------------------------------
# Constants.
#--------------------------------------------------------------------------------------------------
VERSION = "1.18"
# Directory names created under the watched directory.
RENDERS_DIR = "_renders"
ARCHIVE_DIR = "_archives"
LOGS_DIR = "_logs"
PAUSE_BETWEEN_CHECKS = 10 # in seconds
# Name of the appleseed command line renderer binary, per platform.
DEFAULT_TOOL_FILENAME = "appleseed.cli.exe" if os.name == "nt" else "appleseed.cli"
#--------------------------------------------------------------------------------------------------
# Utility functions.
#--------------------------------------------------------------------------------------------------
def safe_mkdir(dir):
    """Create directory *dir* if it does not exist yet (non-recursive)."""
    if os.path.exists(dir):
        return
    os.mkdir(dir)
def convert_path_to_local(path):
    """Normalize the separators in *path* to the current platform's style."""
    if os.name == "nt":
        return path.replace('/', '\\')
    return path.replace('\\', '/')
def format_message(severity, msg):
    """Prefix every line of *msg* with a timestamp and a padded severity tag."""
    now = datetime.datetime.now()
    stamp = now.strftime("%Y-%m-%d %H:%M:%S.%f")
    tag = severity.ljust(7)
    lines = msg.splitlines()
    return "\n".join("{0} node {1} | {2}".format(stamp, tag, line) for line in lines)
# Characters permitted in user names; everything else becomes an underscore.
VALID_USER_NAME_CHARS = frozenset("%s%s_-" % (string.ascii_letters, string.digits))

def cleanup_user_name(user_name):
    """Return *user_name* with every unsupported character replaced by '_'."""
    cleaned = []
    for ch in user_name:
        cleaned.append(ch if ch in VALID_USER_NAME_CHARS else '_')
    return "".join(cleaned)
#--------------------------------------------------------------------------------------------------
# Log backend to write to the console, using colors on systems that support them.
#--------------------------------------------------------------------------------------------------
class ConsoleBackend:
    """Log backend writing to the console, using ANSI colors where supported."""

    @staticmethod
    def info(msg):
        print("{0}".format(msg))

    @staticmethod
    def warning(msg):
        # Yellow text on capable terminals.
        if ConsoleBackend.is_coloring_supported():
            print("\033[93m{0}\033[0m".format(msg))
        else:
            print("{0}".format(msg))

    @staticmethod
    def error(msg):
        # Red text on capable terminals.
        if ConsoleBackend.is_coloring_supported():
            print("\033[91m{0}\033[0m".format(msg))
        else:
            print("{0}".format(msg))

    @staticmethod
    def is_coloring_supported():
        # FIX: the original compared the os.system *function* object to the
        # string 'darwin', which is always False; the intent was clearly a
        # platform check.
        return sys.platform == 'darwin'
#--------------------------------------------------------------------------------------------------
# Log backend to write to a log file.
#--------------------------------------------------------------------------------------------------
class LogFileBackend:
    """Log backend appending messages to a file, creating its directory first."""

    def __init__(self, path):
        self.path = path

    def write(self, msg):
        """Append *msg* plus a newline to the log file."""
        safe_mkdir(os.path.dirname(self.path))
        log_file = open(self.path, "a")
        try:
            log_file.write(msg + "\n")
        finally:
            log_file.close()
#--------------------------------------------------------------------------------------------------
# Log class to simultaneously write to a log file and to the console.
#--------------------------------------------------------------------------------------------------
class Log:
    """Log sink that writes each message to a log file and to the console."""

    def __init__(self, path):
        # File backend; console output goes through the ConsoleBackend class.
        self.log_file = LogFileBackend(path)

    def info(self, msg):
        formatted_msg = format_message("info", msg)
        self.log_file.write(formatted_msg)
        ConsoleBackend.info(formatted_msg)

    def warning(self, msg):
        formatted_msg = format_message("warning", msg)
        self.log_file.write(formatted_msg)
        ConsoleBackend.warning(formatted_msg)

    def error(self, msg):
        formatted_msg = format_message("error", msg)
        self.log_file.write(formatted_msg)
        ConsoleBackend.error(formatted_msg)

    # Console-only variants that skip the log file.
    @staticmethod
    def info_no_log(msg):
        ConsoleBackend.info(format_message("info", msg))

    @staticmethod
    def warning_no_log(msg):
        ConsoleBackend.warning(format_message("warning", msg))

    @staticmethod
    def error_no_log(msg):
        ConsoleBackend.error(format_message("error", msg))
#--------------------------------------------------------------------------------------------------
# Code to temporarily disable Windows Error Reporting.
#--------------------------------------------------------------------------------------------------
# Windows-only helpers to temporarily reconfigure Windows Error Reporting
# (WER) via the per-user registry key. (Python 2: _winreg, WindowsError.)
if os.name == "nt":
    import _winreg
    WER_KEY_PATH = r"Software\Microsoft\Windows\Windows Error Reporting"

    def open_wer_key():
        """Open (or create) the per-user WER registry key; None on failure."""
        try:
            return _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, WER_KEY_PATH, 0,
                _winreg.KEY_ALL_ACCESS)
        except WindowsError:
            pass
        try:
            return _winreg.CreateKey(_winreg.HKEY_CURRENT_USER, WER_KEY_PATH)
        except WindowsError:
            pass
        return None

    def configure_wer(dont_show_ui, disabled):
        """Set the DontShowUI/Disabled WER values.

        Returns the previous (dont_show_ui, disabled) pair, or None when the
        key could not be opened.
        """
        key = open_wer_key()
        if key is None:
            return None
        previous_dont_show_ui = _winreg.QueryValueEx(key, "DontShowUI")[0]
        previous_disabled = _winreg.QueryValueEx(key, "Disabled")[0]
        _winreg.SetValueEx(key, "DontShowUI", 0, _winreg.REG_DWORD, dont_show_ui)
        _winreg.SetValueEx(key, "Disabled", 0, _winreg.REG_DWORD, disabled)
        _winreg.CloseKey(key)
        return previous_dont_show_ui, previous_disabled

    def get_wer_status():
        """Return a short human-readable summary of the current WER settings."""
        key = open_wer_key()
        if key is None:
            return "(unavailable)"
        dont_show_ui = _winreg.QueryValueEx(key, "DontShowUI")[0]
        disabled = _winreg.QueryValueEx(key, "Disabled")[0]
        _winreg.CloseKey(key)
        return "DontShowUI={0} Disabled={1}".format(dont_show_ui, disabled)

    def enable_wer(log):
        """Re-enable WER; returns the previous settings (or None on failure)."""
        log.info("enabling windows error reporting...")
        previous_values = configure_wer(0, 0)
        if previous_values is None:
            log.warning("could not enable windows error reporting.")
        return previous_values

    def disable_wer(log):
        """Disable WER; returns the previous settings (or None on failure)."""
        log.info("disabling windows error reporting...")
        previous_values = configure_wer(1, 1)
        if previous_values is None:
            log.warning("could not disable windows error reporting.")
        return previous_values

    def restore_wer(previous_values, log):
        """Restore WER settings previously returned by enable/disable_wer."""
        log.info("restoring initial windows error reporting parameters...")
        if configure_wer(previous_values[0], previous_values[1]) is None:
            log.warning("could not restore initial windows error reporting parameters.")
#--------------------------------------------------------------------------------------------------
# Launches appleseed.cli and print appleseed version information.
#--------------------------------------------------------------------------------------------------
def print_appleseed_version(args, log):
    """Run appleseed.cli with --version --system and log what it reports.

    Exits the process with status 1 when the tool cannot be launched or
    returns a non-zero code.
    """
    try:
        process = subprocess.Popen([args.tool_path, "--version", "--system"],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        # appleseed.cli emits its version banner on stderr.
        version_lines = process.communicate()[1].split(os.linesep, 1)
        if process.returncode != 0:
            log.error("failed to query {0} version (return code: {1}).".format(args.tool_path, process.returncode))
            sys.exit(1)
        for version_line in version_lines:
            log.info("{0}".format(version_line))
    except OSError:
        log.error("failed to query {0} version.".format(args.tool_path))
        sys.exit(1)
#--------------------------------------------------------------------------------------------------
# Rendering logic.
#--------------------------------------------------------------------------------------------------
class ProcessFailedException(Exception):
    """Raised when the appleseed.cli child process exits with a non-zero code."""
def render_project(args, project_filepath, log):
    """Render a single project file with appleseed.cli.

    Returns True on success, False when the file could not be claimed
    (another render node grabbed it first). Re-raises on rendering failure
    after putting the project file back.
    """
    # Assign the project file to ourselves. The rename acts as an atomic
    # claim shared between competing render nodes on the same directory.
    assigned_project_filepath = project_filepath + "." + args.user_name
    try:
        os.rename(project_filepath, assigned_project_filepath)
    except:
        # Losing the claim race is expected and not an error.
        # log.warning("failed to acquire {0}.".format(project_filepath))
        return False
    log.info("starting rendering {0}...".format(project_filepath))
    start_time = datetime.datetime.now()
    try:
        # Create shell command.
        project_filename = os.path.split(project_filepath)[1]
        output_filename = os.path.splitext(project_filename)[0] + '.' + args.output_format
        output_filepath = os.path.join(args.directory, RENDERS_DIR, output_filename)
        command = '"{0}" -o "{1}" "{2}"'.format(args.tool_path, output_filepath, assigned_project_filepath)
        if args.args:
            command += ' {0}'.format(" ".join(args.args))
        # Make sure the output directory exists.
        safe_mkdir(os.path.join(args.directory, RENDERS_DIR))
        # Execute command.
        result = subprocess.call(command, shell=True)
        if result != 0:
            raise ProcessFailedException()
    except:
        # Rendering failed.
        log.error("failed to render {0}.".format(project_filepath))
        # Unassign the project file so another node may retry it.
        try:
            os.rename(assigned_project_filepath, project_filepath)
        except:
            pass
        # Propagate the exception.
        raise
    # Rendering succeeded.
    rendering_time = datetime.datetime.now() - start_time
    log.info("successfully rendered {0} in {1}.".format(project_filepath, rendering_time))
    # Move the project file to the archive directory.
    archive_dir = os.path.join(args.directory, ARCHIVE_DIR)
    archived_project_filepath = os.path.join(archive_dir, project_filename)
    try:
        safe_mkdir(archive_dir)
        shutil.move(assigned_project_filepath, archived_project_filepath)
    except:
        log.error("failed to move {0} to {1}.".format(assigned_project_filepath, archived_project_filepath))
    return True
#--------------------------------------------------------------------------------------------------
# Watching logic.
#--------------------------------------------------------------------------------------------------
def get_project_files(directory):
    """Return the paths of all *.appleseed files directly inside directory."""
    return [os.path.join(directory, entry)
            for entry in os.listdir(directory)
            if os.path.isfile(os.path.join(directory, entry))
            and os.path.splitext(entry)[1] == '.appleseed']
def extract_project_deps(project_filepath, log):
    """Collect the file dependencies referenced by an appleseed project file.

    Returns (True, deps) on success, or (False, empty set) when the file
    cannot be read (e.g. it was just claimed by another render node).
    Dependencies are joined onto the project's directory.
    """
    try:
        with open(project_filepath, 'r') as file:
            contents = file.read()
    except:
        # Best-effort read: another node may have renamed the file already.
        # log.warning("failed to acquire {0}.".format(project_filepath))
        return False, set()
    deps = set()
    directory = os.path.split(project_filepath)[0]
    # Parse the document once; the original parsed the same string twice.
    document = xml.parseString(contents)
    # <parameter name="filename" value="..."/> entries.
    for node in document.getElementsByTagName('parameter'):
        if node.getAttribute('name') == 'filename':
            filepath = node.getAttribute('value')
            filepath = convert_path_to_local(filepath)
            filepath = os.path.join(directory, filepath)
            deps.add(filepath)
    # <parameters name="filename"> blocks with one child element per file.
    for node in document.getElementsByTagName('parameters'):
        if node.getAttribute('name') == 'filename':
            for child in node.childNodes:
                if child.nodeType == xml.Node.ELEMENT_NODE:
                    filepath = child.getAttribute('value')
                    filepath = convert_path_to_local(filepath)
                    filepath = os.path.join(directory, filepath)
                    deps.add(filepath)
    return True, deps
def gather_missing_project_deps(deps):
    """Return the subset of dependency paths that do not exist on disk."""
    return [dep for dep in deps if not os.path.exists(dep)]
def watch(args, log):
    """Scan the watch directory once and render at most one project file.

    Returns True when a project was rendered (caller loops again at once),
    False when there was nothing ready to do.
    """
    # Look for project files in the watch directory.
    project_files = get_project_files(args.directory)
    # No project file found.
    if len(project_files) == 0:
        Log.info_no_log("waiting for incoming data...")
        return False
    # Shuffle the array of project files so competing nodes do not all try
    # to claim the same file first.
    random.shuffle(project_files)
    # Render the first project file that has no missing dependencies.
    for project_filepath in project_files:
        deps_success, deps = extract_project_deps(project_filepath, log)
        if not deps_success:
            continue
        missing_deps = gather_missing_project_deps(deps)
        if len(missing_deps) > 0:
            if args.print_missing_deps:
                Log.info_no_log("{0} missing dependencies for {1}:".format(len(missing_deps),
                                                                           project_filepath))
                for dep in missing_deps:
                    Log.info_no_log(" {0}".format(dep))
            else:
                Log.info_no_log("{0} missing dependencies for {1}.".format(len(missing_deps),
                                                                           project_filepath))
            continue
        return render_project(args, project_filepath, log)
    # None of the project file has all its dependencies ready.
    return False
#--------------------------------------------------------------------------------------------------
# Entry point.
#--------------------------------------------------------------------------------------------------
def main():
    """Entry point: parse arguments, then continuously watch a directory and
    render any appleseed project file that appears in it."""
    # Parse the command line.
    parser = argparse.ArgumentParser(description="continuously watch a directory and render any "
                                                 "appleseed project file that appears in it.")
    parser.add_argument("-t", "--tool-path", metavar="tool-path",
                        help="set the path to the appleseed.cli tool")
    parser.add_argument("-f", "--format", dest="output_format", metavar="FORMAT", default="exr",
                        help="set output format (e.g. png, exr)")
    parser.add_argument("-u", "--user", dest="user_name", metavar="NAME",
                        help="set user name (by default the host name is used)")
    parser.add_argument("-p", "--parameter", dest="args", metavar="ARG", nargs="*",
                        help="forward additional arguments to appleseed")
    parser.add_argument("--print-missing-deps", action='store_true',
                        help="print missing dependencies")
    parser.add_argument("directory", help="directory to watch")
    args = parser.parse_args()
    # If no tool path is provided, search for the tool in the same directory as this script.
    if args.tool_path is None:
        script_directory = os.path.dirname(os.path.realpath(__file__))
        args.tool_path = os.path.join(script_directory, DEFAULT_TOOL_FILENAME)
        print("setting tool path to {0}.".format(args.tool_path))
    # If no watch directory is provided, watch the current directory.
    if args.directory is None:
        args.directory = os.getcwd()
    # If no user name is provided, use the host name.
    if args.user_name is None:
        args.user_name = socket.gethostname()
    # Clean up the user name.
    args.user_name = cleanup_user_name(args.user_name)
    # Start the log.
    log = Log(os.path.join(args.directory, LOGS_DIR, args.user_name + ".log"))
    log.info("--- starting logging ---")
    # Print version information.
    log.info("running rendernode.py version {0}.".format(VERSION))
    print_appleseed_version(args, log)
    # Print the user name.
    log.info("user name is {0}.".format(args.user_name))
    # Disable Windows Error Reporting on Windows so crashing render processes
    # do not pop up blocking dialogs.
    if os.name == "nt":
        log.info("initial windows error reporting status: {0}".format(get_wer_status()))
        initial_wer_values = disable_wer(log)
        log.info("new windows error reporting status: {0}".format(get_wer_status()))
    log.info("watching directory {0}".format(os.path.abspath(args.directory)))
    random.seed()
    # Main watch/render loop.
    try:
        while True:
            try:
                while watch(args, log):
                    pass
            # BUG FIX: the original read `except KeyboardInterrupt, SystemExit:`,
            # which in Python 2 catches only KeyboardInterrupt and binds the
            # exception to the name SystemExit; a tuple is required to catch both.
            except (KeyboardInterrupt, SystemExit):
                raise
            except ProcessFailedException:
                pass
            except:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
                log.error("".join(line for line in lines))
            time.sleep(PAUSE_BETWEEN_CHECKS)
    except (KeyboardInterrupt, SystemExit):
        # Same tuple fix as above: allow Ctrl-C / SystemExit to end the loop.
        pass
    # Restore initial Windows Error Reporting parameters.
    if os.name == "nt":
        restore_wer(initial_wer_values, log)
        log.info("final windows error reporting status: {0}".format(get_wer_status()))
    log.info("exiting...")
if __name__ == '__main__':
    main()
|
aytekaman/appleseed
|
scripts/rendernode.py
|
Python
|
mit
| 18,017
|
[
"VisIt"
] |
36552e36a03585c18b4b6b092963c9bbce7ce3e2bcac7d2000b830a2f3a00fb2
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2019, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import collections
import qiime2.core.type.grammar as grammar
class TestTypeBase(unittest.TestCase):
    """Tests for grammar._TypeBase: immutability and the default/reflected
    operator behaviour shared by all type objects."""

    def setUp(self):
        # Minimal concrete subclass: [] / | / & return the operand unchanged,
        # and == is always False so reflected comparisons are exercised.
        class Example(grammar._TypeBase):
            __getitem__ = __or__ = __and__ = lambda s, x: x

            def __eq__(self, other):
                return False
        self.Example = Example

    def test_ne(self):
        example = self.Example()
        self.assertNotEqual(example, 42)
        self.assertNotEqual(42, example)

    def test_rmod(self):
        example = self.Example()
        with self.assertRaisesRegex(TypeError, 'right-hand'):
            42 % example

    def test_rand(self):
        self.assertEqual(42 & self.Example(), 42)

    def test_ror(self):
        self.assertEqual(42 | self.Example(), 42)

    def test_delattr(self):
        example = self.Example()
        with self.assertRaisesRegex(TypeError, 'immutable'):
            del example.foo

    def test_setitem(self):
        example = self.Example()
        with self.assertRaisesRegex(TypeError, 'immutable'):
            example['foo'] = 1

    def test_delitem(self):
        example = self.Example()
        with self.assertRaisesRegex(TypeError, 'immutable'):
            del example['foo']

    def test_getitem(self):
        example = self.Example()
        self.assertEqual(example[1], 1)

    def test_freeze(self):
        # Attributes may be set freely until _freeze_() is called.
        example = self.Example()
        example.foo = 1
        self.assertEqual(example.foo, 1)
        example._freeze_()
        self.assertEqual(example.foo, 1)
        with self.assertRaisesRegex(TypeError, 'immutable'):
            example.foo = 1
        with self.assertRaisesRegex(TypeError, 'immutable'):
            example.bar = 1
# These tests are not concerned with rewriting properties on the class,
# that behaviour is left unspecified to match Python.
class TestCompositeType(unittest.TestCase):
    """Tests for grammar.CompositeType: field validation, repr, and the
    _apply_fields_ hook that produces a TypeExpression."""

    def test_immutable(self):
        # If this test fails, then the hiearchy has been rearranged and the
        # properties tested for `_TypeBase` should be tested for
        # this class.
        # - Your Friendly Dead Man's Switch
        self.assertIsInstance(grammar.CompositeType('Example', ('foo',)),
                              grammar._TypeBase)

    def test_field_sanity(self):
        with self.assertRaisesRegex(ValueError, 'empty'):
            grammar.CompositeType('Example', ())

    def test_mod(self):
        with self.assertRaisesRegex(TypeError, 'predicate'):
            grammar.CompositeType('Example', ('foo',)) % None

    def test_or(self):
        with self.assertRaisesRegex(TypeError, 'union'):
            grammar.CompositeType('Example', ('foo',)) | None

    def test_and(self):
        with self.assertRaisesRegex(TypeError, 'intersect'):
            grammar.CompositeType('Example', ('foo',)) & None

    def test_repr(self):
        self.assertEqual(repr(grammar.CompositeType('Example', ('foo',))),
                         'Example[{foo}]')
        self.assertEqual(repr(grammar.CompositeType('Example', ('f', 'b'))),
                         'Example[{f}, {b}]')

    def test_validate_field_w_typeexp(self):
        Example = grammar.CompositeType('Example', ('foo',))
        # Check that no error is raised:
        Example._validate_field_('foo', grammar.TypeExpression('X'))
        # Test passed if we reach this line.

    def test_validate_field_w_comptype(self):
        Example = grammar.CompositeType('Example', ('foo',))
        with self.assertRaisesRegex(TypeError, 'Incomplete'):
            Example._validate_field_('foo', Example)

    def test_validate_field_w_nonsense(self):
        Example = grammar.CompositeType('Example', ('foo',))
        with self.assertRaisesRegex(TypeError, 'Ellipsis'):
            Example._validate_field_('foo', Ellipsis)

    def test_apply_fields(self):
        X = grammar.TypeExpression('X')
        Example = grammar.CompositeType('Example', ('foo',))
        result = Example._apply_fields_((X,))
        self.assertEqual(result.fields, (X,))
        self.assertEqual(result.name, 'Example')
        self.assertIsInstance(result, grammar.TypeExpression)

    def test_iter_symbols(self):
        Example = grammar.CompositeType('Example', ('foo',))
        self.assertEqual(list(Example.iter_symbols()), ['Example'])
class TestCompositeTypeGetItem(unittest.TestCase):
    """Tests for CompositeType.__getitem__: arity checks, nesting, and the
    _validate_field_/_apply_fields_ dispatch hooks."""

    def setUp(self):
        # Scratch dict the hook subclasses write into so assertions can
        # observe what was dispatched.
        self.local = {}

    def test_wrong_length(self):
        X = grammar.TypeExpression('X')
        composite_type = grammar.CompositeType('C', ['foo', 'bar'])
        with self.assertRaisesRegex(TypeError, '1'):
            composite_type[X]
        composite_type = grammar.CompositeType('C', ['foo'])
        with self.assertRaisesRegex(TypeError, '2'):
            composite_type[X, X]

    def test_nested_expression(self):
        X = grammar.TypeExpression('X')
        C = grammar.CompositeType('C', ['foo', 'bar'])
        self.assertEqual(repr(C[X, C[C[X, X], X]]), 'C[X, C[C[X, X], X]]')

    def test_validate_field_called(self):
        class MyCompositeType(grammar.CompositeType):
            def _validate_field_(s, name, value):
                self.local['name'] = name
                self.local['value'] = value
        my_type = MyCompositeType('MyType', ['foo'])
        my_type[...]
        self.assertEqual(self.local['name'], 'foo')
        self.assertEqual(self.local['value'], ...)

    def test_apply_fields_called(self):
        class MyCompositeType(grammar.CompositeType):
            def _validate_field_(*args):
                pass  # Let anything through

            def _apply_fields_(s, fields):
                self.local['fields'] = fields
                return ...
        my_type = MyCompositeType('MyType', ['foo'])
        type_exp = my_type['!']  # '!' is not a `TypeExpression`
        self.assertEqual(self.local['fields'], ('!',))
        self.assertEqual(type_exp, ...)
class TestTypeExpression(unittest.TestCase):
    """Tests for grammar.TypeExpression: hashing/equality, repr dispatch,
    union/intersection/predicate validation hooks and subtyping."""

    def test_immutable(self):
        # If this test fails, then the hiearchy has been rearranged and the
        # properties tested for `_TypeBase` should be tested for
        # this class.
        # - Your Friendly Dead Man's Switch
        self.assertIsInstance(grammar.TypeExpression('X'),
                              grammar._TypeBase)

    def test_hashable(self):
        # FIX: `collections.Hashable` was deprecated since Python 3.3 and
        # removed in 3.10; use the canonical `collections.abc.Hashable`.
        import collections.abc
        a = grammar.TypeExpression('X')
        b = grammar.TypeExpression('Y', fields=(a,))
        c = grammar.TypeExpression('Y', fields=(a,))
        d = grammar.TypeExpression('Z', predicate=grammar.Predicate("stuff"))
        self.assertIsInstance(a, collections.abc.Hashable)
        # There really shouldn't be a collision between these:
        self.assertNotEqual(hash(a), hash(d))
        self.assertEqual(b, c)
        self.assertEqual(hash(b), hash(c))
        # TODO: Test dictionaries work well

    def test_eq_nonsense(self):
        X = grammar.TypeExpression('X')
        self.assertIs(X.__eq__(42), NotImplemented)
        self.assertFalse(X == 42)

    def test_eq_different_instances(self):
        X = grammar.TypeExpression('X')
        X_ = grammar.TypeExpression('X')
        self.assertIsNot(X, X_)
        self.assertEqual(X, X_)
        # TODO: Add more equality tests

    def test_mod(self):
        # Indexing a field-less expression must fail regardless of the key.
        X = grammar.TypeExpression('X')
        with self.assertRaisesRegex(TypeError, 'fields'):
            X['scikit-bio/assets/.no.gif']
        Y = grammar.TypeExpression('Y', fields=(X,))
        with self.assertRaisesRegex(TypeError, 'fields'):
            Y[';-)']

    def test_repr(self):
        # Subclass elements to demonstrate dispatch occurs correctly.
        class Face1(grammar.TypeExpression):
            def __repr__(self):
                return "-_-"

        class Exclaim(grammar.TypeExpression):
            def __repr__(self):
                return '!'

        class Face2(grammar.Predicate):
            def __repr__(self):
                return '(o_o)'

        self.assertEqual(
            repr(grammar.TypeExpression('!')),
            '!')
        self.assertEqual(
            repr(grammar.TypeExpression('!', fields=(Face1(''),))),
            '![-_-]')
        self.assertEqual(
            repr(grammar.TypeExpression('!',
                                        fields=(Face1(''), Exclaim('!')))),
            '![-_-, !]')
        self.assertEqual(
            repr(grammar.TypeExpression('!',
                                        fields=(Face1(''), Exclaim('!')),
                                        predicate=Face2(True))),
            '![-_-, !] % (o_o)')
        self.assertEqual(
            repr(grammar.TypeExpression('(o_-)',
                                        predicate=Face2(True))),
            '(o_-) % (o_o)')

    def test_validate_union_w_nonsense(self):
        X = grammar.TypeExpression('X')
        with self.assertRaisesRegex(TypeError, 'expression'):
            X._validate_union_(42)

    def test_validate_union_w_composite_type(self):
        X = grammar.TypeExpression('X')
        with self.assertRaisesRegex(TypeError, 'incomplete'):
            X._validate_union_(grammar.CompositeType('A', field_names=('X',)))

    def test_validate_union_w_valid(self):
        X = grammar.TypeExpression('X')
        Y = grammar.TypeExpression('Y')
        X._validate_union_(Y)

    def test_validate_union_implements_handshake(self):
        local = {}
        X = grammar.TypeExpression('X')

        class Example(grammar.TypeExpression):
            def _validate_union_(self, other, handshake=False):
                local['other'] = other
                local['handshake'] = handshake
        X._validate_union_(Example('Example'))
        self.assertIs(local['other'], X)
        self.assertTrue(local['handshake'])

    def test_build_union(self):
        X = grammar.TypeExpression('X')
        Y = grammar.TypeExpression('Y')
        union = X._build_union_((X, Y))
        self.assertIsInstance(union, grammar.UnionTypeExpression)
        self.assertEqual(union.members, frozenset({X, Y}))

    def test_validate_intersection_w_nonsense(self):
        X = grammar.TypeExpression('X')
        with self.assertRaisesRegex(TypeError, 'expression'):
            X._validate_intersection_(42)

    def test_validate_intersection_w_composite_type(self):
        X = grammar.TypeExpression('X')
        with self.assertRaisesRegex(TypeError, 'incomplete'):
            X._validate_intersection_(
                grammar.CompositeType('A', field_names=('X',)))

    def test_validate_intersection_w_valid(self):
        X = grammar.TypeExpression('X')
        Y = grammar.TypeExpression('Y')
        X._validate_intersection_(Y)

    def test_validate_intersection_implements_handshake(self):
        local = {}
        X = grammar.TypeExpression('X')

        class Example(grammar.TypeExpression):
            def _validate_intersection_(self, other, handshake=False):
                local['other'] = other
                local['handshake'] = handshake
        X._validate_intersection_(Example('Example'))
        self.assertIs(local['other'], X)
        self.assertTrue(local['handshake'])

    def test_build_intersection(self):
        X = grammar.TypeExpression('X')
        Y = grammar.TypeExpression('Y')
        intersection = X._build_intersection_((X, Y))
        self.assertIsInstance(intersection, grammar.IntersectionTypeExpression)
        self.assertEqual(intersection.members, frozenset({X, Y}))

    def test_validate_predicate_w_nonsense(self):
        X = grammar.TypeExpression('X')
        with self.assertRaisesRegex(TypeError, 'predicate'):
            X._validate_predicate_(42)

    def test_validate_predicate_w_valid(self):
        predicate = grammar.Predicate(True)
        X = grammar.TypeExpression('X')
        X._validate_predicate_(predicate)
        # Test passed.

    def test_apply_predicate(self):
        predicate = grammar.Predicate(True)
        Y = grammar.TypeExpression('Y')
        X = grammar.TypeExpression('X', fields=(Y,))
        result = X._apply_predicate_(predicate)
        self.assertIsInstance(result, grammar.TypeExpression)
        self.assertEqual(result.fields, (Y,))

    def test_is_subtype_wrong_name(self):
        Y = grammar.TypeExpression('Y')
        X = grammar.TypeExpression('X')
        self.assertFalse(Y._is_subtype_(X))
        self.assertFalse(X._is_subtype_(Y))

    def test_is_subtype_diff_fields(self):
        F1 = grammar.TypeExpression('F1')
        F2 = grammar.TypeExpression('F2')
        X = grammar.TypeExpression('X', fields=(F1,))
        X_ = grammar.TypeExpression('X', fields=(F2,))
        self.assertFalse(X_._is_subtype_(X))
        self.assertFalse(X._is_subtype_(X_))

    def test_is_subtype_diff_predicates(self):
        # Predicate whose subtype relation is numeric <=.
        class Pred(grammar.Predicate):
            def __init__(self, value):
                self.value = value
                super().__init__(value)

            def _is_subtype_(self, other):
                return self.value <= other.value
        P1 = Pred(1)
        P2 = Pred(2)
        X = grammar.TypeExpression('X', predicate=P1)
        X_ = grammar.TypeExpression('X', predicate=P2)
        self.assertFalse(X_._is_subtype_(X))
        self.assertTrue(X._is_subtype_(X_))

    def test_is_subtype_matches(self):
        X = grammar.TypeExpression('X')
        X_ = grammar.TypeExpression('X')
        self.assertTrue(X._is_subtype_(X))
        self.assertTrue(X_._is_subtype_(X))
        self.assertTrue(X._is_subtype_(X_))
        self.assertTrue(X_._is_subtype_(X_))

    def test_is_subtype_matches_w_fields(self):
        F1 = grammar.TypeExpression('F1')
        F2 = grammar.TypeExpression('F2')
        X = grammar.TypeExpression('X', fields=(F1,))
        X_ = grammar.TypeExpression('X', fields=(F2,))
        self.assertFalse(X_._is_subtype_(X))
        self.assertFalse(X._is_subtype_(X_))

    def test_is_subtype_matches_w_predicate(self):
        class Pred(grammar.Predicate):
            def __init__(self, value=0):
                self.value = value
                super().__init__(value)

            def _is_subtype_(self, other):
                return self.value <= other.value
        P1 = Pred(1)
        P1_ = Pred(1)
        X = grammar.TypeExpression('X', predicate=P1)
        X_ = grammar.TypeExpression('X', predicate=P1_)
        self.assertTrue(X._is_subtype_(X))
        self.assertTrue(X_._is_subtype_(X))
        self.assertTrue(X._is_subtype_(X_))
        self.assertTrue(X_._is_subtype_(X_))
class TestTypeExpressionMod(unittest.TestCase):
    """Tests for TypeExpression.__mod__ (predicate application) and its
    _validate_predicate_/_apply_predicate_ dispatch hooks."""

    def setUp(self):
        # Scratch dict the hook subclasses write into for later assertions.
        self.local = {}

    def test_mod_w_existing_predicate(self):
        X = grammar.TypeExpression('X', predicate=grammar.Predicate('Truthy'))
        with self.assertRaisesRegex(TypeError, 'predicate'):
            X % grammar.Predicate('Other')

    def test_mod_w_none_predicate(self):
        X = grammar.TypeExpression('X', predicate=None)
        predicate = grammar.Predicate("Truthy")
        self.assertIs((X % predicate).predicate, predicate)

    def test_mod_w_none(self):
        X = grammar.TypeExpression('X')
        self.assertEqual(X % None, X)

    def test_validate_predicate_called(self):
        class Example(grammar.TypeExpression):
            def _validate_predicate_(s, predicate):
                self.local['predicate'] = predicate
        example = Example('Example')
        example % 42
        self.assertEqual(self.local['predicate'], 42)

    def test_apply_predicate_called(self):
        class Example(grammar.TypeExpression):
            def _validate_predicate_(s, predicate):
                pass  # Let anything through

            def _apply_predicate_(s, predicate):
                self.local['predicate'] = predicate
                return ...
        example = Example('Example')
        new_type_expr = example % 'Foo'
        self.assertEqual(self.local['predicate'], 'Foo')
        self.assertIs(new_type_expr, ...)
class TestTypeExpressionOr(unittest.TestCase):
    """Tests for TypeExpression.__or__ (union) and its validation/build hooks."""

    def setUp(self):
        # Scratch dict the hook subclasses write into for later assertions.
        self.local = {}

    def test_identity(self):
        # Union with an equal expression collapses to the left operand.
        X = grammar.TypeExpression('X')
        X_ = grammar.TypeExpression('X')
        self.assertIs(X | X_, X)

    def test_several(self):
        # Unions flatten and de-duplicate like sets.
        X = grammar.TypeExpression('X')
        Y = grammar.TypeExpression('Y')
        Z = grammar.TypeExpression('Z')
        self.assertIsInstance(X | Y | Z, grammar.UnionTypeExpression)
        self.assertEqual(X | Y | Z | X | Z, Y | Z | X)

    def test_validate_union_called(self):
        class Example(grammar.TypeExpression):
            def _validate_union_(s, other, handshake):
                self.local['other'] = other
                self.local['handshake'] = handshake
        example = Example('Example')
        example | 42
        self.assertEqual(self.local['other'], 42)
        self.assertFalse(self.local['handshake'])

    def test_build_union_called(self):
        class Example(grammar.TypeExpression):
            def _validate_union_(s, other, handshake):
                pass  # Let anything through

            def _build_union_(s, members):
                self.local['members'] = members
                return ...
        example = Example('Example')
        new_type_expr = example | 42
        self.assertEqual(self.local['members'], (example, 42))
        self.assertIs(new_type_expr, ...)
class TestTypeExpressionAnd(unittest.TestCase):
    """Tests for TypeExpression.__and__ (intersection) and its hooks."""

    def setUp(self):
        # Scratch dict the hook subclasses write into for later assertions.
        self.local = {}

    def test_identity(self):
        # Intersection with an equal expression collapses to the right operand.
        X = grammar.TypeExpression('X')
        X_ = grammar.TypeExpression('X')
        self.assertIs(X & X_, X_)

    def test_several(self):
        # Intersections flatten and de-duplicate like sets.
        X = grammar.TypeExpression('X')
        Y = grammar.TypeExpression('Y')
        Z = grammar.TypeExpression('Z')
        self.assertIsInstance(X & Y & Z, grammar.IntersectionTypeExpression)
        self.assertEqual(X & Y & Z & X & Z, Y & Z & X)

    def test_validate_intersection_called(self):
        class Example(grammar.TypeExpression):
            def _validate_intersection_(s, other, handshake):
                self.local['other'] = other
                self.local['handshake'] = handshake
        example = Example('Example')
        example & 42
        self.assertEqual(self.local['other'], 42)
        self.assertFalse(self.local['handshake'])

    def test_build_intersection_called(self):
        class Example(grammar.TypeExpression):
            def _validate_intersection_(s, other, handshake):
                pass  # Let anything through

            def _build_intersection_(s, members):
                self.local['members'] = members
                return ...
        example = Example('Example')
        new_type_expr = example & 42
        self.assertEqual(self.local['members'], (example, 42))
        self.assertIs(new_type_expr, ...)
class TestTypeExpressionLE(unittest.TestCase):
    """Tests that `a <= b` dispatches to a._is_subtype_(b)."""

    def setUp(self):
        # Scratch dict recording the hook's argument and forced return value.
        self.local = {}

    def test_is_subtype_called(self):
        class Example(grammar.TypeExpression):
            def _is_subtype_(s, other):
                self.local['other'] = other
                return self.local['return']
        example = Example('Example')
        other = Example('Other')
        self.local['return'] = True
        result = example <= other
        self.assertEqual(self.local['other'], other)
        self.assertTrue(result)
        self.local['return'] = False
        result = example <= other
        self.assertEqual(self.local['other'], other)
        self.assertFalse(result)
class TestTypeExpressionGE(unittest.TestCase):
    """Tests that `a >= b` dispatches to b._is_subtype_(a) (reversed operands)."""

    def setUp(self):
        # Scratch dict recording the hook's argument and forced return value.
        self.local = {}

    def test_is_subtype_called(self):
        class Example(grammar.TypeExpression):
            def _is_subtype_(s, other):
                self.local['other'] = other
                return self.local['return']
        example = Example('Example')
        other = Example('Other')
        self.local['return'] = True
        result = example >= other
        # Note: >= flips the operands, so `other` receives `example`.
        self.assertEqual(self.local['other'], example)
        self.assertTrue(result)
        self.local['return'] = False
        result = example >= other
        self.assertEqual(self.local['other'], example)
        self.assertFalse(result)
# TODO: test the following:
# - _SetOperationBase
# - UnionTypeExpression
# - IntersectionTypeExpression
# - MappingTypeExpression
# - Predicate
if __name__ == '__main__':
unittest.main()
|
biocore/qiime2
|
qiime2/core/type/tests/test_grammar.py
|
Python
|
bsd-3-clause
| 20,736
|
[
"scikit-bio"
] |
28011fea8d88715da3190e3fee5102461734a2f2d864f42234fd146e15e1ccff
|
#!/usr/bin/env python
import argparse
import datetime
import json
import os
import re
from urllib.request import urlretrieve
import yaml
class PackagedAnnotationMeta():
    """Validated metadata for a packaged annotation data bundle.

    Wraps the dict loaded from a package's meta.yml, fills in defaults for
    optional keys, validates required top-level and per-record keys, and
    derives a filesystem-safe unique package id.
    """

    @classmethod
    def from_file(cls, fname):
        """Load and validate metadata from a YAML file.

        Fixed: the file is now opened with a context manager so the handle
        is closed deterministically (the original left it to the GC).
        """
        with open(fname) as fh:
            meta = yaml.safe_load(fh)
        return cls(meta)

    def __init__(self, meta_dict):
        # Fill in defaults for optional keys before validating.
        if 'build' not in meta_dict:
            meta_dict['build'] = datetime.date.today().isoformat()
        if 'volume' not in meta_dict:
            meta_dict['volume'] = 1
        required_meta = ['name', 'build', 'volume', 'refgenome', 'records']
        for key in required_meta:
            if not meta_dict.get(key):
                raise KeyError(
                    'Required info "{0}" missing from metadata'
                    .format(key)
                )
        required_record_meta = ['id', 'name', 'version', 'format', 'source']
        for key in required_record_meta:
            for record in meta_dict['records']:
                if not record.get(key):
                    raise KeyError(
                        '{0}\n'
                        'Required info "{1}" missing from record metadata'
                        .format(record, key)
                    )
        self.meta = meta_dict
        self.meta['id'] = self._get_id()

    def _get_id(self):
        """Derive a unique, filesystem-safe id from name/refgenome/volume/build."""
        components = [
            self.meta['name'],
            self.meta['refgenome'],
            str(self.meta['volume']),
            str(self.meta['build'])
        ]
        # Replace spaces with underscores, then strip everything that is not
        # alphanumeric, underscore or hyphen.
        return '__'.join(
            [
                re.sub(r'[^a-zA-Z_0-9\-]', '', i.replace(' ', '_'))
                for i in components
            ]
        )

    def records(self, full_record_names=False):
        """Yield copies of the record dicts; optionally expand record names."""
        for record in self.meta['records']:
            ret = record.copy()
            if full_record_names:
                ret['name'] = self._full_record_name(record)
            yield ret

    def fullname(self):
        """Return a human-readable package name including genome and build."""
        return '{0} ({1}, vol:{2}/build:{3})'.format(
            self.meta['name'],
            self.meta['refgenome'],
            self.meta['volume'],
            self.meta['build']
        )

    def _full_record_name(self, record):
        """Return a record name qualified with version, genome and package."""
        return '{0} ({1}, {2}; from {3}/vol:{4}/build:{5})'.format(
            record['name'], record['version'],
            self.meta['refgenome'],
            self.meta['name'],
            self.meta['volume'],
            self.meta['build']
        )

    def dump(self, fname):
        """Write the (validated) metadata back out as YAML."""
        with open(fname, 'w') as fo:
            yaml.dump(
                self.meta, fo, allow_unicode=False, default_flow_style=False
            )
def fetch_data(source_url, target_file):
    """Download source_url to target_file (thin wrapper over urlretrieve)."""
    urlretrieve(source_url, target_file)
def meta_to_dm_records(meta, dbkey=None):
    """Build Galaxy data-table rows for every record of a packaged annotation.

    dbkey overrides the package's reference genome when provided.
    """
    return [
        {
            'value': '{0}:{1}'.format(meta.meta['id'], rec['id']),
            'dbkey': dbkey or meta.meta['refgenome'],
            'data_name': rec['name'],
            'data_id': rec['id'],
            'data_format': rec['format'],
            'package_id': meta.meta['id'],
            'package_name': meta.fullname(),
            'path': '{0}/{1}'.format(
                meta.meta['volume'],
                meta.meta['build']
            )
        }
        for rec in meta.records(full_record_names=True)
    ]
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('metadata')
    parser.add_argument(
        '-o', '--galaxy-datamanager-json',
        required=True
    )
    parser.add_argument('-t', '--target-directory', default=None)
    parser.add_argument('--dbkey', default=None)
    args = parser.parse_args()
    # Default to the current working directory; create the target if missing.
    if args.target_directory:
        if not os.path.isdir(args.target_directory):
            os.mkdir(args.target_directory)
    else:
        args.target_directory = os.getcwd()
    # Download every record of the package alongside a copy of its metadata.
    meta = PackagedAnnotationMeta.from_file(args.metadata)
    for record in meta.records():
        fetch_data(
            record['source'],
            os.path.join(args.target_directory, record['id'])
        )
    meta.dump(os.path.join(args.target_directory, 'meta.yml'))
    # Finally, we prepare the metadata for the new data table record ...
    data_manager_dict = {
        'data_tables': {
            'packaged_annotation_data': meta_to_dm_records(meta, args.dbkey)
        }
    }
    # ... and save it to the json results file
    with open(args.galaxy_datamanager_json, 'w') as fh:
        json.dump(data_manager_dict, fh, sort_keys=True)
|
davebx/tools-iuc
|
data_managers/data_manager_packaged_annotation_data/data_manager/install_packaged_annotation_data.py
|
Python
|
mit
| 4,611
|
[
"Galaxy"
] |
3ea2520e44280d4bc54f513924a18912252c6b4c2926f0ecfd5c07b47aac942e
|
from __future__ import absolute_import
from builtins import zip
from builtins import map
from builtins import str
from builtins import range
from builtins import object
from nose.tools import (assert_equal, assert_not_equal, raises, assert_true,
assert_false)
from nose.plugins.skip import SkipTest
from .test_helpers import (CallIdentity, prepend_exception_message,
make_1d_traj, raises_with_message_like,
CalvinistDynamics)
import openpathsampling as paths
import openpathsampling.engines.openmm as peng
from openpathsampling.ensemble import *
import logging
logging.getLogger('openpathsampling.ensemble').setLevel(logging.DEBUG)
logging.getLogger('openpathsampling.initialization').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.storage').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.netcdfplus').setLevel(logging.CRITICAL)
logger = logging.getLogger('openpathsampling.tests.testensemble')
import re
import random
def wrap_traj(traj, start, length):
    """Pad traj so that the original frames begin at index `start` and the
    result has total length `length`: the front is filled with copies of
    traj[0] and the back with copies of traj[-1]. Used to test the slice
    restricted trajectories."""
    if (start < 0) or (length < len(traj)+start):
        raise ValueError("""wrap_traj: start < 0 or length < len(traj)+start
                         {0} < 0 or {1} < {2}+{0}""".format(
                             start, length, len(traj)))
    padded = traj[:]  # shallow copy preserves the input's sequence type
    for _ in range(start):
        padded.insert(0, traj[0])
    tail_count = length - (len(traj) + start)
    for _ in range(tail_count):
        padded.append(traj[-1])
    return padded
def test_wrap_traj():
    """Testing wrap_traj (oh gods, the meta! a test for a test function!)"""
    intraj = [1, 2, 3]
    # Front padding only, then front and back, then slicing the original out.
    assert_equal(wrap_traj(intraj, 3, 6), [1, 1, 1, 1, 2, 3])
    assert_equal(wrap_traj(intraj, 3, 8), [1, 1, 1, 1, 2, 3, 3, 3])
    assert_equal(wrap_traj(intraj, 3, 8)[slice(3, 6)], intraj)
def build_trajdict(trajtypes, lower, upper):
upperadddict = {'a' : 'in', 'b' : 'out', 'c' : 'cross', 'o' : 'hit'}
loweradddict = {'a' : 'out', 'b' : 'in', 'c' : 'in', 'o' : 'hit'}
lowersubdict = {'a' : 'in', 'b' : 'out', 'c' : 'cross', 'o' : 'hit'}
uppersubdict = {'a' : 'out', 'b' : 'in', 'c' : 'in', 'o' : 'hit'}
adjustdict = {'a' : (lambda x: -0.05*x), 'b' : (lambda x: 0.05*x),
'c' : (lambda x: 0.05*x + 0.16), 'o' : (lambda x: 0.0)}
mydict = {}
for mystr in trajtypes:
upperaddkey = "upper"
uppersubkey = "upper"
loweraddkey = "lower"
lowersubkey = "lower"
delta = []
for char in mystr:
upperaddkey += "_"+upperadddict[char]
loweraddkey += "_"+loweradddict[char]
uppersubkey += "_"+uppersubdict[char]
lowersubkey += "_"+lowersubdict[char]
delta.append(adjustdict[char](random.randint(1, 4)))
mydict[upperaddkey] = list(map(upper.__add__, delta))
mydict[loweraddkey] = list(map(lower.__add__, delta))
mydict[uppersubkey] = list(map(upper.__sub__, delta))
mydict[lowersubkey] = list(map(lower.__sub__, delta))
return mydict
def tstr(ttraj):
    """Return the str() of the trajectory's frames viewed as a list."""
    return str(list(ttraj))
def results_upper_lower(adict):
    """Duplicate each expected result under 'upper_' and 'lower_' keys."""
    res_dict = {}
    for name, expected in adict.items():
        res_dict['upper_' + name] = expected
        res_dict['lower_' + name] = expected
    return res_dict
def setUp():
    ''' Setup for tests of classes in ensemble.py: build the CV, the
    volumes, and the shared dictionary of test trajectories. '''
    #random.seed
    global lower, upper, op, vol1, vol2, vol3, ttraj
    lower = 0.1
    upper = 0.5
    op = paths.FunctionCV("Id", lambda snap : snap.coordinates[0][0])
    vol1 = paths.CVDefinedVolume(op, lower, upper).named('stateA')
    vol2 = paths.CVDefinedVolume(op, -0.1, 0.7).named('interface0')
    vol3 = paths.CVDefinedVolume(op, 2.0, 2.5).named('stateB')
    # trajectory name codes:
    #   in  : in the state
    #   out : out of the state
    #   hit : on the state border
    # per-letter deltas from the state edge:
    #   a < 0 ; 0 < b < 0.2 ; c > 0.2 ; o = 0
    trajtypes = ["a", "o", "aa", "ab", "aob", "bob", "aba", "aaa", "abcba",
                 "abaa", "abba", "abaab", "ababa", "abbab", "ac", "bc",
                 "abaaba", "aobab", "abab", "abcbababcba", "aca", "abc",
                 "acaca", "acac", "caca", "aaca", "baca", "aaba", "aab",
                 "aabbaa", "abbb", "aaab"
                ]
    ttraj = build_trajdict(trajtypes, lower, upper)
    # promote the raw coordinate lists into actual trajectory objects
    for name in list(ttraj.keys()):
        coords = ttraj[name]
        ttraj[name] = make_1d_traj(coordinates=coords,
                                   velocities=[1.0]*len(coords))
def in_out_parser(testname):
    """Convert a test-trajectory name like 'upper_in_out_in' into the
    collapsed in/out sequence it encodes, e.g. ['in', 'out', 'in'].

    'hit' frames (exactly on the boundary) count as 'in' for both the
    upper and lower variants; 'cross' frames count as 'out'. Consecutive
    duplicates are collapsed; unrecognized parts are ignored.
    """
    allowed_parts = ['in', 'out']
    parts = re.split("_", testname)
    res = []
    for part in parts:
        to_append = None
        if part in allowed_parts:
            to_append = part
        elif part == 'hit':
            # boundary frames count as inside for upper and lower alike
            # (the two original branches both mapped 'hit' to 'in')
            if 'upper' in parts or 'lower' in parts:
                to_append = 'in'
        elif part == 'cross':
            to_append = 'out'
        if to_append is not None:  # idiom fix: was `!= None`
            # collapse consecutive repeats
            if not res or to_append != res[-1]:
                res.append(to_append)
    return res
class EnsembleTest(object):
    """Shared machinery for the ensemble test classes in this module."""
    def __init__(self):
        self.length0 = LengthEnsemble(0)

    def _single_test(self, ensemble_fcn, traj, res, failmsg):
        """Assert ensemble_fcn(traj) == res, tagging any failure with
        failmsg via prepend_exception_message."""
        try:
            assert_equal(ensemble_fcn(traj), res)
        except AssertionError as e:
            prepend_exception_message(e, failmsg)
            raise

    def _test_everything(self, test_fcn, non_default=[], default=False):
        """
        Runs tests using *all* the trajectory test suite. This is the
        ultimate in test-running simplicity!!

        Every trajectory is expected to yield `default`, except those
        listed in `non_default` (bare or with upper_/lower_ prefixes),
        which must yield `not default`.
        """
        expected = dict.fromkeys(ttraj, default)
        for name in non_default:
            for key in (name, "lower_" + name, "upper_" + name):
                if key in ttraj:
                    expected[key] = not default
        for name in list(expected.keys()):
            logging.getLogger('openpathsampling.ensemble').debug(
                "Starting test for " + name + "("+str(ttraj[name])+")"
            )
            failmsg = "Failure in "+name+"("+str(ttraj[name])+"): "
            self._single_test(test_fcn, ttraj[name], expected[name], failmsg)

    def _run(self, results):
        """Actually run tests on the trajectory and the wrapped trajectory.
        Nearly all of the tests are just this simple. By creating custom error
        messages (using prepend_exception_message) we can wrap the many tests
        into loops instead of making tons of lines of code.
        """
        for name in list(results.keys()):
            traj = ttraj[name]
            expected = results[name]
            failmsg = "Failure in "+name+"("+str(traj)+"): "
            self._single_test(self.ensemble, traj, expected, failmsg)
            # the same result must hold after padding the trajectory...
            wrapped = wrap_traj(traj, self.wrapstart, self.wrapend)
            failmsg = "Failure in wrapped "+name+"("+str(traj)+"): "
            self._single_test(self.ensemble, wrapped, expected, failmsg)
            # ...and for the slice-restricted ensemble on the padded traj
            failmsg = "Failure in slice_ens "+name+"("+str(traj)+"): "
            self._single_test(self.slice_ens, wrapped, expected, failmsg)
class testPartOutXEnsemble(EnsembleTest):
    """Tests for PartOutXEnsemble: true iff some frame leaves vol1."""
    def setUp(self):
        self.leaveX = PartOutXEnsemble(vol1)

    def test_leaveX(self):
        """PartOutXEnsemble passes the trajectory test suite"""
        for name in list(ttraj.keys()):
            expected = "out" in in_out_parser(name)
            failmsg = "Failure in "+name+"("+str(ttraj[name])+"): "
            self._single_test(self.leaveX, ttraj[name], expected, failmsg)

    def test_invert(self):
        """Inverting PartOutXEnsemble flips every trajectory result."""
        inverted = ~self.leaveX
        for name in list(ttraj.keys()):
            expected = "out" not in in_out_parser(name)
            failmsg = "Failure in "+name+"("+str(ttraj[name])+"): "
            self._single_test(inverted, ttraj[name], expected, failmsg)

    def test_can_append(self):
        self._test_everything(self.leaveX.can_append, default=True)

    def test_can_prepend(self):
        self._test_everything(self.leaveX.can_prepend, default=True)

    def test_strict_can_append(self):
        self._test_everything(self.leaveX.strict_can_append, default=True)

    def test_strict_can_prepend(self):
        self._test_everything(self.leaveX.strict_can_prepend, default=True)

    def test_leaveX_0(self):
        """PartOutXEnsemble treatment of zero-length trajectory"""
        empty = paths.Trajectory([])
        assert_equal(self.leaveX(empty), False)
        assert_equal(self.leaveX.can_append(empty), True)
        assert_equal(self.leaveX.can_prepend(empty), True)

    def test_leaveX_str(self):
        volstr = "{x|Id(x) in [0.1, 0.5]}"
        assert_equal(self.leaveX.__str__(),
                     "exists t such that x[t] in (not "+volstr+")")
class testAllInXEnsemble(EnsembleTest):
    """Tests for AllInXEnsemble: true iff every frame is inside vol1."""
    def setUp(self):
        self.inX = AllInXEnsemble(vol1)

    def _check_against_suite(self, fcn):
        # expected True unless the trajectory name implies an 'out' frame
        for name in list(ttraj.keys()):
            expected = "out" not in in_out_parser(name)
            failmsg = "Failure in "+name+"("+str(ttraj[name])+"): "
            self._single_test(fcn, ttraj[name], expected, failmsg)

    def test_inX(self):
        """AllInXEnsemble passes the trajectory test suite"""
        self._check_against_suite(self.inX)

    def test_can_append(self):
        self._check_against_suite(self.inX.can_append)

    def test_can_prepend(self):
        self._check_against_suite(self.inX.can_prepend)

    def test_strict_can_append(self):
        self._check_against_suite(self.inX.strict_can_append)

    def test_strict_can_prepend(self):
        self._check_against_suite(self.inX.strict_can_prepend)

    def test_inX_0(self):
        """AllInXEnsemble treatment of zero-length trajectory"""
        empty = paths.Trajectory([])
        assert_equal(self.inX(empty), False)
        assert_equal(self.inX.can_append(empty), True)
        assert_equal(self.inX.can_prepend(empty), True)

    def test_inX_str(self):
        volstr = "{x|Id(x) in [0.1, 0.5]}"
        assert_equal(self.inX.__str__(),
                     "x[t] in "+volstr+" for all t")
class testAllOutXEnsemble(EnsembleTest):
    """Tests for AllOutXEnsemble: true iff no frame is inside vol1."""
    def setUp(self):
        self.outX = AllOutXEnsemble(vol1)

    def _check_against_suite(self, fcn):
        # expected True unless the trajectory name implies an 'in' frame
        for name in list(ttraj.keys()):
            expected = "in" not in in_out_parser(name)
            failmsg = "Failure in "+name+"("+str(ttraj[name])+"): "
            self._single_test(fcn, ttraj[name], expected, failmsg)

    def test_outX(self):
        """AllOutXEnsemble passes the trajectory test suite"""
        self._check_against_suite(self.outX)

    def test_can_append(self):
        self._check_against_suite(self.outX.can_append)

    def test_can_prepend(self):
        self._check_against_suite(self.outX.can_prepend)

    def test_strict_can_append(self):
        self._check_against_suite(self.outX.strict_can_append)

    def test_strict_can_prepend(self):
        self._check_against_suite(self.outX.strict_can_prepend)

    def test_outX_0(self):
        """AllOutXEnsemble treatment of zero-length trajectory"""
        empty = paths.Trajectory([])
        assert_equal(self.outX(empty), False)
        assert_equal(self.outX.can_append(empty), True)
        assert_equal(self.outX.can_prepend(empty), True)

    def test_outX_str(self):
        volstr = "{x|Id(x) in [0.1, 0.5]}"
        assert_equal(self.outX.__str__(),
                     "x[t] in (not "+volstr+") for all t")
class testPartInXEnsemble(EnsembleTest):
    """Tests for PartInXEnsemble: true iff some frame is inside vol1."""
    def setUp(self):
        self.hitX = PartInXEnsemble(vol1)

    def test_hitX(self):
        """PartInXEnsemble passes the trajectory test suite"""
        for name in list(ttraj.keys()):
            expected = "in" in in_out_parser(name)
            failmsg = "Failure in "+name+"("+str(ttraj[name])+"): "
            self._single_test(self.hitX, ttraj[name], expected, failmsg)

    def test_can_append(self):
        self._test_everything(self.hitX.can_append, default=True)

    def test_can_prepend(self):
        self._test_everything(self.hitX.can_prepend, default=True)

    def test_strict_can_append(self):
        self._test_everything(self.hitX.strict_can_append, default=True)

    def test_strict_can_prepend(self):
        self._test_everything(self.hitX.strict_can_prepend, default=True)

    def test_hitX_0(self):
        """PartInXEnsemble treatment of zero-length trajectory"""
        empty = paths.Trajectory([])
        assert_equal(self.hitX(empty), False)
        assert_equal(self.hitX.can_append(empty), True)
        assert_equal(self.hitX.can_prepend(empty), True)

    def test_hitX_str(self):
        volstr = "{x|Id(x) in [0.1, 0.5]}"
        assert_equal(self.hitX.__str__(),
                     "exists t such that x[t] in "+volstr)
class testExitsXEnsemble(EnsembleTest):
    """Tests for ExitsXEnsemble: accepts trajectories containing a frame
    in vol1 immediately followed by a frame outside vol1. Also exercises
    EnsembleTest._run, which re-tests wrapped (padded) trajectories and a
    slice-restricted variant of the ensemble."""
    def setUp(self):
        self.ensemble = ExitsXEnsemble(vol1)
        # longest ttraj is 6 = 9-3 frames long
        self.slice_ens = ExitsXEnsemble(vol1, slice(3,9))
        # padding parameters consumed by EnsembleTest._run via wrap_traj
        self.wrapstart = 3
        self.wrapend = 12
    def test_noncrossing(self):
        '''ExitsXEnsemble for noncrossing trajectories'''
        results = { 'upper_in' : False,
                    'upper_out' : False,
                    'lower_in' : False,
                    'lower_out' : False
                  }
        self._run(results)
    def test_hitsborder(self):
        '''ExitsXEnsemble for border-hitting trajectories'''
        # 'hit' frames count as inside, so out-hit-out contains an exit
        results = { 'lower_in_hit_in' : False,
                    'upper_in_hit_in' : False,
                    'lower_out_hit_out' : True,
                    'upper_out_hit_out' : True
                  }
        self._run(results)
    def test_exit(self):
        '''ExitsXEnsemble for exiting trajectories'''
        results = { 'lower_in_out' : True,
                    'upper_in_out' : True,
                    'lower_in_hit_out' : True,
                    'upper_in_hit_out' : True,
                    'lower_out_in_out_in' : True,
                    'upper_out_in_out_in' : True,
                    'lower_in_out_in_out' : True,
                    'upper_in_out_in_out' : True
                  }
        self._run(results)
    def test_entrance(self):
        '''ExitsXEnsemble for entering trajectories'''
        # pure entrances are rejected; mixed paths still contain an exit
        results = { 'lower_out_in' : False,
                    'upper_out_in' : False,
                    'lower_out_hit_in' : False,
                    'upper_out_hit_in' : False,
                    'lower_out_in_out_in' : True,
                    'upper_out_in_out_in' : True,
                    'lower_in_out_in_out' : True,
                    'upper_in_out_in_out' : True
                  }
        self._run(results)
    def test_str(self):
        assert_equal(self.ensemble.__str__(),
            'exists x[t], x[t+1] such that x[t] in {0} and x[t+1] not in {0}'.format(vol1))
class testEntersXEnsemble(testExitsXEnsemble):
    """Tests for EntersXEnsemble: accepts trajectories containing a frame
    outside vol1 immediately followed by a frame inside vol1. Subclasses
    testExitsXEnsemble so inherited tests rerun against this ensemble."""
    def setUp(self):
        self.ensemble = EntersXEnsemble(vol1)
        # longest ttraj is 6 = 9-3 frames long
        self.slice_ens = EntersXEnsemble(vol1, slice(3,9))
        # padding parameters consumed by EnsembleTest._run via wrap_traj
        self.wrapstart = 3
        self.wrapend = 12
    def test_noncrossing(self):
        '''EntersXEnsemble for noncrossing trajectories'''
        results = { 'upper_in_in_in' : False,
                    'upper_out_out_out' : False,
                    'lower_in_in_in' : False,
                    'lower_out_out_out' : False
                  }
        self._run(results)
    def test_hitsborder(self):
        '''EntersXEnsemble for border-hitting trajectories'''
        # 'hit' frames count as inside, so out-hit-out contains an entrance
        results = { 'lower_in_hit_in' : False,
                    'upper_in_hit_in' : False,
                    'lower_out_hit_out' : True,
                    'upper_out_hit_out' : True
                  }
        self._run(results)
    def test_exit(self):
        '''EntersXEnsemble for exiting trajectories'''
        # pure exits are rejected; mixed paths still contain an entrance
        results = { 'lower_in_out' : False,
                    'upper_in_out' : False,
                    'lower_in_hit_out' : False,
                    'upper_in_hit_out' : False,
                    'lower_out_in_out_in' : True,
                    'upper_out_in_out_in' : True,
                    'lower_in_out_in_out' : True,
                    'upper_in_out_in_out' : True
                  }
        self._run(results)
    def test_entrance(self):
        '''EntersXEnsemble for entering trajectories'''
        results = { 'lower_out_in' : True,
                    'upper_out_in' : True,
                    'lower_out_hit_in' : True,
                    'upper_out_hit_in' : True,
                    'lower_out_in_out_in' : True,
                    'upper_out_in_out_in' : True,
                    'lower_in_out_in_out' : True,
                    'upper_in_out_in_out' : True
                  }
        self._run(results)
    def test_str(self):
        assert_equal(self.ensemble.__str__(),
            'exists x[t], x[t+1] such that x[t] not in {0} and x[t+1] in {0}'.format(vol1))
class testSequentialEnsemble(EnsembleTest):
    """Tests for SequentialEnsemble: construction errors, can_append /
    can_prepend (normal and strict), transition frames, and full-path
    matching for pseudo-TIS, pseudo-minus, and true TIS sequences.

    Fix: the logging call in test_sequential_generate_first_tis used the
    stale logger name 'opentis.ensemble'; it now uses
    'openpathsampling.ensemble' like every other call in this module, so
    its debug output goes to the configured logger.
    """
    def setUp(self):
        # building-block ensembles on the state (vol1) and interface (vol2)
        self.inX = AllInXEnsemble(vol1)
        self.outX = AllOutXEnsemble(vol1)
        self.hitX = PartInXEnsemble(vol1)
        self.leaveX = PartOutXEnsemble(vol1)
        self.enterX = EntersXEnsemble(vol1)
        self.exitX = ExitsXEnsemble(vol1)
        self.inInterface = AllInXEnsemble(vol2)
        self.leaveX0 = PartOutXEnsemble(vol2)
        self.inX0 = AllInXEnsemble(vol2)
        self.length1 = LengthEnsemble(1)
        # pseudo_tis and pseudo_minus assume that the interface is equal to
        # the state boundary
        self.pseudo_tis = SequentialEnsemble( [
                    self.inX & self.length1,
                    self.outX,
                    self.inX & self.length1 ]
        )
        self.pseudo_minus = SequentialEnsemble( [
                    self.inX & self.length1,
                    self.outX,
                    self.inX,
                    self.outX,
                    self.inX & self.length1 ]
        )
        # true TIS additionally requires leaving the interface volume
        self.tis = SequentialEnsemble([
            self.inX & self.length1,
            self.outX & self.leaveX0,
            self.inX & self.length1,
        ])

    @raises(ValueError)
    def test_maxminoverlap_size(self):
        """SequentialEnsemble errors if max/min overlap sizes different"""
        SequentialEnsemble([self.inX, self.outX, self.inX], (0,0), (0,0,0))

    @raises(ValueError)
    def test_maxoverlap_ensemble_size(self):
        """SequentialEnsemble errors if overlap sizes don't match ensemble size"""
        SequentialEnsemble([self.inX, self.outX, self.inX], (0,0,0), (0,0,0))

    @raises(ValueError)
    def test_minmax_order(self):
        """SequentialEnsemble errors if min_overlap > max_overlap"""
        SequentialEnsemble([self.inX, self.outX, self.inX], (0,1), (0,0))

    def test_allowed_initializations(self):
        """SequentialEnsemble initializes correctly with defaults"""
        A = SequentialEnsemble([self.inX, self.outX, self.inX], (0,0), (0,0))
        B = SequentialEnsemble([self.inX, self.outX, self.inX],0,0)
        C = SequentialEnsemble([self.inX, self.outX, self.inX])
        assert_equal(A.min_overlap,B.min_overlap)
        assert_equal(A.min_overlap,C.min_overlap)
        assert_equal(A.max_overlap,B.max_overlap)
        assert_equal(A.max_overlap,C.max_overlap)

    def test_overlap_max(self):
        """SequentialEnsemble allows overlaps up to overlap max, no more"""
        raise SkipTest

    def test_overlap_min(self):
        """SequentialEnsemble requires overlaps of at least overlap min"""
        raise SkipTest

    def test_overlap_max_inf(self):
        """SequentialEnsemble works if max overlap is infinite"""
        raise SkipTest

    def test_overlap_min_gap(self):
        """SequentialEnsemble works if min overlap is negative (gap)"""
        raise SkipTest

    def test_overlap_max_gap(self):
        """SequentialEnsemble works if max overlap is negative (gap)"""
        raise SkipTest

    def test_seqens_order_combo(self):
        # regression test for #229: `&` combination with LengthEnsemble
        # must behave the same regardless of operand order
        import numpy as np
        op = paths.FunctionCV(name="x", f=lambda snap : snap.xyz[0][0])
        bigvol = paths.CVDefinedVolume(collectivevariable=op,
                                       lambda_min=-100.0, lambda_max=100.0)
        traj = paths.Trajectory([
            peng.MDSnapshot(
                coordinates=np.array([[-0.5, 0.0]]),
                velocities=np.array([[0.0,0.0]])
            )
        ])
        vol_ens = paths.AllInXEnsemble(bigvol)
        len_ens = paths.LengthEnsemble(5)
        combo1 = vol_ens & len_ens
        combo2 = len_ens & vol_ens
        seq1 = SequentialEnsemble([combo1])
        seq2 = SequentialEnsemble([combo2])
        logger.debug("Checking combo1")
        assert_equal(combo1.can_append(traj), True)
        logger.debug("Checking combo2")
        assert_equal(combo2.can_append(traj), True)
        logger.debug("Checking seq1")
        assert_equal(seq1.can_append(traj), True)
        logger.debug("Checking seq2")
        assert_equal(seq2.can_append(traj), True)

    def test_can_append_tis(self):
        """SequentialEnsemble as TISEnsemble knows when it can append"""
        results = { 'upper_in_out' : True,
                    'lower_in_out' : True,
                    'upper_in_out_in' : False,
                    'lower_in_out_in' : False,
                    'upper_in' : True,
                    'lower_in' : True,
                    'upper_in_in_in' : False,
                    'lower_in_in_in' : False,
                    'upper_out_out_out' : True,
                    'lower_out_out_out' : True,
                    'upper_out_in' : False,
                    'lower_out_in' : False,
                    'upper_out' : True,
                    'lower_out' : True,
                    'upper_in_out_in_in' : False,
                    'lower_in_out_in_in' : False,
                    'upper_in_out_in_out_in' : False,
                    'lower_in_out_in_out_in' : False
                  }
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
            self._single_test(self.pseudo_tis.can_append,
                              ttraj[test], results[test], failmsg)

    def test_strict_can_append_tis(self):
        # strict version: the trajectory must also start correctly
        results = {
            'upper_in_out' : True,
            'lower_in_out' : True,
            'upper_in_out_in' : False,
            'lower_in_out_in' : False,
            'upper_in' : True,
            'lower_in' : True,
            'upper_in_in_in' : False,
            'lower_in_in_in' : False,
            'upper_out_out_out' : False,
            'lower_out_out_out' : False,
            'upper_out_in' : False,
            'lower_out_in' : False,
            'upper_out' : False,
            'lower_out' : False,
            'upper_in_out_in_in' : False,
            'lower_in_out_in_in' : False,
            'upper_in_out_in_out_in' : False,
            'lower_in_out_in_out_in' : False
        }
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
            self._single_test(self.pseudo_tis.strict_can_append,
                              ttraj[test], results[test], failmsg)

    def test_can_append_pseudominus(self):
        """SequentialEnsemble as Pseudo-MinusEnsemble knows when it can append"""
        results = {
            'upper_in_out' : True,
            'lower_in_out' : True,
            'upper_in_out_in' : True,
            'lower_in_out_in' : True,
            'upper_in' : True,
            'lower_in' : True,
            'upper_in_in_in' : True,
            'lower_in_in_in' : True,
            'upper_out_out_out' : True,
            'lower_out_out_out' : True,
            'upper_out_in' : True,
            'lower_out_in' : True,
            'upper_out' : True,
            'lower_out' : True,
            'upper_in_out_in_in' : True,
            'lower_in_out_in_in' : True,
            'upper_in_out_in_out_in' : False,
            'lower_in_out_in_out_in' : False,
            'upper_in_out_in_in_out' : True,
            'lower_in_out_in_in_out' : True,
            'upper_out_in_out' : True,
            'lower_out_in_out' : True,
            'upper_out_in_in_out' : True,
            'lower_out_in_in_out' : True,
            'upper_out_in_out_in': False,
            'lower_out_in_out_in': False,
            'upper_out_in_in_out_in' : False,
            'lower_out_in_in_out_in' : False,
            'upper_in_cross_in' : True,
            'lower_in_cross_in' : True,
            'upper_in_cross_in_cross' : True,
            'lower_in_cross_in_cross' : True,
            'upper_cross_in_cross_in' : False,
            'lower_cross_in_cross_in' : False,
            'upper_in_cross_in_cross_in' : False,
            'lower_in_cross_in_cross_in' : False
        }
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
            self._single_test(self.pseudo_minus.can_append,
                              ttraj[test], results[test], failmsg)

    def test_strict_can_append_pseudominus(self):
        # strict version: the trajectory must also start correctly
        results = {
            'upper_in_out' : True,
            'lower_in_out' : True,
            'upper_in_out_in' : True,
            'lower_in_out_in' : True,
            'upper_in' : True,
            'lower_in' : True,
            'upper_in_in_in' : False,
            'lower_in_in_in' : False,
            'upper_out_out_out' : False,
            'lower_out_out_out' : False,
            'upper_out_in' : False,
            'lower_out_in' : False,
            'upper_out' : False,
            'lower_out' : False,
            'upper_in_out_in_in' : True,
            'lower_in_out_in_in' : True,
            'upper_in_out_in_out_in' : False,
            'lower_in_out_in_out_in' : False,
            'upper_in_out_in_in_out' : True,
            'lower_in_out_in_in_out' : True,
            'upper_out_in_out' : False,
            'lower_out_in_out' : False,
            'upper_out_in_in_out' : False,
            'lower_out_in_in_out' : False,
            'upper_out_in_out_in': False,
            'lower_out_in_out_in': False,
            'upper_out_in_in_out_in' : False,
            'lower_out_in_in_out_in' : False,
            'upper_in_cross_in' : True,
            'lower_in_cross_in' : True,
            'upper_in_cross_in_cross' : True,
            'lower_in_cross_in_cross' : True,
            'upper_cross_in_cross_in' : False,
            'lower_cross_in_cross_in' : False,
            'upper_in_cross_in_cross_in' : False,
            'lower_in_cross_in_cross_in' : False
        }
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
            self._single_test(self.pseudo_minus.strict_can_append,
                              ttraj[test], results[test], failmsg)

    def test_can_prepend_pseudo_tis(self):
        """SequentialEnsemble as Pseudo-TISEnsemble knows when it can prepend"""
        results = {
            'upper_in_out' : False,
            'lower_in_out' : False,
            'upper_in_out_in' : False,
            'lower_in_out_in' : False,
            'upper_in' : True,
            'lower_in' : True,
            'upper_in_in_in' : False,
            'lower_in_in_in' : False,
            'upper_out_out_out' : True,
            'lower_out_out_out' : True,
            'upper_out_in' : True,
            'lower_out_in' : True,
            'upper_out' : True,
            'lower_out' : True,
            'upper_in_out_in_in' : False,
            'lower_in_out_in_in' : False,
            'upper_in_out_in_out_in' : False,
            'lower_in_out_in_out_in' : False
        }
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
            self._single_test(self.pseudo_tis.can_prepend,
                              ttraj[test], results[test], failmsg)

    def test_strict_can_prepend_pseudo_tis(self):
        # strict version: the trajectory must also end correctly
        results = {
            'upper_in_out' : False,
            'lower_in_out' : False,
            'upper_in_out_in' : False,
            'lower_in_out_in' : False,
            'upper_in' : True,
            'lower_in' : True,
            'upper_in_in_in' : False,
            'lower_in_in_in' : False,
            'upper_out_out_out' : False,
            'lower_out_out_out' : False,
            'upper_out_in' : True,
            'lower_out_in' : True,
            'upper_out' : False,
            'lower_out' : False,
            'upper_in_out_in_in' : False,
            'lower_in_out_in_in' : False,
            'upper_in_out_in_out_in' : False,
            'lower_in_out_in_out_in' : False
        }
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
            self._single_test(self.pseudo_tis.strict_can_prepend,
                              ttraj[test], results[test], failmsg)

    def test_can_prepend_pseudo_minus(self):
        results = {
            'upper_in_out' : True,
            'lower_in_out' : True,
            'upper_in_out_in' : True,
            'lower_in_out_in' : True,
            'upper_in' : True,
            'lower_in' : True,
            'upper_in_in_in' : True,
            'lower_in_in_in' : True,
            'upper_out_out_out' : True,
            'lower_out_out_out' : True,
            'upper_out_in' : True,
            'lower_out_in' : True,
            'upper_out' : True,
            'lower_out' : True,
            'upper_in_out_in_in' : False,
            'lower_in_out_in_in' : False,
            'upper_in_out_in_out_in' : False,
            'lower_in_out_in_out_in' : False,
            'upper_in_out_in_in_out' : False,
            'lower_in_out_in_in_out' : False,
            'upper_out_in_out' : True,
            'lower_out_in_out' : True,
            'upper_out_in_in_out' : True,
            'lower_out_in_in_out' : True,
            'upper_out_in_out_in': True,
            'lower_out_in_out_in': True,
            'upper_out_in_in_out_in' : True,
            'lower_out_in_in_out_in' : True
        }
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
            self._single_test(self.pseudo_minus.can_prepend,
                              ttraj[test], results[test], failmsg)

    def test_strict_can_prepend_pseudo_minus(self):
        # strict version: the trajectory must also end correctly
        results = {
            'upper_in_out' : False,
            'lower_in_out' : False,
            'upper_in_out_in' : True,
            'lower_in_out_in' : True,
            'upper_in' : True,
            'lower_in' : True,
            'upper_in_in_in' : False,
            'lower_in_in_in' : False,
            'upper_out_out_out' : False,
            'lower_out_out_out' : False,
            'upper_out_in' : True,
            'lower_out_in' : True,
            'upper_out' : False,
            'lower_out' : False,
            'upper_in_out_in_in' : False,
            'lower_in_out_in_in' : False,
            'upper_in_out_in_out_in' : False,
            'lower_in_out_in_out_in' : False,
            'upper_in_out_in_in_out' : False,
            'lower_in_out_in_in_out' : False,
            'upper_out_in_out' : False,
            'lower_out_in_out' : False,
            'upper_out_in_in_out' : False,
            'lower_out_in_in_out' : False,
            'upper_out_in_out_in': True,
            'lower_out_in_out_in': True,
            'upper_out_in_in_out_in' : True,
            'lower_out_in_in_out_in' : True
        }
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
            self._single_test(self.pseudo_minus.strict_can_prepend,
                              ttraj[test], results[test], failmsg)

    def test_sequential_transition_frames(self):
        """SequentialEnsemble identifies transitions frames correctly"""
        ensemble = SequentialEnsemble([self.inX, self.outX])
        results = {'upper_in_in_in' : [3],
                   'upper_out_out_out' : [],
                   'upper_in_out_in' : [1,2],
                   'upper_in_out' : [1,2]
                  }
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
            self._single_test(ensemble.transition_frames,
                              ttraj[test], results[test], failmsg)

    def test_sequential_simple_in_out_call(self):
        """Simplest sequential ensemble identifies correctly"""
        ensemble = SequentialEnsemble([self.inX, self.outX])
        results = {'upper_in_in_in' : False,
                   'upper_out_out_out' : False,
                   'upper_in_out_in' : False,
                   'upper_in_out' : True
                  }
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
            self._single_test(ensemble,
                              ttraj[test], results[test], failmsg)

    def test_sequential_in_out(self):
        """SequentialEnsembles based on In/AllOutXEnsemble"""
        # idea: for each ttraj, use the key name to define in/out behavior,
        # dynamically construct a SequentialEnsemble
        ens_dict = {'in' : self.inX, 'out' : self.outX }
        for test in list(ttraj.keys()):
            ens_list = in_out_parser(test)
            ens = []
            # how to pick ensembles is specific to this test
            for ens_type in ens_list:
                ens.append(ens_dict[ens_type])
            ensemble = SequentialEnsemble(ens)
            failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
            self._single_test(ensemble, ttraj[test], True, failmsg)

    def test_sequential_pseudo_tis(self):
        """SequentialEnsemble as Pseudo-TISEnsemble identifies paths"""
        results = {}
        for test in list(ttraj.keys()):
            results[test] = False
        results['upper_in_out_in'] = True
        results['lower_in_out_in'] = True
        results['upper_in_out_out_in'] = True
        results['lower_in_out_out_in'] = True
        results['lower_in_out_cross_out_in'] = True
        results['upper_in_out_cross_out_in'] = True
        results['upper_in_cross_in'] = True
        results['lower_in_cross_in'] = True
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
            self._single_test(self.pseudo_tis, ttraj[test], results[test],
                              failmsg)

    def test_sequential_pseudo_minus(self):
        """SequentialEnsemble as Pseudo-MinusEnsemble identifies paths"""
        results = {}
        for test in list(ttraj.keys()):
            results[test] = False
        results['upper_in_out_in_out_in'] = True
        results['lower_in_out_in_out_in'] = True
        results['upper_in_out_in_in_out_in'] = True
        results['lower_in_out_in_in_out_in'] = True
        results['upper_in_cross_in_cross_in'] = True
        results['lower_in_cross_in_cross_in'] = True
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
            self._single_test(self.pseudo_minus, ttraj[test], results[test],
                              failmsg)

    def test_sequential_tis(self):
        """SequentialEnsemble as TISEnsemble identifies paths"""
        results = {}
        for test in list(ttraj.keys()):
            results[test] = False
        results['upper_in_out_cross_out_in'] = True
        results['lower_in_out_cross_out_in'] = True
        results['upper_in_cross_in'] = True
        results['lower_in_cross_in'] = True
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
            self._single_test(self.tis, ttraj[test], results[test], failmsg)

    def test_sequential_generate_first_tis(self):
        """SequentialEnsemble to generate the first TIS path"""
        ensemble = SequentialEnsemble([
            self.outX | self.length0,
            self.inX,
            self.outX & self.leaveX0,
            self.inX & self.length1
        ])
        match_results = {
            'upper_in_in_cross_in' : True,
            'lower_in_in_cross_in' : True,
            'upper_out_in_cross_in' : True,
            'lower_out_in_cross_in' : True,
            'upper_in_cross_in' : True,
            'lower_in_cross_in' : True
        }
        logging.getLogger('openpathsampling.ensemble').info("Starting tests....")
        for test in list(match_results.keys()):
            failmsg = "Match failure in "+test+"("+str(ttraj[test])+"): "
            logging.getLogger('openpathsampling.ensemble').info(
                "Testing: "+str(test)
            )
            self._single_test(ensemble, ttraj[test],
                              match_results[test], failmsg)
        append_results = {
            'upper_in' : True,
            'upper_in_in_in' : True,
            'upper_in_in_out' : True,
            'upper_in_in_out_in' : False,
            'upper_out' : True,
            'upper_out_out_out' : True,
            'upper_out_in_out' : True,
            'upper_out_in_out_in' : False
        }
        for test in list(append_results.keys()):
            failmsg = "Append failure in "+test+"("+str(ttraj[test])+"): "
            # fixed: was logging.getLogger('opentis.ensemble') — a stale
            # project name inconsistent with the rest of this module
            logging.getLogger('openpathsampling.ensemble').info(
                "Testing: "+str(test)
            )
            self._single_test(ensemble.can_append, ttraj[test],
                              append_results[test], failmsg)

    def test_sequential_enter_exit(self):
        """SequentialEnsembles based on Enters/ExitsXEnsemble"""
        # TODO: this includes a test of the overlap ability
        raise SkipTest

    def test_str(self):
        # the expected string below must match the ensemble's __str__
        # byte-for-byte, including the internal indentation
        assert_equal(self.pseudo_tis.__str__(), """[
(
  x[t] in {x|Id(x) in [0.1, 0.5]} for all t
)
and
(
  len(x) = 1
),
x[t] in (not {x|Id(x) in [0.1, 0.5]}) for all t,
(
  x[t] in {x|Id(x) in [0.1, 0.5]} for all t
)
and
(
  len(x) = 1
)
]""")
class testSequentialEnsembleCombination(EnsembleTest):
# testing EnsembleCombinations of SequentialEnsembles -- this is mainly
# useful to making sure that the ensemble combination of strict_can_*
# works correctly, since this is where strict and normal have a
# distinction
def setUp(self):
self.ens1 = SequentialEnsemble([
AllInXEnsemble(vol1) & LengthEnsemble(1),
AllOutXEnsemble(vol1) & PartOutXEnsemble(vol2),
AllInXEnsemble(vol1) & LengthEnsemble(1)
])
self.ens2 = SequentialEnsemble([
AllInXEnsemble(vol1) & LengthEnsemble(1),
LengthEnsemble(3),
AllInXEnsemble(vol1) & LengthEnsemble(1)
])
self.combo_and = self.ens1 & self.ens2
self.combo_or = self.ens1 | self.ens2
def test_call(self):
ens1_passes = [
'in_cross_in',
'in_out_cross_in',
'in_out_cross_out_in'
]
self._test_everything(self.ens1, ens1_passes, False)
ens2_passes = [
'in_out_cross_out_in',
'in_out_in_out_in',
'in_cross_in_cross_in'
]
self._test_everything(self.ens2, ens2_passes, False)
or_passes = list(set(ens1_passes + ens2_passes))
self._test_everything(self.combo_or, or_passes, False)
and_passes = list(set(ens1_passes) & set(ens2_passes))
self._test_everything(self.combo_and, and_passes, False)
def test_can_append(self):
ens1_true = [
'hit',
'in',
'in_cross',
'in_out',
'in_out_cross',
'in_out_out_out',
'out',
'out_cross',
'out_out',
'out_out_out'
]
self._test_everything(self.ens1.can_append, ens1_true, False)
ens2_true = [
'hit',
'in',
'in_cross',
'in_cross_in',
'in_cross_in_cross',
'in_hit_in',
'in_hit_out',
'in_in',
'in_in_cross_in',
'in_in_in',
'in_in_in_out',
'in_in_out',
'in_in_out_in',
'in_out',
'in_out_cross',
'in_out_in',
'in_out_in_in',
'in_out_in_out',
'in_out_out_in',
'in_out_out_out',
'out',
'out_cross',
'out_hit_in',
'out_hit_out',
'out_in',
'out_in_in',
'out_in_out',
'out_out',
'out_out_in',
'out_out_out'
]
self._test_everything(self.ens2.can_append, ens2_true, False)
or_true = list(set(ens1_true + ens2_true))
self._test_everything(self.combo_or.can_append, or_true, False)
and_true = list(set(ens1_true) & set(ens2_true))
self._test_everything(self.combo_and.can_append, and_true, False)
def test_can_prepend(self):
ens1_true = [
'hit',
'in',
'out',
'out_cross',
'out_in',
'out_out',
'out_out_in',
'out_out_out',
'out_out_out_in'
]
self._test_everything(self.ens1.can_prepend, ens1_true, False)
ens2_true = [
'cross_in_cross_in',
'hit',
'in',
'in_cross',
'in_cross_in',
'in_hit_in',
'in_hit_out',
'in_in',
'in_in_cross_in',
'in_in_in',
'in_in_out',
'in_in_out_in',
'in_out',
'in_out_cross',
'in_out_in',
'in_out_in_in',
'in_out_out_in',
'out',
'out_cross',
'out_hit_in',
'out_hit_out',
'out_in',
'out_in_cross_in',
'out_in_in',
'out_in_in_in',
'out_in_out',
'out_in_out_in',
'out_out',
'out_out_in',
'out_out_out',
'out_out_out_in'
]
self._test_everything(self.ens2.can_prepend, ens2_true, False)
or_true = list(set(ens1_true + ens2_true))
self._test_everything(self.combo_or.can_prepend, or_true, False)
and_true = list(set(ens1_true) & set(ens2_true))
self._test_everything(self.combo_and.can_prepend, and_true, False)
def test_strict_can_append(self):
ens1_true = [
'hit',
'in',
'in_cross',
'in_out',
'in_out_cross',
'in_out_out_out',
]
self._test_everything(self.ens1.strict_can_append, ens1_true, False)
ens2_true = [
'hit',
'in',
'in_cross',
'in_cross_in',
'in_cross_in_cross',
'in_hit_in',
'in_hit_out',
'in_in',
'in_in_cross_in',
'in_in_in',
'in_in_in_out',
'in_in_out',
'in_in_out_in',
'in_out',
'in_out_cross',
'in_out_in',
'in_out_in_in',
'in_out_in_out',
'in_out_out_in',
'in_out_out_out',
]
self._test_everything(self.ens2.strict_can_append, ens2_true, False)
or_true = list(set(ens1_true + ens2_true))
self._test_everything(self.combo_or.strict_can_append, or_true, False)
and_true = list(set(ens1_true) & set(ens2_true))
self._test_everything(self.combo_and.strict_can_append, and_true, False)
    def test_strict_can_prepend(self):
        """strict_can_prepend returns True for exactly the listed ttraj keys.

        Strict prepending additionally requires the trajectory to end in
        the ensemble's final subensemble, so fewer keys are accepted than
        by plain ``can_prepend``.
        """
        # keys for which self.ens1.strict_can_prepend should be True
        ens1_true = [
            'hit',
            'in',
            'out_in',
            'out_out_in',
            'out_out_out_in'
        ]
        self._test_everything(self.ens1.strict_can_prepend, ens1_true, False)
        # keys for which self.ens2.strict_can_prepend should be True
        ens2_true = [
            'cross_in_cross_in',
            'hit',
            'in',
            'in_cross_in',
            'in_hit_in',
            'in_in',
            'in_in_cross_in',
            'in_in_in',
            'in_in_out_in',
            'in_out_in',
            'in_out_in_in',
            'in_out_out_in',
            'out_hit_in',
            'out_in',
            'out_in_cross_in',
            'out_in_in',
            'out_in_in_in',
            'out_in_out_in',
            'out_out_in',
            'out_out_out_in'
        ]
        self._test_everything(self.ens2.strict_can_prepend, ens2_true, False)
        # union ensemble: True wherever either component ensemble is True
        or_true = list(set(ens1_true + ens2_true))
        self._test_everything(self.combo_or.strict_can_prepend, or_true, False)
        # intersection ensemble: True only where both components are True
        and_true = list(set(ens1_true) & set(ens2_true))
        self._test_everything(self.combo_and.strict_can_prepend, and_true, False)
class testTISEnsemble(EnsembleTest):
    """Tests for TISEnsemble built on vol1 (state A), vol3, and vol2."""
    def setUp(self):
        self.tis = TISEnsemble(vol1, vol3, vol2, op)
        self.traj = ttraj['upper_in_out_cross_out_in']
        self.minl = min(op(self.traj))
        self.maxl = max(op(self.traj))

    def test_tis_trajectory_summary(self):
        """trajectory_summary reports both states and the lambda extremes."""
        summary = self.tis.trajectory_summary(self.traj)
        expected = {
            'initial_state': 0,
            'final_state': 0,
            'max_lambda': self.maxl,
            'min_lambda': self.minl
        }
        for (key, value) in expected.items():
            assert_equal(summary[key], value)

    def test_tis_trajectory_summary_str(self):
        """The string form of the summary matches the expected layout."""
        expected = ("initial_state=stateA final_state=stateA "
                    "min_lambda=%s max_lambda=%s " % (self.minl, self.maxl))
        assert_equal(self.tis.trajectory_summary_str(self.traj), expected)

    def test_tis_ensemble_candidate(self):
        """candidate=True accepts/rejects the expected upper_* fixtures."""
        tis = TISEnsemble(vol1, vol3, vol2, op, lambda_i=0.7)

        def test_f(t):
            return tis(t, candidate=True)

        # default expectation is rejection for every upper_* trajectory
        expected = {key: False for key in ttraj if key[:6] == "upper_"}
        # trajectories that genuinely satisfy the candidate TIS ensemble
        expected['upper_in_out_cross_out_in'] = True
        expected['upper_in_cross_in'] = True
        # inputs that aren't actually candidate trajectories, so the
        # ensemble accepts them even though it "shouldn't"
        expected['upper_in_cross_in_cross_in'] = True
        expected['upper_in_out_cross_out_in_out_in_out_cross_out_in'] = True
        expected['upper_in_in_cross_in'] = True
        for name in list(expected.keys()):
            failmsg = "Failure in " + name + "(" + str(ttraj[name]) + "): "
            self._single_test(test_f, ttraj[name], expected[name], failmsg)
class EnsembleCacheTest(EnsembleTest):
    """Base class for cache tests: adds a helper to detect a reset cache."""
    def _was_cache_reset(self, cache):
        """Return True if the cache holds no contents (i.e. was reset)."""
        return len(cache.contents) == 0
class testEnsembleCache(EnsembleCacheTest):
    # Tests of the EnsembleCache reset logic: the cache should reset
    # whenever the checked trajectory is not a one-frame extension (in the
    # cache's direction) of the previously checked trajectory.
    def setUp(self):
        self.fwd = EnsembleCache(direction=+1)
        self.rev = EnsembleCache(direction=-1)
        self.traj = ttraj['lower_in_out_in_in_out_in']
    def test_initially_reset(self):
        """A freshly constructed cache starts out empty (reset)."""
        assert_equal(self._was_cache_reset(self.fwd), True)
        assert_equal(self._was_cache_reset(self.rev), True)
    def test_change_trajectory(self):
        """Checking a different trajectory resets the cache."""
        traj2 = ttraj['lower_in_out_in']
        # tests for forward
        self.fwd.contents = { 'test' : 'object' }
        assert_equal(self._was_cache_reset(self.fwd), False)
        self.fwd.check(self.traj)
        assert_equal(self._was_cache_reset(self.fwd), True)
        # repopulate, then switch to a different trajectory: reset again
        self.fwd.contents['ens_num'] = 1
        assert_equal(self._was_cache_reset(self.fwd), False)
        self.fwd.check(traj2)
        assert_equal(self._was_cache_reset(self.fwd), True)
        # tests for backward
        self.rev.contents = { 'test' : 'object' }
        assert_equal(self._was_cache_reset(self.rev), False)
        self.rev.check(self.traj)
        assert_equal(self._was_cache_reset(self.rev), True)
        self.rev.contents['ens_num'] = 1
        assert_equal(self._was_cache_reset(self.rev), False)
        self.rev.check(traj2)
        assert_equal(self._was_cache_reset(self.rev), True)
    def test_trajectory_by_frame(self):
        """Extending by one frame in the cache direction keeps the cache."""
        # tests for forward
        self.fwd.check(self.traj[0:1])
        assert_equal(self._was_cache_reset(self.fwd), True)
        self.fwd.contents = { 'test' : 'object' }
        assert_equal(self._was_cache_reset(self.fwd), False)
        # one-frame forward extension: cache survives
        self.fwd.check(self.traj[0:2])
        assert_equal(self._was_cache_reset(self.fwd), False)
        # tests for backward
        self.rev.check(self.traj[-1:])
        assert_equal(self._was_cache_reset(self.rev), True)
        self.rev.contents = { 'test' : 'object' }
        assert_equal(self._was_cache_reset(self.rev), False)
        # one-frame backward extension: cache survives
        self.rev.check(self.traj[-2:])
        assert_equal(self._was_cache_reset(self.rev), False)
    def test_same_traj_twice_no_reset(self):
        """Re-checking the identical trajectory does not reset the cache."""
        # tests for forward
        self.fwd.check(self.traj)
        assert_equal(self._was_cache_reset(self.fwd), True)
        self.fwd.contents = { 'test' : 'object' }
        self.fwd.check(self.traj)
        assert_equal(self._was_cache_reset(self.fwd), False)
        # tests for backward
        self.rev.check(self.traj)
        assert_equal(self._was_cache_reset(self.rev), True)
        self.rev.contents = { 'test' : 'object' }
        self.rev.check(self.traj)
        assert_equal(self._was_cache_reset(self.rev), False)
    def test_trajectory_skips_frame(self):
        """Jumping by more than one frame resets the cache."""
        # tests for forward
        self.fwd.check(self.traj[0:1])
        assert_equal(self._was_cache_reset(self.fwd), True)
        self.fwd.contents = { 'test' : 'object' }
        assert_equal(self._was_cache_reset(self.fwd), False)
        # two-frame jump forward: cache must reset
        self.fwd.check(self.traj[0:3])
        assert_equal(self._was_cache_reset(self.fwd), True)
        # tests for backward
        self.rev.check(self.traj[-1:])
        assert_equal(self._was_cache_reset(self.rev), True)
        self.rev.contents = { 'test' : 'object' }
        assert_equal(self._was_cache_reset(self.rev), False)
        # two-frame jump backward: cache must reset
        self.rev.check(self.traj[-3:])
        assert_equal(self._was_cache_reset(self.rev), True)
    def test_trajectory_middle_frame_changes(self):
        """A changed interior frame (not just an extension) resets the cache."""
        # tests for forward
        self.fwd.check(self.traj[0:2])
        assert_equal(self._was_cache_reset(self.fwd), True)
        self.fwd.contents = { 'test' : 'object' }
        assert_equal(self._was_cache_reset(self.fwd), False)
        # same length-change as an extension, but with different frames
        new_traj = self.traj[0:1] + self.traj[3:5]
        self.fwd.check(new_traj)
        assert_equal(self._was_cache_reset(self.fwd), True)
        # tests for backward
        self.rev.check(self.traj[0:2])
        assert_equal(self._was_cache_reset(self.rev), True)
        self.rev.contents = { 'test' : 'object' }
        assert_equal(self._was_cache_reset(self.rev), False)
        new_traj = self.traj[-4:-2] + self.traj[-1:]
        self.rev.check(new_traj)
        assert_equal(self._was_cache_reset(self.rev), True)
class testSequentialEnsembleCache(EnsembleCacheTest):
    """Tests that SequentialEnsemble's direction caches track the subensemble
    bookkeeping (``ens_num``, ``ens_from``, ``subtraj_from``) correctly as a
    trajectory is grown frame by frame."""
    def setUp(self):
        self.inX = AllInXEnsemble(vol1)
        self.outX = AllOutXEnsemble(vol1)
        self.length1 = LengthEnsemble(1)
        # minus-interface-like sequence: single in-frame, out, in, out,
        # single in-frame
        self.pseudo_minus = SequentialEnsemble([
            self.inX & self.length1,
            self.outX,
            self.inX,
            self.outX,
            self.inX & self.length1
        ])
        self.traj = ttraj['lower_in_out_in_in_out_in']
    def test_all_in_as_seq_can_append(self):
        """A one-subensemble SequentialEnsemble keeps accepting appends while
        every frame stays inside the combined volume."""
        # NOTE: removed an unused local (`cache = ens._cache_can_append`)
        # that the original assigned but never read
        ens = SequentialEnsemble([AllInXEnsemble(vol1 | vol2 | vol3)])
        traj = ttraj['upper_in_in_out_out_in_in']
        assert_equal(ens.can_append(traj[0:1]), True)
        assert_equal(ens.can_append(traj[0:2]), True)
        assert_equal(ens.can_append(traj[0:3]), True)
        assert_equal(ens.can_append(traj[0:4]), True)
        assert_equal(ens.can_append(traj[0:5]), True)
        assert_equal(ens.can_append(traj[0:6]), True)
    def test_sequential_caching_can_append(self):
        """Cache contents advance correctly as can_append walks the
        subensembles of pseudo_minus one frame at a time."""
        cache = self.pseudo_minus._cache_can_append
        assert_equal(self.pseudo_minus.can_append(self.traj[0:1]), True)
        assert_equal(cache.contents['ens_num'], 1)
        assert_equal(cache.contents['ens_from'], 0)
        assert_equal(cache.contents['subtraj_from'], 1)
        logging.getLogger('openpathsampling.ensemble').debug("Starting [0:2]")
        assert_equal(self.pseudo_minus.can_append(self.traj[0:2]), True)
        assert_equal(cache.contents['ens_num'], 1)
        assert_equal(cache.contents['ens_from'], 0)
        assert_equal(cache.contents['subtraj_from'], 1)
        logging.getLogger('openpathsampling.ensemble').debug("Starting [0:3]")
        assert_equal(self.pseudo_minus.can_append(self.traj[0:3]), True)
        assert_equal(cache.contents['ens_num'], 2)
        assert_equal(cache.contents['ens_from'], 0)
        assert_equal(cache.contents['subtraj_from'], 2)
        logging.getLogger('openpathsampling.ensemble').debug("Starting [0:4]")
        assert_equal(self.pseudo_minus.can_append(self.traj[0:4]), True)
        assert_equal(cache.contents['ens_num'], 2)
        assert_equal(cache.contents['ens_from'], 0)
        assert_equal(cache.contents['subtraj_from'], 2)
        logging.getLogger('openpathsampling.ensemble').debug("Starting [0:5]")
        assert_equal(self.pseudo_minus.can_append(self.traj[0:5]), True)
        assert_equal(cache.contents['ens_num'], 3)
        assert_equal(cache.contents['ens_from'], 0)
        assert_equal(cache.contents['subtraj_from'], 4)
        logging.getLogger('openpathsampling.ensemble').debug("Starting [0:6]")
        assert_equal(self.pseudo_minus.can_append(self.traj[0:6]), False)
        assert_equal(cache.contents['ens_num'], 4)
        assert_equal(cache.contents['ens_from'], 0)
        assert_equal(cache.contents['subtraj_from'], 5)
    def test_sequential_caching_resets(self):
        """Interleaving can_append and __call__ should leave the caches in a
        sane state (reset behavior is only partially asserted; see TODO)."""
        #cache = self.pseudo_minus._cache_can_append
        assert_equal(self.pseudo_minus.can_append(self.traj[2:3]), True)
        assert_equal(self.pseudo_minus(self.traj[2:3]), False)
        #assert_equal(self._was_cache_reset(cache), True)
        assert_equal(self.pseudo_minus.can_append(self.traj[2:4]), True)
        assert_equal(self.pseudo_minus(self.traj[2:4]), False)
        #assert_equal(self._was_cache_reset(cache), True)
        for i in range(4, len(self.traj)-1):
            assert_equal(self.pseudo_minus.can_append(self.traj[2:i+1]), True)
            assert_equal(self.pseudo_minus(self.traj[2:i+1]), False)
            #assert_equal(self._was_cache_reset(cache), False)
        assert_equal(self.pseudo_minus.can_append(self.traj[2:]), False)
        assert_equal(self.pseudo_minus(self.traj[2:]), False)
        #assert_equal(self._was_cache_reset(cache), False)
        # TODO: same story backward
        raise SkipTest
    def test_sequential_caching_call(self):
        """Cache behavior of __call__ itself; not yet implemented."""
        raise SkipTest
    def test_sequential_caching_can_prepend(self):
        """Cache contents advance correctly as can_prepend walks the
        subensembles of pseudo_minus backward one frame at a time."""
        cache = self.pseudo_minus._cache_can_prepend
        assert_equal(self.pseudo_minus.can_prepend(self.traj[5:6]), True)
        assert_equal(cache.contents['ens_num'], 3)
        assert_equal(cache.contents['ens_from'], 4)
        assert_equal(cache.contents['subtraj_from'], -1)
        assert_equal(self.pseudo_minus.can_prepend(self.traj[4:6]), True)
        assert_equal(cache.contents['ens_num'], 3)
        assert_equal(cache.contents['ens_from'], 4)
        assert_equal(cache.contents['subtraj_from'], -1)
        assert_equal(self.pseudo_minus.can_prepend(self.traj[3:6]), True)
        assert_equal(cache.contents['ens_num'], 2)
        assert_equal(cache.contents['ens_from'], 4)
        assert_equal(cache.contents['subtraj_from'], -2)
        assert_equal(self.pseudo_minus.can_prepend(self.traj[2:6]), True)
        assert_equal(cache.contents['ens_num'], 2)
        assert_equal(cache.contents['ens_from'], 4)
        assert_equal(cache.contents['subtraj_from'], -2)
        assert_equal(self.pseudo_minus.can_prepend(self.traj[1:6]), True)
        assert_equal(cache.contents['ens_num'], 1)
        assert_equal(cache.contents['ens_from'], 4)
        assert_equal(cache.contents['subtraj_from'], -4)
        assert_equal(self.pseudo_minus.can_prepend(self.traj[0:6]), False)
        assert_equal(cache.contents['ens_num'], 0)
        assert_equal(cache.contents['ens_from'], 4)
        assert_equal(cache.contents['subtraj_from'], -5)
class testSlicedTrajectoryEnsemble(EnsembleTest):
    """Tests for SlicedTrajectoryEnsemble: an ensemble applied to a slice of
    the trajectory instead of the whole trajectory."""
    def test_sliced_ensemble_init(self):
        """An int region is equivalent to the corresponding one-frame slice."""
        init_as_int = SlicedTrajectoryEnsemble(AllInXEnsemble(vol1), 3)
        init_as_slice = SlicedTrajectoryEnsemble(AllInXEnsemble(vol1),
                                                 slice(3, 4))
        assert_equal(init_as_int, init_as_slice)
        assert_equal(init_as_slice.region, init_as_int.region)
    def test_sliced_as_TISEnsemble(self):
        '''SlicedTrajectory and Sequential give same TIS results'''
        sliced_tis = (
            SlicedTrajectoryEnsemble(AllInXEnsemble(vol1), 0) &
            SlicedTrajectoryEnsemble(AllOutXEnsemble(vol1 | vol3), slice(1,-1)) &
            SlicedTrajectoryEnsemble(PartOutXEnsemble(vol2), slice(1,-1)) &
            SlicedTrajectoryEnsemble(AllInXEnsemble(vol1 | vol3), -1)
        )
        sequential_tis = SequentialEnsemble([
            AllInXEnsemble(vol1) & LengthEnsemble(1),
            AllOutXEnsemble(vol1 | vol3) & PartOutXEnsemble(vol2),
            AllInXEnsemble(vol1 | vol3) & LengthEnsemble(1)
        ])
        real_tis = paths.TISEnsemble(vol1, vol3, vol2)
        # both alternative formulations must agree with the real TISEnsemble
        for test in list(ttraj.keys()):
            failmsg = "Failure in "+test+"("+tstr(ttraj[test])+"): "
            self._single_test(real_tis, ttraj[test],
                              sequential_tis(ttraj[test]), failmsg)
            self._single_test(sliced_tis, ttraj[test],
                              sequential_tis(ttraj[test]), failmsg)
    def test_slice_outside_trajectory_range(self):
        """A slice past the end of the trajectory yields an empty slice."""
        ens = SlicedTrajectoryEnsemble(AllInXEnsemble(vol1), slice(5,9))
        test = 'upper_in'
        # the slice should return the empty trajectory, and therefore should
        # return false
        assert_equal(ens(ttraj[test]), False)
    def test_even_sliced_trajectory(self):
        """Apply AllInXEnsemble to every second frame only."""
        even_slice = slice(None, None, 2)
        ens = SlicedTrajectoryEnsemble(AllInXEnsemble(vol1), even_slice)
        bare_results = {'in' : True,
                        'in_in' : True,
                        'in_in_in' : True,
                        'in_out_in' : True,
                        'in_in_out' : False,
                        'in_out_in_in' : True,
                        'in_out_in_out_in' : True,
                        'out' : False,
                        'in_in_out_in' : False,
                        'in_cross_in_cross' : True
                       }
        results = results_upper_lower(bare_results)
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+tstr(ttraj[test])+"): "
            self._single_test(ens, ttraj[test], results[test], failmsg)
    def test_sliced_sequential_global_whole(self):
        """A SequentialEnsemble applied to the even-frame slice of the whole
        trajectory."""
        even_slice = slice(None, None, 2)
        ens = SlicedTrajectoryEnsemble(SequentialEnsemble([
            AllInXEnsemble(vol1),
            AllOutXEnsemble(vol1)
        ]), even_slice)
        # NOTE: the original dict listed 'in_out_out_in' twice (both True);
        # the duplicate key has been removed
        bare_results = {'in_in_out' : True,
                        'in_hit_out' : True,
                        'in_out_out_in_out' : True,
                        'in_hit_out_in_out' : True,
                        'in_out_out_in' : True,
                        'in_cross_in_cross' : False,
                        'in_out_in' : False
                       }
        results = results_upper_lower(bare_results)
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+tstr(ttraj[test])+"): "
            self._single_test(ens, ttraj[test], results[test], failmsg)
    def test_sliced_sequential_subtraj_member(self):
        """A sliced ensemble used as one member of a SequentialEnsemble."""
        even_slice = slice(None, None, 2)
        ens = SequentialEnsemble([
            AllInXEnsemble(vol1),
            SlicedTrajectoryEnsemble(AllOutXEnsemble(vol1), even_slice)
        ])
        bare_results = {'in_out_in' : True,
                        'in_out_out_in' : False,
                        'in_in_out_in' : True,
                        'in_in_out' : True,
                        'in_in_cross_in' : True,
                        'in_out_in_out' : True,
                        'in_out_cross_out_in_out_in_out_cross_out_in' : True,
                        'in_out_in_in_out_in' : False
                       }
        results = results_upper_lower(bare_results)
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+tstr(ttraj[test])+"): "
            self._single_test(ens, ttraj[test], results[test], failmsg)
    def test_sliced_sequential_subtraj_middle(self):
        """A sliced ensemble sandwiched between two unsliced members."""
        even_slice = slice(None, None, 2)
        ens = SequentialEnsemble([
            AllInXEnsemble(vol1),
            SlicedTrajectoryEnsemble(AllOutXEnsemble(vol1), even_slice),
            AllInXEnsemble(vol1) & LengthEnsemble(1)
        ])
        bare_results = {'in_in_out_out_in_in' : False
                       }
        results = results_upper_lower(bare_results)
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+tstr(ttraj[test])+"): "
            self._single_test(ens, ttraj[test], results[test], failmsg)
    def test_sliced_str(self):
        """__str__ renders the slice region in {start:stop} / every-n form."""
        even_slice = slice(None,None, 2)
        slice_1_10 = slice(1, 10)
        slice_1_end = slice(1,None)
        slice_no_ends = slice(1, -1)
        inX = AllInXEnsemble(vol1)
        inXstr = "x[t] in {x|Id(x) in [0.1, 0.5]} for all t"
        assert_equal(SlicedTrajectoryEnsemble(inX, even_slice).__str__(),
                     "("+inXstr+" in {:} every 2)")
        assert_equal(SlicedTrajectoryEnsemble(inX, slice_1_10).__str__(),
                     "("+inXstr+" in {1:10})")
        assert_equal(SlicedTrajectoryEnsemble(inX, slice_1_end).__str__(),
                     "("+inXstr+" in {1:})")
        assert_equal(SlicedTrajectoryEnsemble(inX, slice_no_ends).__str__(),
                     "("+inXstr+" in {1:-1})")
class testOptionalEnsemble(EnsembleTest):
    """Tests for OptionalEnsemble at the start, middle, and end of a
    SequentialEnsemble.

    BUGFIX: the original class defined ``test_optional_middle_can_prepend``
    and ``test_optional_middle_strict_can_prepend`` twice; the second pair
    (which actually exercised ``self.end_opt``) shadowed the first, so the
    mid_opt prepend tests never ran. The second pair is now correctly named
    ``test_optional_end_can_prepend`` / ``test_optional_end_strict_can_prepend``.
    """
    def setUp(self):
        # optional leading out-segment before in, out
        self.start_opt = SequentialEnsemble([
            OptionalEnsemble(AllOutXEnsemble(vol1)),
            AllInXEnsemble(vol1),
            AllOutXEnsemble(vol1)
        ])
        # optional trailing out-segment after out, in
        self.end_opt = SequentialEnsemble([
            AllOutXEnsemble(vol1),
            AllInXEnsemble(vol1),
            OptionalEnsemble(AllOutXEnsemble(vol1))
        ])
        # optional interstitial segment (out of vol1 but inside vol2)
        self.mid_opt = SequentialEnsemble([
            AllInXEnsemble(vol1),
            OptionalEnsemble(AllOutXEnsemble(vol1) & AllInXEnsemble(vol2)),
            AllOutXEnsemble(vol2)
        ])
    def test_optional_start(self):
        """Membership test with the optional leading segment."""
        bare_results = {'in_out' : True,
                        'in_in_out' : True,
                        'out_in_out' : True,
                        'out_out' : False,
                        'out_in_in_out' : True,
                        'in_out_in' : False
                       }
        results = results_upper_lower(bare_results)
        fcn = self.start_opt
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+tstr(ttraj[test])+"): "
            self._single_test(fcn, ttraj[test], results[test], failmsg)
    def test_optional_start_can_append(self):
        bare_results = {'in' : True,
                        'out' : True,
                        'in_out' : True,
                        'out_in' : True,
                        'out_out_in' : True,
                        'in_out_in' : False,
                        'out_in_out' : True
                       }
        results = results_upper_lower(bare_results)
        fcn = self.start_opt.can_append
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+tstr(ttraj[test])+"): "
            self._single_test(fcn, ttraj[test], results[test], failmsg)
    def test_optional_start_strict_can_append(self):
        bare_results = {'in' : True,
                        'out' : True,
                        'in_out' : True,
                        'out_in' : True,
                        'out_out_in' : True,
                        'in_out_in' : False,
                        'out_in_out' : True
                       }
        results = results_upper_lower(bare_results)
        fcn = self.start_opt.strict_can_append
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+tstr(ttraj[test])+"): "
            self._single_test(fcn, ttraj[test], results[test], failmsg)
    def test_optional_start_can_prepend(self):
        # NOTE: the original dict listed 'out_in_out' twice (both True);
        # the duplicate key has been removed
        bare_results = {'in' : True,
                        'out' : True,
                        'out_in_out' : True,
                        'out_out_in_out' : True,
                        'in_out' : True,
                        'in_out_in_out' : False,
                        'out_in' : True,
                        'out_in_out_in' : False,
                        'in_out_in' : False
                       }
        results = results_upper_lower(bare_results)
        fcn = self.start_opt.can_prepend
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+tstr(ttraj[test])+"): "
            self._single_test(fcn, ttraj[test], results[test], failmsg)
    def test_optional_start_strict_can_prepend(self):
        # NOTE: the original dict listed 'out_in_out' twice (both True);
        # the duplicate key has been removed
        bare_results = {'in' : False,
                        'out' : True,
                        'out_in_out' : True,
                        'out_out_in_out' : True,
                        'in_out' : True,
                        'in_out_in_out' : False,
                        'out_in' : False,
                        'out_in_out_in' : False,
                        'in_out_in' : False
                       }
        results = results_upper_lower(bare_results)
        fcn = self.start_opt.strict_can_prepend
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+tstr(ttraj[test])+"): "
            self._single_test(fcn, ttraj[test], results[test], failmsg)
    def test_optional_middle(self):
        """Membership test with the optional interstitial segment."""
        bare_results = {'in_out_cross' : True,
                        'in_cross' : True,
                        'in_out' : False,
                        'out_cross' : False,
                        'cross_in_cross_in' : False
                       }
        results = results_upper_lower(bare_results)
        fcn = self.mid_opt
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+tstr(ttraj[test])+"): "
            self._single_test(fcn, ttraj[test], results[test], failmsg)
    def test_optional_middle_can_append(self):
        bare_results = {'in' : True,
                        'out' : True,
                        'in_out' : True,
                        'out_in' : False,
                        'in_cross' : True,
                        'in_out_cross' : True,
                        'out_cross' : True,
                        'in_out_in' : False
                       }
        results = results_upper_lower(bare_results)
        fcn = self.mid_opt.can_append
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+tstr(ttraj[test])+"): "
            self._single_test(fcn, ttraj[test], results[test], failmsg)
    def test_optional_middle_strict_can_append(self):
        bare_results = {'in' : True,
                        'out' : False,
                        'in_out' : True,
                        'out_in' : False,
                        'in_cross' : True,
                        'in_out_cross' : True,
                        'out_cross' : False,
                        'in_out_in' : False
                       }
        results = results_upper_lower(bare_results)
        fcn = self.mid_opt.strict_can_append
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+tstr(ttraj[test])+"): "
            self._single_test(fcn, ttraj[test], results[test], failmsg)
    def test_optional_middle_can_prepend(self):
        bare_results = {'in' : True,
                        'out' : True,
                        'in_out' : True,
                        'out_in' : False,
                        'in_cross' : True,
                        'out_cross' : True,
                        'in_cross_in' : False
                       }
        results = results_upper_lower(bare_results)
        fcn = self.mid_opt.can_prepend
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+tstr(ttraj[test])+"): "
            self._single_test(fcn, ttraj[test], results[test], failmsg)
    def test_optional_middle_strict_can_prepend(self):
        bare_results = {'in' : False,
                        'out' : False,
                        'in_out' : False,
                        'out_in' : False,
                        'in_cross' : True,
                        'out_cross' : True,
                        'in_cross_in' : False
                       }
        results = results_upper_lower(bare_results)
        fcn = self.mid_opt.strict_can_prepend
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+tstr(ttraj[test])+"): "
            self._single_test(fcn, ttraj[test], results[test], failmsg)
    def test_optional_end(self):
        """Membership test with the optional trailing segment."""
        bare_results = {'out_in' : True,
                        'out_in_out' : True,
                        'in_out' : False,
                        'out_out_in_out' : True,
                        'out_in_out_in' : False
                       }
        results = results_upper_lower(bare_results)
        fcn = self.end_opt
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+tstr(ttraj[test])+"): "
            self._single_test(fcn, ttraj[test], results[test], failmsg)
    def test_optional_end_can_append(self):
        bare_results = {'in' : True,
                        'out' : True,
                        'out_in' : True,
                        'in_out' : True,
                        'out_in_out' : True,
                        'in_out_in' : False,
                        'out_in_out_in' : False,
                        'in_in_out' : True
                       }
        results = results_upper_lower(bare_results)
        fcn = self.end_opt.can_append
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+tstr(ttraj[test])+"): "
            self._single_test(fcn, ttraj[test], results[test], failmsg)
    def test_optional_end_strict_can_append(self):
        bare_results = {'in' : False,
                        'out' : True,
                        'out_in' : True,
                        'in_out' : False,
                        'out_in_out' : True,
                        'in_out_in' : False,
                        'out_in_out_in' : False,
                        'in_in_out' : False
                       }
        results = results_upper_lower(bare_results)
        fcn = self.end_opt.strict_can_append
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+tstr(ttraj[test])+"): "
            self._single_test(fcn, ttraj[test], results[test], failmsg)
    def test_optional_end_can_prepend(self):
        # BUGFIX: was misnamed test_optional_middle_can_prepend, which
        # shadowed the real mid_opt test above
        bare_results = {'in' : True,
                        'out' : True,
                        'out_in' : True,
                        'in_out' : True,
                        'out_in_out' : True,
                        'in_out_in_out' : False,
                        'in_out_in' : False,
                        'out_in_out_in' : False
                       }
        results = results_upper_lower(bare_results)
        fcn = self.end_opt.can_prepend
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+tstr(ttraj[test])+"): "
            self._single_test(fcn, ttraj[test], results[test], failmsg)
    def test_optional_end_strict_can_prepend(self):
        # BUGFIX: was misnamed test_optional_middle_strict_can_prepend,
        # which shadowed the real mid_opt test above
        bare_results = {'in' : True,
                        'out' : True,
                        'out_in' : True,
                        'in_out' : True,
                        'out_in_out' : True,
                        'in_out_in_out' : False,
                        'in_out_in' : False,
                        'out_in_out_in' : False
                       }
        results = results_upper_lower(bare_results)
        fcn = self.end_opt.strict_can_prepend
        for test in list(results.keys()):
            failmsg = "Failure in "+test+"("+tstr(ttraj[test])+"): "
            self._single_test(fcn, ttraj[test], results[test], failmsg)
    def test_optional_str(self):
        """__str__ wraps the inner ensemble and marks it OPTIONAL."""
        inX = AllInXEnsemble(vol1)
        opt_inX = OptionalEnsemble(inX)
        assert_equal(opt_inX.__str__(), "{"+inX.__str__()+"} (OPTIONAL)")
class testPrefixTrajectoryEnsemble(EnsembleTest):
    # Tests for PrefixTrajectoryEnsemble: the inner ensemble is evaluated
    # on a fixed starting trajectory plus whatever is appended; prepending
    # is not allowed.
    def setUp(self):
        self.inX = AllInXEnsemble(vol1)
    def test_bad_start_traj(self):
        """A prefix that already violates the inner ensemble rejects appends."""
        traj = ttraj['upper_out_in_in_in']
        ens = PrefixTrajectoryEnsemble(
            SequentialEnsemble([self.inX]),
            traj[0:2]
        )
        assert_equal(ens.can_append(traj[0:3]), False)
        assert_equal(ens.strict_can_append(traj[0:3]), False)
        assert_equal(ens(traj[0:3]), False)
    def test_good_start_traj(self):
        """A valid prefix lets the appended frames satisfy the ensemble."""
        traj = ttraj['upper_in_in_in']
        ens = PrefixTrajectoryEnsemble(
            SequentialEnsemble([self.inX]),
            traj[0:2]
        )
        assert_equal(ens.can_append(traj[2:3]), True)
        assert_equal(ens.strict_can_append(traj[2:3]), True)
        assert_equal(ens(traj[2:3]), True)
    @raises(RuntimeError)
    def test_can_prepend(self):
        """can_prepend is forbidden for a prefix ensemble."""
        traj = ttraj['upper_in_in_in']
        ens = PrefixTrajectoryEnsemble(
            SequentialEnsemble([self.inX]),
            traj[0:2]
        )
        ens.can_prepend(traj[2:3])
    @raises(RuntimeError)
    def test_strict_can_prepend(self):
        """strict_can_prepend is forbidden for a prefix ensemble."""
        traj = ttraj['upper_in_in_in']
        ens = PrefixTrajectoryEnsemble(
            SequentialEnsemble([self.inX]),
            traj[0:2]
        )
        ens.strict_can_prepend(traj[2:3])
    def test_caching_in_fwdapp_seq(self):
        """The cached trajectory grows with each append and the can_append
        cache becomes trusted after the first call."""
        inX = AllInXEnsemble(vol1)
        outX = AllOutXEnsemble(vol1)
        length1 = LengthEnsemble(1)
        pseudo_minus = SequentialEnsemble([
            inX & length1,
            outX,
            inX,
            outX,
            inX & length1
        ])
        traj = ttraj['upper_in_out_in_in_out_in']
        ens = PrefixTrajectoryEnsemble(pseudo_minus, traj[0:2])
        assert_equal(ens.can_append(traj[2:3]), True)
        assert_equal(ens._cached_trajectory, traj[0:3])
        # first call: cache not yet trusted
        assert_equal(ens._cache_can_append.trusted, False)
        assert_equal(ens.can_append(traj[2:4]), True)
        assert_equal(ens._cached_trajectory, traj[0:4])
        assert_equal(ens._cache_can_append.trusted, True)
        assert_equal(ens.can_append(traj[2:5]), True)
        assert_equal(ens._cached_trajectory, traj[0:5])
        assert_equal(ens._cache_can_append.trusted, True)
        assert_equal(ens.can_append(traj[2:6]), False)
        assert_equal(ens._cached_trajectory, traj[0:6])
        assert_equal(ens._cache_can_append.trusted, True)
class testSuffixTrajectoryEnsemble(EnsembleTest):
    """Tests for SuffixTrajectoryEnsemble: the inner ensemble is evaluated
    on whatever is prepended plus a fixed final trajectory; appending is
    not allowed."""
    def setUp(self):
        xval = paths.FunctionCV("x", lambda s : s.xyz[0][0])
        vol = paths.CVDefinedVolume(xval, 0.1, 0.5)
        self.inX = AllInXEnsemble(vol)
        self.outX = AllOutXEnsemble(vol)
    def test_bad_end_traj(self):
        """A suffix that already violates the inner ensemble rejects
        prepending."""
        traj = ttraj['upper_in_in_in_out']
        ens = SuffixTrajectoryEnsemble(
            SequentialEnsemble([self.inX]),
            traj[-2:]
        )
        # NOTE: the original spelled this slice traj[-3:2]; for this
        # 4-frame trajectory that is the same single frame as traj[-3:-2],
        # now written to match test_good_end_traj
        assert_equal(ens.can_prepend(traj[-3:-2]), False)
        assert_equal(ens.strict_can_prepend(traj[-3:-2]), False)
        assert_equal(ens(traj[-3:-2]), False)
    def test_good_end_traj(self):
        """A valid suffix accepts in-volume prepending, but rejects a
        prepended segment that leaves the volume."""
        traj = ttraj['upper_out_in_in_in']
        ens = SuffixTrajectoryEnsemble(
            SequentialEnsemble([self.inX]),
            traj[-2:]
        )
        assert_equal(ens.can_prepend(traj[-3:-2]), True)
        assert_equal(ens.strict_can_prepend(traj[-3:-2]), True)
        assert_equal(ens(traj[-3:-2]), True)
        assert_equal(ens.can_prepend(traj[-4:-2]), False)
        assert_equal(ens.strict_can_prepend(traj[-4:-2]), False)
        assert_equal(ens(traj[-4:-2]), False)
    @raises(RuntimeError)
    def test_can_append(self):
        """can_append is forbidden for a suffix ensemble."""
        traj = ttraj['upper_out_in_in_in']
        ens = SuffixTrajectoryEnsemble(
            SequentialEnsemble([self.inX]),
            traj[-2:]
        )
        ens.can_append(traj)
    @raises(RuntimeError)
    def test_strict_can_append(self):
        """strict_can_append is forbidden for a suffix ensemble."""
        traj = ttraj['upper_out_in_in_in']
        ens = SuffixTrajectoryEnsemble(
            SequentialEnsemble([self.inX]),
            traj[-2:]
        )
        ens.strict_can_append(traj)
    def test_caching_in_bkwdprep_seq(self):
        """The cached trajectory grows with each prepend and the can_prepend
        cache becomes trusted after the first call."""
        length1 = LengthEnsemble(1)
        pseudo_minus = SequentialEnsemble([
            self.inX & length1,
            self.outX,
            self.inX,
            self.outX,
            self.inX & length1
        ])
        traj = ttraj['upper_in_out_in_in_out_in']
        # sanity checks before running the suffixed version
        assert_equal(pseudo_minus(traj), True)
        # BUGFIX: the original used range(-1, -6), which is empty (the
        # default step is +1), so these sanity checks never executed;
        # step -1 restores the intended suffix indices -1 .. -5
        for i in range(-1, -6, -1):
            assert_equal(pseudo_minus.can_prepend(traj[i:]), True)
        logger.debug("alltraj " + str([id(i) for i in traj]))
        ens = SuffixTrajectoryEnsemble(pseudo_minus, traj[-3:])
        assert_equal(len(ens._cached_trajectory), 3)
        assert_equal(ens.can_prepend(traj[-4:-3].reversed), True)
        assert_equal(len(ens._cached_trajectory), 4)
        # first call: cache not yet trusted
        assert_equal(ens._cache_can_prepend.trusted, False)
        assert_equal(ens.can_prepend(traj[-5:-3].reversed), True)
        assert_equal(len(ens._cached_trajectory), 5)
        assert_equal(ens._cache_can_prepend.trusted, True)
        assert_equal(ens.can_prepend(traj[-6:-3].reversed), False)
        assert_equal(len(ens._cached_trajectory), 6)
        assert_equal(ens._cache_can_prepend.trusted, True)
class testMinusInterfaceEnsemble(EnsembleTest):
def setUp(self):
# Mostly we use minus ensembles where the state matches the first
# interface. We also test the case where that isn't in, in which
# case there's an interstitial zone. (Only test it for nl=2 to keep
# things easier.)
self.minus_nl2 = MinusInterfaceEnsemble(
state_vol=vol1,
innermost_vols=vol1,
n_l=2
)
self.minus_interstitial_nl2 = MinusInterfaceEnsemble(
state_vol=vol1,
innermost_vols=vol2,
n_l=2
)
self.minus_nl3 = MinusInterfaceEnsemble(
state_vol=vol1,
innermost_vols=vol1,
n_l=3
)
@raises(ValueError)
def test_minus_nl1_fail(self):
minus_nl1 = MinusInterfaceEnsemble(state_vol=vol1,
innermost_vols=vol2,
n_l=1)
def test_minus_nl2_ensemble(self):
non_default = [
'in_cross_in_cross_in',
'in_out_in_in_out_in',
'in_out_in_out_in'
]
self._test_everything(self.minus_nl2, non_default, False)
def test_minus_nl2_can_append(self):
non_default = [
'in_cross_in_cross_in',
'in_out_in_in_out_in',
'in_out_in_out_in',
'cross_in_cross_in',
'in_in_cross_in',
'in_in_out_in',
'in_in_out_in_out',
'in_in_out_out_in_in',
'in_out_cross_out_in_out_in_out_cross_out_in',
'out_in_cross_in',
'out_in_in_in_out_in_out_in_in_in_out',
'out_in_in_out_in',
'out_in_out_in',
'out_in_out_in_out',
'out_in_out_out_in',
'out_in_out_out_in_out',
'in_hit_out_in_out',
'out_hit_in_out_in'
]
self._test_everything(self.minus_nl2.can_append, non_default, True)
def test_minus_nl2_strict_can_append(self):
non_default = [
'in',
'hit',
'in_out',
'in_out_in',
'in_out_out_out',
'in_out_out_in',
'in_out_in_out',
'in_out_in_in',
'in_out_out_in_out',
'in_out_in_in_out',
'in_cross',
'in_cross_in',
'in_out_cross',
'in_out_cross_in',
'in_out_cross_out_in',
'in_cross_in_cross',
]
self._test_everything(self.minus_nl2.strict_can_append, non_default,
False)
def test_minus_nl2_can_prepend(self):
non_default = [
'in_cross_in_cross',
'in_cross_in_cross_in',
'in_in_out_in_out',
'in_in_out_out_in_in',
'in_out_cross_out_in_out_in_out_cross_out_in',
'in_out_in_in',
'in_out_in_in_out',
'in_out_in_in_out_in',
'in_out_in_out',
'in_out_in_out_in',
'in_out_out_in_out',
'out_in_in_in_out_in_out_in_in_in_out',
'out_in_out_in_out',
'out_in_out_out_in_out',
'in_hit_out_in_out'
]
self._test_everything(self.minus_nl2.can_prepend, non_default, True)
def test_minus_nl2_strict_can_prepend(self):
non_default = [
'in',
'hit',
'out_in',
'cross_in',
'in_cross_in',
'in_out_cross_in',
'in_out_cross_out_in',
'in_out_in',
'out_out_in',
'out_out_out_in',
'in_in_cross_in',
'out_in_cross_in',
'out_in_out_out_in',
'in_in_out_in',
'in_out_out_in',
'out_in_out_in',
'out_in_in_out_in',
'out_out_in_out_in',
'cross_in_cross_in',
'out_hit_in_out_in'
]
self._test_everything(self.minus_nl2.strict_can_prepend,
non_default, False)
def test_minus_interstitial_nl2_ensemble(self):
non_default = [
'in_cross_in_cross_in',
'in_out_cross_out_in_out_in_out_cross_out_in',
]
self._test_everything(self.minus_interstitial_nl2, non_default, False)
def test_minus_interstitial_nl2_can_append(self):
non_default = [
'in_cross_in_cross_in',
'in_out_cross_out_in_out_in_out_cross_out_in',
'cross_in_cross_in',
'in_in_cross_in',
'out_in_cross_in'
]
self._test_everything(self.minus_interstitial_nl2.can_append,
non_default, True)
def test_minus_interstitial_nl2_strict_can_append(self):
non_default = [
'in',
'hit',
'in_out',
'in_out_out_out',
'in_cross',
'in_cross_in',
'in_out_cross',
'in_out_cross_in',
'in_out_cross_out_in',
'in_cross_in_cross',
]
self._test_everything(self.minus_interstitial_nl2.strict_can_append,
non_default, False)
def test_minus_interstitial_nl2_can_prepend(self):
non_default = [
'in_cross_in_cross_in',
'in_out_cross_out_in_out_in_out_cross_out_in',
'in_cross_in_cross'
]
self._test_everything(self.minus_interstitial_nl2.can_prepend,
non_default, True)
def test_minus_interstitial_nl2_strict_can_prepend(self):
non_default = [
'in',
'hit',
'out_in',
'cross_in',
'in_cross_in',
'in_out_cross_in',
'in_out_cross_out_in',
'out_out_in',
'out_out_out_in',
'in_in_cross_in',
'out_in_cross_in',
'cross_in_cross_in',
]
self._test_everything(self.minus_interstitial_nl2.strict_can_prepend,
non_default, False)
def test_minus_nl3_ensemble(self):
non_default = [
'in_out_cross_out_in_out_in_out_cross_out_in',
]
self._test_everything(self.minus_nl3, non_default, False)
def test_minus_nl3_can_append(self):
non_default = [
'in_out_cross_out_in_out_in_out_cross_out_in',
'out_in_in_in_out_in_out_in_in_in_out'
]
self._test_everything(self.minus_nl3.can_append, non_default, True)
def test_minus_nl3_strict_can_append(self):
non_default = [
'in',
'hit',
'in_out',
'in_out_in',
'in_out_out_out',
'in_out_out_in',
'in_out_in_out',
'in_out_in_in',
'in_out_out_in_out',
'in_out_in_in_out',
'in_cross',
'in_cross_in',
'in_out_cross',
'in_out_cross_in',
'in_out_cross_out_in',
'in_cross_in_cross',
'in_cross_in_cross_in',
'in_out_in_in_out_in',
'in_out_in_out_in'
]
self._test_everything(self.minus_nl3.strict_can_append, non_default,
False)
def test_minus_nl3_can_prepend(self):
non_default = [
'in_out_cross_out_in_out_in_out_cross_out_in',
'out_in_in_in_out_in_out_in_in_in_out'
]
self._test_everything(self.minus_nl3.can_prepend, non_default, True)
def test_minus_nl3_strict_can_prepend(self):
non_default = [
'in',
'hit',
'out_in',
'cross_in',
'in_cross_in',
'in_out_cross_in',
'in_out_cross_out_in',
'in_out_in',
'out_out_in',
'out_out_out_in',
'in_in_cross_in',
'out_in_cross_in',
'out_in_out_out_in',
'in_in_out_in',
'in_out_out_in',
'out_in_out_in',
'out_in_in_out_in',
'out_out_in_out_in',
'cross_in_cross_in',
'out_hit_in_out_in',
'in_cross_in_cross_in',
'in_out_in_in_out_in',
'in_out_in_out_in'
]
self._test_everything(self.minus_nl3.strict_can_prepend,
non_default, False)
    def test_extend_sample_from_trajectories(self):
        """Extend a minus-ensemble sample from an existing sample set.

        CalvinistDynamics replays a predestined list of frame values, so
        the "dynamics" are deterministic; the test checks that the
        extended sample satisfies self.minus_nl2, carries replica -1, and
        matches the expected frames one by one.
        """
        # set up ensA and ensB
        ensA = paths.TISEnsemble(vol1, vol3, vol1, op)
        ensB = paths.TISEnsemble(vol1, vol3, vol2, op)
        # set up trajA and trajB
        trajA = make_1d_traj([0.25, 1.0, 1.5, 2.1])
        trajB = ttraj['upper_in_cross_in']
        sset = paths.SampleSet([
            paths.Sample(replica=0, ensemble=ensA, trajectory=trajA),
            paths.Sample(replica=1, ensemble=ensB, trajectory=trajB)
        ])
        sset.sanity_check()
        # test with first trajectory
        # predestined frames: continue from trajB's final frame
        predestined_snaps = [trajB[-1]]+ttraj['upper_out_in']
        predestined_traj = [s.xyz[0][0] for s in predestined_snaps]
        engine = CalvinistDynamics(predestined_traj)
        sample = self.minus_nl2.extend_sample_from_trajectories(
            sset, replica=-1, engine=engine, level='complex'
        )
        assert_equal(sample.ensemble(sample.trajectory), True)
        assert_equal(sample.ensemble, self.minus_nl2)
        assert_equal(sample.replica, -1)
        assert_equal(len(sample.trajectory), 5)
        expected = trajB + ttraj['upper_out_in']
        for (t, b) in zip(sample.trajectory, expected):
            assert_equal(t.xyz[0][0], b.xyz[0][0])
        # test with a different trajectory
        predestined_snaps = [trajB[-1]]+ttraj['upper_in_out_in']
        predestined_traj = [s.xyz[0][0] for s in predestined_snaps]
        engine = CalvinistDynamics(predestined_traj)
        sample = self.minus_nl2.extend_sample_from_trajectories(
            sset, replica=-1, engine=engine, level='complex'
        )
        assert_equal(sample.ensemble(sample.trajectory), True)
        assert_equal(sample.ensemble, self.minus_nl2)
        assert_equal(sample.replica, -1)
        assert_equal(len(sample.trajectory), 6)
        expected = trajB + ttraj['upper_in_out_in']
        for (t, b) in zip(sample.trajectory, expected):
            assert_equal(t.xyz[0][0], b.xyz[0][0])
# TODO: this whole class should become a single test in SeqEns
class testSingleEnsembleSequentialEnsemble(EnsembleTest):
    """A SequentialEnsemble wrapping a single sub-ensemble must agree with
    that sub-ensemble for __call__, can_append, and can_prepend on every
    test trajectory.
    """
    def setUp(self):
        inner = LengthEnsemble(3) & AllInXEnsemble(vol1 | vol2)
        self.inner_ens = inner
        self.ens = SequentialEnsemble([inner])
    def test_it_all(self):
        for name in list(ttraj.keys()):
            traj = ttraj[name]
            failmsg = "Failure in " + name + "(" + str(traj) + "): "
            checks = [
                (self.ens, self.inner_ens(traj)),
                (self.ens.can_append, self.inner_ens.can_append(traj)),
                (self.ens.can_prepend, self.inner_ens.can_prepend(traj)),
            ]
            for fcn, expected in checks:
                self._single_test(fcn, traj, expected, failmsg)
class testEnsembleSplit(EnsembleTest):
    """Tests for Ensemble.split and find_first/last_subtrajectory, using
    complementary all-in / all-out ensembles of vol1 and a sequential
    in-out-in ensemble."""
    def setUp(self):
        self.inA = AllInXEnsemble(vol1)
        self.outA = AllOutXEnsemble(vol1)
    def test_split(self):
        # raise SkipTest
        # simple splits of an in/out trajectory into in-segments and
        # out-segments
        traj1 = ttraj['upper_in_out_in_in']
        # print [s for s in traj1]
        subtrajs_in_1 = self.inA.split(traj1)
        # print subtrajs_in_1
        # print [[s for s in t] for t in subtrajs_in_1]
        assert_equal(len(subtrajs_in_1), 2)
        assert_equal(len(subtrajs_in_1[0]), 1)
        assert_equal(len(subtrajs_in_1[1]), 2)
        subtrajs_out_1 = self.outA.split(traj1)
        assert_equal(len(subtrajs_out_1), 1)
        traj2 = ttraj['upper_in_out_in_in_out_in']
        # print [s for s in traj2]
        subtrajs_in_2 = self.inA.split(traj2)
        # print [[s for s in t] for t in subtrajs_in_2]
        assert_equal(len(subtrajs_in_2), 3)
        assert_equal(len(subtrajs_in_2[0]), 1)
        assert_equal(len(subtrajs_in_2[1]), 2)
        assert_equal(len(subtrajs_in_2[2]), 1)
        subtrajs_out_2 = self.outA.split(traj2)
        assert_equal(len(subtrajs_out_2), 2)
        assert_equal(len(subtrajs_out_2[0]), 1)
        assert_equal(len(subtrajs_out_2[1]), 1)
        # sequential A->X->A ensemble: split into overlapping/disjoint
        # subtrajectories, forward and reverse, with length constraints
        ensembleAXA = paths.SequentialEnsemble([
            self.inA,
            self.outA,
            self.inA
        ])
        traj3 = make_1d_traj(coordinates=[0.3, 0.6, 0.3, 0.6, 0.3])
        assert(self.inA(paths.Trajectory([traj3[0]])))
        assert(self.outA(paths.Trajectory([traj3[1]])))
        # default split: overlapping matches allowed
        subtrajs_in_3 = ensembleAXA.split(traj3)
        assert_equal((len(subtrajs_in_3)), 2)
        assert_equal((len(subtrajs_in_3[0])), 3)
        assert_equal((len(subtrajs_in_3[1])), 3)
        assert(traj3.subtrajectory_indices(subtrajs_in_3[0]) == [0, 1, 2])
        assert(traj3.subtrajectory_indices(subtrajs_in_3[1]) == [2, 3, 4])
        # reverse search returns the same matches in reversed order
        subtrajs_in_3 = ensembleAXA.split(traj3, reverse=True)
        assert_equal((len(subtrajs_in_3)), 2)
        assert_equal((len(subtrajs_in_3[0])), 3)
        assert_equal((len(subtrajs_in_3[1])), 3)
        assert(traj3.subtrajectory_indices(subtrajs_in_3[0]) == [2, 3, 4])
        assert(traj3.subtrajectory_indices(subtrajs_in_3[1]) == [0, 1, 2])
        # overlap=0: disjoint matches only, so just one survives
        subtrajs_in_3 = ensembleAXA.split(traj3, overlap=0)
        assert_equal((len(subtrajs_in_3)), 1)
        assert_equal((len(subtrajs_in_3[0])), 3)
        assert(traj3.subtrajectory_indices(subtrajs_in_3[0]) == [0, 1, 2])
        subtrajs_in_3 = ensembleAXA.split(traj3, reverse=True, overlap=0)
        assert_equal((len(subtrajs_in_3)), 1)
        assert_equal((len(subtrajs_in_3[0])), 3)
        assert(traj3.subtrajectory_indices(subtrajs_in_3[0]) == [2, 3, 4])
        # max_length below the match length: no results
        subtrajs_in_3 = ensembleAXA.split(traj3, overlap=1, max_length=2)
        assert_equal((len(subtrajs_in_3)), 0)
        subtrajs_in_3 = ensembleAXA.split(traj3, reverse=True, max_length=2)
        assert_equal((len(subtrajs_in_3)), 0)
        # max_length equal to the match length: both matches kept
        subtrajs_in_3 = ensembleAXA.split(traj3, max_length=3)
        assert_equal(len(subtrajs_in_3), 2)
        assert_equal((len(subtrajs_in_3[0])), 3)
        assert_equal((len(subtrajs_in_3[1])), 3)
        assert(traj3.subtrajectory_indices(subtrajs_in_3[0]) == [0, 1, 2])
        assert(traj3.subtrajectory_indices(subtrajs_in_3[1]) == [2, 3, 4])
        subtrajs_in_3 = ensembleAXA.split(traj3, reverse=True, max_length=3)
        assert_equal((len(subtrajs_in_3)), 2)
        assert_equal((len(subtrajs_in_3[0])), 3)
        assert_equal((len(subtrajs_in_3[1])), 3)
        assert(traj3.subtrajectory_indices(subtrajs_in_3[1]) == [0, 1, 2])
        assert(traj3.subtrajectory_indices(subtrajs_in_3[0]) == [2, 3, 4])
        # min_length above the match length: no results
        subtrajs_in_3 = ensembleAXA.split(traj3, reverse=False, min_length=4)
        assert_equal((len(subtrajs_in_3)), 0)
        subtrajs_in_3 = ensembleAXA.split(traj3, reverse=True, min_length=4)
        assert_equal((len(subtrajs_in_3)), 0)
        # convenience wrappers for first/last matching subtrajectory
        sub_traj = ensembleAXA.find_first_subtrajectory(traj3)
        assert(traj3.subtrajectory_indices(sub_traj) == [0,1,2])
        sub_traj = ensembleAXA.find_last_subtrajectory(traj3)
        assert(traj3.subtrajectory_indices(sub_traj) == [2,3,4])
class testVolumeCombinations(EnsembleTest):
    """Tests for and/or combinations of volume-based ensembles.

    Verifies the combined-ensemble results and, crucially, the contents
    of each sub-ensemble's internal cache after every trusted frame-by-
    frame evaluation (including the short-circuit behavior of & and |).
    """
    # NOTE(review): named ``setup`` (nose spelling) while sibling classes
    # use ``setUp`` -- confirm the test runner invokes both.
    def setup(self):
        self.outA = paths.AllOutXEnsemble(vol1)
        self.outB = paths.AllOutXEnsemble(~vol2)
        self.outA.special_debug = True
        self.outB.special_debug = True
        self.partinA = paths.PartInXEnsemble(vol1)
        self.partinB = paths.PartInXEnsemble(~vol2)
        self.outA_or_outB = self.outA | self.outB
        self.outA_and_outB = self.outA & self.outB
        self.partinA_or_partinB = self.partinA | self.partinB
        self.partinA_and_partinB = self.partinA & self.partinB
        # extra test trajectories beyond the module-level ttraj set
        extras = build_trajdict(['babbc', 'ca', 'bcbba', 'abbc', 'cbba',
                                 'abbcb', 'cbbab'], lower, upper)
        for test in list(extras.keys()):
            extras[test] = make_1d_traj(coordinates=extras[test],
                                        velocities=[1.0]*len(extras[test]))
        self.local_ttraj = dict(ttraj)
        self.local_ttraj.update(extras)
    def _test_trusted(self, trajectory, function, results,
                      cache_results=None, direction=+1, start_traj_len=1):
        # Tests `trajectory` frame by frame in a forward direction for the
        # `function`, expecting `results`. Additionally, can take the
        # expected cache contents per step: cache_results maps a cache to
        # a list of expected ``contents['previous']`` values (None = skip).
        # NOTE(review): the loop runs len(trajectory)-start_traj_len
        # steps, so the final entry of `results` (full-trajectory slice)
        # is never checked -- possible off-by-one; confirm intent.
        if cache_results is None:
            cache_results = {}
        # clear the caches before starting
        for cache in list(cache_results.keys()):
            cache.__init__(direction=cache.direction)
        for i in range(len(trajectory)-start_traj_len):
            # grow the slice forward (direction>0) or backward (<0);
            # direction==0 would leave start/end unbound (never passed)
            if direction > 0:
                start = 0
                end = start + (i+start_traj_len)
            elif direction < 0:
                end = len(trajectory)
                start = end - (i+start_traj_len)
            # test untrusted
            assert_equal(function(trajectory[start:end]), results[i])
            # test trusted
            trusted_val = function(trajectory[start:end], trusted=True)
            # print i, "["+str(start)+":"+str(end)+"]", trusted_val, results[i]
            assert_equal(trusted_val, results[i])
            for cache in list(cache_results.keys()):
                # TODO: this is currently very specific to the caches used
                # by volumes ensembles. That should be generalized by
                # allowing several different tags within contents.
                # cache_results could {cache : {'content_key' : [values]}}
                if cache_results[cache][i] is not None:
                    #print "cache", cache_results.keys().index(cache),
                    try:
                        contents = cache.contents['previous']
                    except KeyError:
                        contents = None
                    #print contents, cache_results[cache][i]
                    assert_equal(cache.contents['previous'],
                                 cache_results[cache][i])
    def test_call_outA_or_outB(self):
        # `or`: B's cache stays None while A alone decides (short-circuit)
        self._test_trusted(
            trajectory=self.local_ttraj['upper_out_in_out_out_cross'],
            function=self.outA_or_outB,
            results=[True, True, True, True, False],
            cache_results={
                self.outA._cache_call : [True, False, False, False, False],
                self.outB._cache_call : [None, True, True, True, False]
            }
        )
        self._test_trusted(
            trajectory=self.local_ttraj['upper_out_cross_out_out_in'],
            function=self.outA_or_outB,
            results=[True, True, True, True, False],
            cache_results={
                self.outA._cache_call : [True, True, True, True, False],
                self.outB._cache_call : [None, None, None, None, False]
            }
        )
        self._test_trusted(
            trajectory=self.local_ttraj['upper_in_cross'],
            function=self.outA_or_outB,
            results=[True, False],
            cache_results={
                self.outA._cache_call : [False, False],
                self.outB._cache_call : [True, False]
            }
        )
        self._test_trusted(
            trajectory=self.local_ttraj['upper_cross_in'],
            function=self.outA_or_outB,
            results=[True, False],
            cache_results={
                self.outA._cache_call : [True, False],
                self.outB._cache_call : [None, False]
            }
        )
    def test_call_outA_and_outB(self):
        self._test_trusted(
            trajectory=self.local_ttraj['upper_out_in_out_out_cross'],
            function=self.outA_and_outB,
            results=[True, False, False, False, False],
            cache_results={
                # cache for A gets checked first: value of cache for B
                # doesn't matter once cache for A is False (short-circuit)
                self.outA._cache_call : [True, False, False, False, False],
                self.outB._cache_call : [True, None, None, None, None]
            }
        )
        self._test_trusted(
            trajectory=self.local_ttraj['upper_out_cross_out_out_in'],
            function=self.outA_and_outB,
            results=[True, False, False, False, False],
            cache_results={
                self.outA._cache_call : [True, True, True, True, False],
                self.outB._cache_call : [True, False, False, False, None]
            }
        )
    def test_can_append_outA_or_outB(self):
        # same scenarios as test_call_outA_or_outB, but for can_append
        self._test_trusted(
            trajectory=self.local_ttraj['upper_out_in_out_out_cross'],
            function=self.outA_or_outB.can_append,
            results=[True, True, True, True, False],
            cache_results={
                self.outA._cache_can_append : [True, False, False, False, False],
                self.outB._cache_can_append : [None, True, True, True, False]
            }
        )
        self._test_trusted(
            trajectory=self.local_ttraj['upper_out_cross_out_out_in'],
            function=self.outA_or_outB.can_append,
            results=[True, True, True, True, False],
            cache_results={
                self.outA._cache_can_append : [True, True, True, True, False],
                self.outB._cache_can_append : [None, None, None, None, False]
            }
        )
        self._test_trusted(
            trajectory=self.local_ttraj['upper_in_cross'],
            function=self.outA_or_outB.can_append,
            results=[True, False],
            cache_results={
                self.outA._cache_can_append : [False, False],
                self.outB._cache_can_append : [True, False]
            }
        )
        self._test_trusted(
            trajectory=self.local_ttraj['upper_cross_in'],
            function=self.outA_or_outB.can_append,
            results=[True, False],
            cache_results={
                self.outA._cache_can_append : [True, False],
                self.outB._cache_can_append : [None, False]
            }
        )
    def test_call_start_from_later_frame(self):
        # starting with a 2-frame slice must still give consistent caches
        self._test_trusted(
            trajectory=self.local_ttraj['upper_out_cross_out_out_in'],
            function=self.outA_or_outB.can_append,
            results=[True, True, True, False],
            cache_results={
                self.outA._cache_can_append : [True, True, True, False],
                self.outB._cache_can_append : [None, None, None, False]
            },
            start_traj_len=2
        )
    def test_can_append_outA_and_outB(self):
        self._test_trusted(
            trajectory=self.local_ttraj['upper_out_in_out_out_cross'],
            function=self.outA_and_outB.can_append,
            results=[True, False, False, False, False],
            cache_results={
                # cache for A gets checked first: value of cache for B
                # doesn't matter once cache for A is False (short-circuit)
                self.outA._cache_can_append : [True, False, False, False, False],
                self.outB._cache_can_append : [True, None, None, None, None]
            }
        )
        self._test_trusted(
            trajectory=self.local_ttraj['upper_out_cross_out_out_in'],
            function=self.outA_and_outB.can_append,
            results=[True, False, False, False, False],
            cache_results={
                self.outA._cache_can_append : [True, True, True, True, False],
                self.outB._cache_can_append : [True, False, False, False, None]
            }
        )
    def test_can_prepend_outA_or_outB(self):
        # backward-growing slices (direction=-1) exercise can_prepend caches
        self._test_trusted(
            trajectory=self.local_ttraj['upper_in_out_out_cross'],
            function=self.outA_or_outB.can_prepend,
            results=[True, True, True, False],
            cache_results={
                self.outA._cache_can_prepend : [True, True, True, False],
                self.outB._cache_can_prepend : [None, None, None, False]
            },
            direction=-1
        )
        self._test_trusted(
            trajectory=self.local_ttraj['upper_cross_out_out_in'],
            function=self.outA_or_outB.can_prepend,
            results=[True, True, True, False],
            cache_results={
                self.outA._cache_can_prepend : [False, False, False, False],
                self.outB._cache_can_prepend : [True, True, True, False]
            },
            direction=-1
        )
    def test_can_prepend_start_from_later_frame(self):
        self._test_trusted(
            trajectory=self.local_ttraj['upper_in_out_out_cross'],
            function=self.outA_or_outB.can_prepend,
            results=[True, True, False],
            cache_results={
                self.outA._cache_can_prepend : [True, True, False],
                self.outB._cache_can_prepend : [None, None, False]
            },
            direction=-1,
            start_traj_len=2
        )
    def test_can_prepend_outA_and_outB(self):
        self._test_trusted(
            trajectory=self.local_ttraj['upper_in_out_out_cross_out'],
            function=self.outA_and_outB.can_prepend,
            results=[True, False, False, False, False],
            cache_results={
                self.outA._cache_can_prepend : [True, True, True, True, False],
                self.outB._cache_can_prepend : [True, False, False, False, False]
            },
            direction=-1
        )
        self._test_trusted(
            trajectory=self.local_ttraj['upper_cross_out_out_in_out'],
            function=self.outA_and_outB.can_prepend,
            results=[True, False, False, False, False],
            cache_results={
                self.outA._cache_can_prepend : [True, False, False, False, False],
                self.outB._cache_can_prepend : [True, None, None, None, None]
            },
            direction=-1
        )
class testAbstract(object):
    """The abstract ensemble base classes must refuse direct instantiation."""
    @raises_with_message_like(TypeError, "Can't instantiate abstract class")
    def test_abstract_ensemble(self):
        # constructing the ABC itself must raise TypeError
        _ = paths.Ensemble()
    @raises_with_message_like(TypeError, "Can't instantiate abstract class")
    def test_abstract_volumeensemble(self):
        # same for the volume-ensemble ABC
        _ = paths.VolumeEnsemble()
class TestEnsembleEquality(object):
    # generic tests for ensemble equality; we use the EmptyEnsemble as
    # example. See:
    # * https://github.com/openpathsampling/openpathsampling/issues/700
    # * https://github.com/openpathsampling/openpathsampling/issues/701
    def test_empty_ensemble_equality(self):
        first = paths.EmptyEnsemble()
        second = paths.EmptyEnsemble()
        # == and != must be consistent with each other
        assert_true(first == second)
        assert_false(first != second)
    # TODO: may add tests for other ensembles, or may move this test
    # somewhere else
|
jhprinz/openpathsampling
|
openpathsampling/tests/testensemble.py
|
Python
|
lgpl-2.1
| 108,832
|
[
"OpenMM"
] |
94fc9c1903ef59105cf628ca1e204cb5029623f0b6e8ca49278159cd2c0126dd
|
""" Module with helper classes and functions. """
def assign_protein_data_to_blast_results(blast_records_in_object_and_list):
    """Attach local-database protein info to BLAST alignments.

    For every alignment of every BLAST record, set ``hit_url`` to the
    contig page of the hit, look up the matching Transcript in the local
    database and, when a Swissprot BLAST entry exists for it, set
    ``hit_protein_name`` to "<gene name> / <protein name>" (otherwise it
    stays "---").

    Modifies the alignment objects in place and returns the same input
    sequence.
    """
    from newtbase.models import Transcript, Blast
    for blast_record in blast_records_in_object_and_list:
        for al in blast_record.alignments:
            try:
                al.hit_url = "/tgm_contig/?contig={}".format(str(al.hit_def))
                al.hit_protein_name = "---"
                t = Transcript.objects.get(transcript_id=al.hit_def)
                # .first() fetches a single row; the old ``len(b) > 0``
                # evaluated the whole queryset just to test emptiness
                b = Blast.objects.filter(transcript=t,
                                         database="Swissprot").first()
                if b is not None:
                    al.hit_protein_name = "{} / {}".format(
                        b.accession_fk.gene_name, b.accession_fk.protein_name)
            except Exception as inst:
                # best-effort annotation: a missing transcript or broken
                # foreign key must not abort the remaining alignments
                print(type(inst))    # the exception instance
                print(inst.args)     # arguments stored in .args
                print(inst)          # __str__ allows args to be printed directly
    return blast_records_in_object_and_list
|
michal-stuglik/newtbase
|
newtbase/utils.py
|
Python
|
gpl-2.0
| 1,135
|
[
"BLAST"
] |
c9d2c1b58b5665c2bc460015c598948c18cbea395ca121214e3abaca5053e889
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012-2013 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import datetime
import os
import sys
import platform
import struct
import psycopg2
from storm.tracer import BaseStatementTracer, install_tracer
try:
from sqlparse import engine, filters, sql
has_sqlparse = True
except ImportError:
has_sqlparse = False
# http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
def getTerminalSize():
    """Return the terminal size as a ``(width, height)`` tuple.

    On non-Linux platforms a fixed (80, 20) is returned.  On Linux the
    size is queried with the TIOCGWINSZ ioctl on stdin/stdout/stderr,
    then on the controlling terminal, and finally falls back to the
    LINES/COLUMNS environment variables (default 80x25).
    """
    if platform.system() != 'Linux':
        return 80, 20
    import fcntl
    import termios
    env = os.environ
    def ioctl_GWINSZ(fd):
        # (rows, cols) for fd, or None so the caller can try a fallback.
        # BUG FIX: this used to return (80, 20) on failure, which is
        # truthy -- it short-circuited the fallback chain below AND was
        # interpreted as rows=80/cols=20, i.e. a width of 20.
        try:
            return struct.unpack('hh',
                                 fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
        except (OSError, IOError, struct.error):
            return None
    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
    if not cr:
        # try the controlling terminal directly
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            try:
                cr = ioctl_GWINSZ(fd)
            finally:
                os.close(fd)
        except OSError:
            pass
    if not cr:
        try:
            cr = (env['LINES'], env['COLUMNS'])
        except KeyError:
            cr = (25, 80)
    # cr is (rows, cols); callers expect (width, height)
    return int(cr[1]), int(cr[0])
if has_sqlparse:
    # Only available when the optional ``sqlparse`` package is installed;
    # _format_statement() checks ``has_sqlparse`` before using format_sql().
    class MyReindentFilter(filters.ReindentFilter):
        # ReindentFilter subclass that additionally wraps long identifier
        # lists so the formatted SQL fits within ``max_width`` columns.
        def __init__(self, max_width):
            # max_width: column limit used when breaking identifier lists
            self.max_width = max_width
            filters.ReindentFilter.__init__(self)
        def _process_identifierlist(self, tlist):
            identifiers = list(tlist.get_identifiers())
            if len(identifiers) > 1 and not tlist.within(sql.Function):
                # This is not working in some cases
                # first = list(identifiers[0].flatten())[0]
                # num_offset = self._get_offset(first) - len(first.value)
                # fixed indent used instead of the computed offset above
                num_offset = 7
                self.offset += num_offset
                width = self.offset
                for token in identifiers:
                    width += len(str(token)) + 2
                    if width > self.max_width:
                        # line would overflow: break before this token
                        tlist.insert_before(token, self.nl())
                        width = self.offset + len(str(token))
                self.offset -= num_offset
            return True

    def format_sql(statement, prefix_length=0):
        # Reindent `statement` to fit the terminal, indenting every
        # continuation line by `prefix_length` spaces (the trace header).
        width, height = getTerminalSize()
        stack = engine.FilterStack()
        stack.enable_grouping()
        stack.stmtprocess.append(filters.StripWhitespaceFilter())
        stack.stmtprocess.append(MyReindentFilter(width - 30))
        stack.postprocess.append(filters.SerializerUnicode())
        statement = ''.join(stack.run(statement))
        lines = statement.split('\n')
        new_lines = [lines[0]]
        for line in lines[1:]:
            new_lines.append(' ' * prefix_length + line)
        statement = '\n'.join(new_lines)
        return statement
class StoqlibDebugTracer(BaseStatementTracer):
    """Storm statement tracer that pretty-prints every SQL statement,
    color-coded per backend connection (pid) and per statement type,
    together with timing, row counts and transaction events.
    """
    # ANSI SGR attribute and color codes used by _colored()
    ATTRIBUTES = dict(bold=1, dark=2, underline=4, blink=5,
                      reverse=7, concealed=8)
    COLORS = dict(grey=30, red=31, green=32, yellow=33, blue=34,
                  magenta=35, cyan=36, white=37)
    RESET = '\033[0m'  # pylint: disable=W1401
    def __init__(self, stream=None):
        # stream: file-like object the trace is written to (defaults to
        # sys.stderr)
        # This colors will be used to highlight the transaction
        self._available_colors = ['blue', 'green', 'magenta', 'yellow', 'cyan',
                                  'red']
        self._current_color = 0
        # Mapping pid > color
        self._transactions = {}
        # Mapping pid > query count
        self._transactions_count = {}
        if stream is None:
            stream = sys.stderr
        self._stream = stream
    def _colored(self, text, color=None, attrs=None):
        # Wrap `text` in ANSI escapes; disabled via ANSI_COLORS_DISABLED.
        if os.getenv('ANSI_COLORS_DISABLED') is None:
            fmt_str = '\033[%dm%s'  # pylint: disable=W1401
            if color is not None:
                text = fmt_str % (self.COLORS[color], text)
            if attrs is not None:
                for attr in attrs:
                    text = fmt_str % (self.ATTRIBUTES[attr], text)
            text += self.RESET
        return text
    def _format_statement(self, statement, header_size):
        # Reindent (when sqlparse is available) and colorize the SQL
        # keywords according to the statement type.
        if has_sqlparse:
            statement = format_sql(statement, header_size)
        replaces = []
        if statement.startswith('SELECT'):
            color = 'blue'
            replaces = ['SELECT', 'FROM', 'WHERE', 'GROUP BY', 'JOIN', 'LEFT',
                        'AND', 'OR', 'ORDER BY']
        elif statement.startswith('UPDATE'):
            color = 'yellow'
            replaces = ['UPDATE', 'SET', 'WHERE']
        elif statement.startswith('INSERT INTO transaction_entry'):
            # transaction entry inserting is quite common and always the same query.
            # Make it less prominent
            statement = self._colored(statement, 'white')
        elif statement.startswith('INSERT'):
            color = 'green'
            replaces = ['INSERT', 'INTO', 'VALUES']
        elif statement.startswith('DELETE'):
            color = 'red'
            replaces = ['DELETE', 'FROM', 'WHERE']
        # `color` is only referenced when `replaces` is non-empty, so the
        # branches that leave it unset are safe.
        for i in replaces:
            statement = statement.replace(i + ' ', self._colored(i, color) + ' ')
            statement = statement.replace(i + '\n', self._colored(i, color) + '\n')
        return statement
    def write(self, msg):
        # Write and flush immediately so traces interleave correctly.
        self._stream.write(msg)
        self._stream.flush()
    def header(self, pid, color, header, tail='\n'):
        # "[<pid> <label>]" prefix; the label is padded to 5 characters,
        # hence the 5-char labels (BEGIN/COMIT/ROLLB/CLOSE) used below.
        pid = self._colored('%s' % pid, color)
        header = self._colored('%5s' % header, 'grey', ['bold'])
        self.write("[%s %s]%s" % (pid, header, tail))
    def _expanded_raw_execute(self, transaction, raw_cursor, statement):
        # Print the statement with its per-connection counter; stores
        # _start_time/statement on self for the matching success callback.
        pid = raw_cursor.connection.get_backend_pid()
        self._transactions_count.setdefault(pid, 0)
        self._transactions_count[pid] += 1
        count = self._transactions_count[pid]
        header_size = 9 + len(str(pid))
        color = self._get_transaction_color(pid)
        # NOTE(review): pid is colored here and again inside header() --
        # the escape codes nest; confirm this double coloring is intended.
        pid = self._colored(pid, color)
        self._start_time = datetime.datetime.now()
        self.statement = self._format_statement(statement, header_size)
        # Dont write new line, so we can print the time at the end
        self.header(pid, color, count, tail=' ')
        self.write(self.statement + '\n')
    def connection_raw_execute_success(self, transaction, raw_cursor, statement,
                                       params):
        # Print duration/row count for the statement started in
        # _expanded_raw_execute (relies on self._start_time set there).
        pid = raw_cursor.connection.get_backend_pid()
        header_size = 9 + len(str(pid))
        now = datetime.datetime.now()
        duration = now - self._start_time
        seconds = duration.seconds + float(duration.microseconds) / 10 ** 6
        rows = raw_cursor.rowcount
        text = '%s%s seconds | %s rows | %s' % (
            ' ' * header_size,
            self._colored(seconds, attrs=['bold']),
            self._colored(rows, attrs=['bold']),
            self._colored(now.strftime('%F %T.%f'), attrs=['bold']))
        if statement.startswith('INSERT') and rows == 1:
            # try to show the id RETURNING'd by the insert without
            # consuming it (scroll back afterwards)
            try:
                rowid = raw_cursor.fetchone()[0]
                raw_cursor.scroll(-1)
                text += ' | id: ' + self._colored(repr(rowid), attrs=['bold'])
            except psycopg2.ProgrammingError:
                text = ''
        self.write(text + '\n')
    def _get_transaction_color(self, pid):
        # Assign each backend pid a color, cycling through the palette.
        if pid not in self._transactions:
            self._transactions[pid] = self._available_colors[self._current_color]
            self._current_color += 1
            if self._current_color == len(self._available_colors):
                self._current_color = 0
        return self._transactions[pid]
    def transaction_create(self, store):
        pid = store._connection._raw_connection.get_backend_pid()
        color = self._get_transaction_color(pid)
        self.header(pid, color, 'BEGIN')
    def transaction_commit(self, store):
        pid = store._connection._raw_connection.get_backend_pid()
        color = self._get_transaction_color(pid)
        # 'COMIT' is deliberately 5 characters to match the %5s header
        self.header(pid, color, 'COMIT')
    def transaction_rollback(self, store, xid=None):
        pid = store._connection._raw_connection.get_backend_pid()
        color = self._get_transaction_color(pid)
        self.header(pid, color, 'ROLLB')
    def transaction_close(self, store):
        # Release the pid's color so it can be reused by new connections.
        pid = store._connection._raw_connection.get_backend_pid()
        color = self._get_transaction_color(pid)
        del self._transactions[pid]
        self.header(pid, color, 'CLOSE')
def enable():
    """Install a StoqlibDebugTracer so every SQL statement gets traced."""
    tracer = StoqlibDebugTracer()
    install_tracer(tracer)
|
andrebellafronte/stoq
|
stoqlib/database/debug.py
|
Python
|
gpl-2.0
| 9,291
|
[
"VisIt"
] |
f930a6dc0f6836e2f5aa78226b53fd6dc62ec8510a67c65c61acfa4d04db6b56
|
from __future__ import division, absolute_import
import astropy.stats
import glob
import math
import matplotlib.pyplot as plt
from matplotlib import ticker
from matplotlib.ticker import FormatStrFormatter
import numpy as np
import os
import pandas as pd
from scipy import optimize,spatial
###############################################################################
###############################################################################
###############################################################################
__author__ =['Victor Calderon']
__copyright__ =["Copyright 2016 Victor Calderon, Index function"]
__email__ =['victor.calderon@vanderbilt.edu']
__maintainer__ =['Victor Calderon']
def Index(directory, datatype):
    """
    Indexes the files in a directory `directory' with a
    specific data type.

    Parameters
    ----------
    directory: str
        Absolute path to the folder that is indexed.
    datatype: str
        Data type of the files to be indexed in the folder.

    Returns
    -------
    file_array: array_like
        np.array of indexed files in the folder 'directory'
        with specific datatype.

    Examples
    --------
    >>> Index('~/data', '.txt')
    >>> array(['A.txt', 'Z.txt', ...])
    """
    assert(os.path.exists(directory))
    pattern = '{0}/*{1}'.format(directory, datatype)
    file_array = np.array(glob.glob(pattern))
    return file_array
###############################################################################
def myceil(x, base=10):
    """
    Round `x` up to the nearest multiple of `base`.

    Parameters
    ----------
    x: float
        number to be approximated to closest number to 'base'
    base: float
        base used to calculate the closest 'largest' number

    Returns
    -------
    n_high: float
        Closest float number to 'x', i.e. upper-bound float.

    Example
    -------
    >>>> myceil(12,10)
    20
    >>>> myceil(12.05, 0.1)
    12.10000
    """
    n_high = math.ceil(float(x) / base) * base
    return float(n_high)
###############################################################################
def myfloor(x, base=10):
    """
    Round `x` down to the nearest multiple of `base`.

    Parameters
    ----------
    x: float
        number to be approximated to closest number of 'base'
    base: float
        base used to calculate the closest 'smallest' number

    Returns
    -------
    n_low: float
        Closest float number to 'x', i.e. lower-bound float.

    Example
    -------
    >>>> myfloor(12, 5)
    >>>> 10
    """
    n_low = math.floor(float(x) / base) * base
    return float(n_low)
###############################################################################
def Bins_array_create(arr, base=10):
    """
    Generates array between [arr.min(), arr.max()] in steps of `base`.

    Parameters
    ----------
    arr: array_like, Shape (N,...), One-dimensional
        Array of numerical elements
    base: float, optional (default=10)
        Interval between bins

    Returns
    -------
    bins_arr: array_like
        Array of bin edges for given arr
    """
    base = float(base)
    arr = np.array(arr)
    assert(arr.ndim == 1)
    # bounds snapped outward to multiples of `base`
    # (inlined myfloor/myceil from this module)
    low = float(base * math.floor(float(arr.min()) / base))
    high = float(base * math.ceil(float(arr.max()) / base))
    # the +0.5*base guarantees the upper edge is included
    return np.arange(low, high + 0.5 * base, base)
###############################################################################
###############################################################################
###############################################################################
def sph_to_cart(ra, dec, cz):
    """
    Converts spherical coordinates to Cartesian coordinates.

    Parameters
    ----------
    ra: array-like
        right-ascension of galaxies in degrees
    dec: array-like
        declination of galaxies in degrees
    cz: array-like
        velocity of galaxies in km/s

    Returns
    -------
    coords: array-like, shape = N by 3
        x, y, and z coordinates
    """
    dist = cz / 70.  # converts velocity into distance
    ra_rad = np.radians(ra)
    dec_rad = np.radians(dec)
    cos_dec = np.cos(dec_rad)
    x_arr = dist * np.cos(ra_rad) * cos_dec
    y_arr = dist * np.sin(ra_rad) * cos_dec
    z_arr = dist * np.sin(dec_rad)
    return np.column_stack((x_arr, y_arr, z_arr))
############################################################################
def calc_dens(n_val, r_val):
    """
    Returns densities of spheres with radius being the distance to the
    nth nearest neighbor.

    Parameters
    ----------
    n_val = integer
        The 'N' from Nth nearest neighbor
    r_val = array-like
        An array with the distances to the Nth nearest neighbor for
        each galaxy

    Returns
    -------
    dens: array-like
        An array with the densities of the spheres created with radii
        to the Nth nearest neighbor.
    """
    r_val = np.asarray(r_val, dtype=float)
    # vectorized form of 3*(N+1) / (4*pi*r^3); replaces the old
    # per-element Python list comprehension with one numpy expression
    dens = 3. * (n_val + 1) / (4. * np.pi * r_val ** 3)
    return dens
###############################################################################
def plot_calcs(mass, bins, dlogM, mass_err=False, ratio_err=False):
    """
    Returns values for plotting the stellar mass function and
    mass ratios

    Parameters
    ----------
    mass: array-like
        A 1D array with mass values, assumed to be in order
    bins: array=like
        A 1D array with the values which will be used as the bin edges
        by the histogram function
    dlogM: float-like
        The log difference between bin edges

    Optional
    --------
    mass_err == True
        Calculates the Poisson errors on the stellar mass function.
        Returns mass_freq as a list with 2 array elements, the first being
        the stellar mass function values, the second being the errors.
    ratio_err == True
        Calculates the Poisson errors on the density-based, mass ratios.
        Creates empty list and appends ratio error arrays to it as they
        are generated. Returns ratio_dict as a list. The first element is
        a dictionary with the ratio values to be plotted. The second is a
        three element list. Each element is an array with the error values
        for each of the three density-cut ratios.

    Returns
    -------
    bin_centers: array-like
        An array with the medians mass values of the mass bins
    mass-freq: array-like
        Contains the number density values of each mass bin
    ratio_dict: dictionary-like
        A dictionary with three keys, corresponding to the divisors
        2,4, and 10 (as the percentile cuts are based on these
        divisions). Each key has the density-cut, mass ratios for
        that specific cut (50/50 for 2; 25/75 for 4; 10/90 for 10).
    """
    mass_counts, edges = np.histogram(mass, bins)
    bin_centers = 0.5 * (edges[:-1] + edges[1:])
    mass_freq = mass_counts / float(len(mass)) / dlogM
    ratio_dict = {}
    frac_val = [2, 4, 10]
    if ratio_err:
        yerr = []
    for ii in frac_val:
        # Calculations for the lower density cut
        frac_data = int(len(mass) / ii)
        frac_mass = mass[0:frac_data]
        counts, edges = np.histogram(frac_mass, bins)
        # Calculations for the higher density cut
        frac_mass_2 = mass[-frac_data:]
        counts_2, edges_2 = np.histogram(frac_mass_2, bins)
        # Ratio determination (may divide by zero if a bin is empty in
        # the low-density cut; numpy then yields inf with a warning)
        ratio_counts = (1. * counts_2) / (1. * counts)
        ratio_dict[ii] = ratio_counts
        if ratio_err:
            yerr.append((counts_2 * 1.) / (counts * 1.) *
                        np.sqrt(1. / counts + 1. / counts_2))
    if mass_err:
        # BUG FIX: `xrange` is Python-2-only (the rest of this module
        # uses `range`); this branch crashed under Python 3
        mass_freq_list = [[] for xx in range(2)]
        mass_freq_list[0] = mass_freq
        mass_freq_list[1] = np.sqrt(mass_counts) / float(len(mass)) / dlogM
        mass_freq = np.array(mass_freq_list)
    if ratio_err:
        ratio_dict_list = [[] for xx in range(2)]
        ratio_dict_list[0] = ratio_dict
        ratio_dict_list[1] = yerr
        ratio_dict = ratio_dict_list
    return bin_centers, mass_freq, ratio_dict
###############################################################################
def bin_func(mass_dist, bins, kk, bootstrap=False):
    """
    Returns median distance to Nth nearest neighbor

    Parameters
    ----------
    mass_dist: array-like
        An array with mass values in at index 0 (when transformed) and distance
        to the Nth nearest neighbor in the others
        Example: 6239 by 7
            Has mass values and distances to 6 Nth nearest neighbors
    bins: array=like
        A 1D array with the values which will be used as the bin edges
    kk: integer-like
        The index of mass_dist (transformed) where the appropriate distance
        array may be found

    Optional
    --------
    bootstrap == True
        Calculates the bootstrap errors associated with each median distance
        value. Creates an array housing arrays containing the actual distance
        values associated with every galaxy in a specific bin. Bootstrap error
        is then performed using astropy, and upper and lower one sigma values
        are found for each median value. These are added to a list with the
        median distances, and then converted to an array and returned in place
        of just 'medians.'

    Returns
    -------
    medians: array-like
        An array with the median distance to the Nth nearest neighbor from
        all the galaxies in each of the bins
    """
    edges = bins
    # print 'length bins:'
    # print len(bins)
    # bin index per galaxy, shifted so bin 0 is the first bin
    digitized = np.digitize(mass_dist.T[0], edges)
    digitized -= int(1)
    bin_nums = np.unique(digitized)
    bin_nums_list = list(bin_nums)
    # if 12 not in bin_nums:
    #     bin_nums.append(12)
    # if 13 in bin_nums:
    #     bin_nums.remove(13
    # if 13 not in bin_nums:
    #     bin_nums.append(13)
    # if 14 in bin_nums:
    #     bin_nums.remove(14)
    # make sure every interior bin index is present, and drop the
    # out-of-range indices produced by np.digitize for the overflow bins
    for jj in range(len(bins)-1):
        if jj not in bin_nums:
            bin_nums_list.append(jj)
            # print 'appended'
            # print bin_nums_list
    if (len(bins)-1) in bin_nums_list:
        bin_nums_list.remove(len(bins)-1)
        # print 'removed'
        # print bin_nums_list
    if (len(bins)) in bin_nums_list:
        bin_nums_list.remove(len(bins))
        # print 'removed'
        # print bin_nums_list
    bin_nums = np.array(bin_nums_list)
    # NOTE(review): this loop selects an empty slice when a bin has no
    # members, so the append/assignment below looks like a no-op guard --
    # confirm whether it ever changes mass_dist.
    for ii in bin_nums:
        if len(mass_dist.T[kk][digitized==ii]) == 0:
            temp_list = list(mass_dist.T[kk]\
                [digitized==ii])
            temp_list.append(np.zeros(len(bin_nums)))
            mass_dist.T[kk][digitized==ii] = np.array(temp_list)
    # print bin_nums
    # print len(bin_nums)
    medians = np.array([np.median(mass_dist.T[kk][digitized==ii]) \
        for ii in bin_nums])
    # print len(medians)
    if bootstrap == True:
        dist_in_bin = np.array([(mass_dist.T[kk][digitized==ii]) \
            for ii in bin_nums])
        for vv in range(len(dist_in_bin)):
            if len(dist_in_bin[vv]) == 0:
                # NOTE(review): dist_in_bin_list is assigned but unused
                dist_in_bin_list = list(dist_in_bin[vv])
                dist_in_bin[vv] = np.zeros(len(dist_in_bin[0]))
        # 16th/84th percentiles of 1000 bootstrap resamplings of the median
        low_err_test = np.array([np.percentile(astropy.stats.bootstrap\
            (dist_in_bin[vv],bootnum=1000,bootfunc=np.median),16) \
            for vv in range(len(dist_in_bin))])
        high_err_test = np.array([np.percentile(astropy.stats.bootstrap\
            (dist_in_bin[vv],bootnum=1000,bootfunc=np.median),84) \
            for vv in range(len(dist_in_bin))])
        # NOTE(review): ``frac_vals`` is not defined in this function;
        # unless a module-level global exists, this branch raises
        # NameError -- TODO confirm and fix.
        med_list = [[] for yy in range(len(frac_vals))]
        med_list[0] = medians
        med_list[1] = low_err_test
        med_list[2] = high_err_test
        medians = np.array(med_list)
    return medians
###############################################################################
def hist_calcs(mass,bins,dlogM,eco=False):
    """
    Returns dictionaries with the normalized counts for the upper
    and lower density portions of the sample; calculates the
    three different percentile cuts (50/50, 25/75, 10/90) for the
    mass array given.

    Parameters
    ----------
    mass: array-like
        A 1D array with log stellar mass values, assumed
        to be in an order which corresponds to the ascending
        densities (necessary, as the index cuts are based
        on this)
    bins: array-like
        A 1D array with the values which will be used as the bin edges
    dlogM: float-like
        The log difference between bin edges
    eco: boolean, optional
        When True, Poisson (sqrt-N) errors on the normalized counts are
        also stored under the 'low_err'/'high_err' keys of the returned
        dictionaries

    Returns
    -------
    hist_dict_low: dictionary-like
        A dictionary keyed on the fraction values (2, 4, 10) whose
        values are arrays of normalized counts for the lower density cut
    hist_dict_high: dictionary-like
        Same as hist_dict_low, but for the higher density cut
    """
    hist_dict_low = {}
    hist_dict_high = {}
    # Fractions of the sample to keep (1/2, 1/4, 1/10) and their
    # positions in the error lists.
    frac_val = np.array([2,4,10])
    frac_dict = {2:0,4:1,10:2}
    if eco == True:
        # range (not xrange) keeps this working under Python 2 and 3.
        low_err = [[] for xx in range(len(frac_val))]
        high_err = [[] for xx in range(len(frac_val))]
    for ii in frac_val:
        # Number of galaxies in each cut; int() keeps the slice index
        # valid under true division as well.
        frac_data = int(len(mass)/ii)
        # Lowest-density fraction: the first frac_data entries.
        frac_mass = mass[0:frac_data]
        counts, edges = np.histogram(frac_mass,bins)
        low_counts = (counts/float(len(frac_mass))/dlogM)
        if eco == True:
            low_err[frac_dict[ii]] = np.sqrt(counts)/len(frac_mass)/dlogM
        hist_dict_low[ii] = low_counts
        # Highest-density fraction: the last frac_data entries.
        frac_mass_2 = mass[-frac_data:]
        counts_2, edges_2 = np.histogram(frac_mass_2,bins)
        high_counts = (counts_2/float(len(frac_mass_2))/dlogM)
        if eco == True:
            high_err[frac_dict[ii]] = np.sqrt(counts_2)/len(frac_mass_2)/\
            dlogM
        hist_dict_high[ii] = high_counts
    if eco == True:
        hist_dict_low['low_err'] = low_err
        hist_dict_high['high_err'] = high_err
    return hist_dict_low, hist_dict_high
###############################################################################
def plot_all_rats(bin_centers,y_vals,neigh_val,ax,col_num,plot_idx):
    """
    Plots the density-cut mass ratio for a single mock onto the given
    subplot. Optimally used with a well-initiated for-loop.

    Parameters
    ----------
    bin_centers: array-like
        An array with the median mass values of the mass bins
    y_vals: array-like
        An array containing the ratio values for each mass bin
    neigh_val: integer-like
        Value which will be inserted into the text label of each plot
    ax: axis-like
        A value which specifies which axis each subplot is to be
        plotted to
    col_num: integer-like
        Integer which specifies which column is currently being
        plotted. Used for labelling subplots
    plot_idx: integer-like
        Specifies which subplot the figure is plotted to. Used for
        labeling the x-axis

    Returns
    -------
    None; draws the ratio curve onto ``ax`` in place.
    """
    if plot_idx ==16:
        ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=18)
    # One label per column. The original code also set a ``frac_val`` in
    # each branch, but it was never used, so it is dropped here.
    col_labels = {0:'Mass Ratio 50/50, {0} NN',\
                  1:'Mass Ratio 25/75, {0} NN',\
                  2:'Mass Ratio 10/90, {0} NN'}
    if col_num in col_labels:
        title_label = col_labels[col_num].format(neigh_val)
        ax.text(0.05, 0.95, title_label,horizontalalignment='left',\
            verticalalignment='top',transform=ax.transAxes,fontsize=12)
    ax.set_xlim(9.1,11.9)
    ax.set_ylim([0,5])
    ax.set_xticks(np.arange(9.5, 12., 0.5))
    ax.set_yticks([1.,3.])
    ax.tick_params(axis='both', labelsize=12)
    # Reference line at ratio == 1 (equal high/low density counts).
    ax.axhline(y=1,c="darkorchid",linewidth=0.5,zorder=0)
    ax.plot(bin_centers,y_vals,color='silver')
###############################################################################
def plot_eco_rats(bin_centers,y_vals,neigh_val,ax,col_num,plot_idx,only=False):
    """
    Plots the ECO density-cut mass ratios (with errors) onto the given
    subplot.

    Parameters
    ----------
    bin_centers: array-like
        An array with the median mass values of the mass bins
    y_vals: array-like
        Two-element structure: y_vals[0] maps the fraction values
        (2, 4, 10) to ratio arrays; y_vals[1] holds the corresponding
        error arrays indexed by column
    neigh_val: integer-like
        Value which will be inserted into the text label of each plot
    ax: axis-like
        A value which specifies which axis each subplot is to be
        plotted to
    col_num: integer-like
        Integer which specifies which column (percentile cut) is
        currently being plotted
    plot_idx: integer-like
        Specifies which subplot the figure is plotted to. Used for
        labeling the x-axis

    Optional
    --------
    only == True
        To be used when only plotting the ECO ratios, no mocks.
        Will add in the additional plotting specifications that
        would have been taken care of previously in a for-loop
        which plotted the mocks as well

    Returns
    -------
    None; draws the ECO ratio curve onto ``ax`` in place.
    """
    if only == True:
        if plot_idx ==16:
            ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=18)
        col_labels = {0:'Mass Ratio 50/50, {0} NN',\
                      1:'Mass Ratio 25/75, {0} NN',\
                      2:'Mass Ratio 10/90, {0} NN'}
        if col_num in col_labels:
            title_label = col_labels[col_num].format(neigh_val)
            ax.text(0.05, 0.95, title_label,horizontalalignment='left',\
                verticalalignment='top',transform=ax.transAxes,fontsize=12)
        ax.set_xlim(9.1,11.9)
        ax.set_ylim([0,5])
        ax.set_xticks(np.arange(9.5, 12., 0.5))
        ax.set_yticks([1.,3.])
        ax.tick_params(axis='both', labelsize=12)
        ax.axhline(y=1,c="darkorchid",linewidth=0.5,zorder=0)
    # BUG FIX: the original indexed with the *global* loop variable ``hh``
    # (leaked from the calling script) instead of the ``col_num``
    # parameter; the call sites pass the same value as col_num, so this
    # preserves behavior while removing the hidden global dependence.
    frac_vals = np.array([2,4,10])
    y_vals_2 = y_vals[0][frac_vals[col_num]]
    ax.errorbar(bin_centers,y_vals_2,yerr=y_vals[1][col_num],\
        color='dodgerblue',linewidth=2)
###############################################################################
def plot_hists(mass,neigh_val,bins,dlogM,ax,col_num,plot_idx):
    """
    Plots the density-cut, normalized mass counts for one mock onto the
    given subplot.

    Parameters
    ----------
    mass: array-like
        A 1D array with log stellar mass values, ordered by ascending
        density
    neigh_val: integer-like
        Value which will be inserted into the text label of each plot
    bins: array-like
        A 1D array with the values which will be used as the bin edges
    dlogM: float-like
        The log difference between bin edges
    ax: axis-like
        A value which specifies which axis each subplot is to be
        plotted to
    col_num: integer-like
        Integer which specifies which column (percentile cut) is
        currently being plotted; must be 0, 1, or 2
    plot_idx: integer-like
        Specifies which subplot the figure is plotted to. Used for
        labeling the x-axis

    Returns
    -------
    None; draws the two (low/high density) curves onto ``ax`` in place.
    """
    ax.set_yscale('log')
    # Column index selects both the subplot label and the fraction of
    # the (density-ordered) sample used for each cut.
    col_opts = {0:('Mass 50/50, {0} NN',2),\
                1:('Mass 25/75, {0} NN',4),\
                2:('Mass 10/90, {0} NN',10)}
    title_fmt, frac_val = col_opts[col_num]
    title_label = title_fmt.format(neigh_val)
    ax.text(0.05, 0.95, title_label,horizontalalignment='left',\
        verticalalignment='top',transform=ax.transAxes,fontsize=12)
    if plot_idx == 16:
        ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=18)
    ax.set_xlim(9.1,11.9)
    ax.set_ylim([10**-3,10**1])
    ax.set_xticks(np.arange(9.5, 12., 0.5))
    ax.set_yticks([10**-2,10**0])
    # BUG FIX: int() keeps the slice index an integer under true
    # division (consistent with hist_calcs); the bare division breaks
    # the slices below on Python 3.
    frac_data = int(len(mass)/frac_val)
    # Lowest-density fraction.
    frac_mass = mass[0:frac_data]
    counts, edges = np.histogram(frac_mass,bins)
    low_counts = (counts/float(len(frac_mass))/dlogM)
    bins_cens = .5*(edges[:-1]+edges[1:])
    # ax.step(bins_cens, low_counts, color='lightslategrey',where='mid',\
    #     alpha=0.1)
    ax.plot(bins_cens, low_counts, color='lightslategrey',alpha=0.1)
    # Highest-density fraction.
    frac_mass_2 = mass[-frac_data:]
    counts_2, edges_2 = np.histogram(frac_mass_2,bins)
    high_counts = (counts_2/float(len(frac_mass_2))/dlogM)
    # ax.step(bins_cens, high_counts, color='lightslategray',where='mid',\
    #     alpha=0.1)
    ax.plot(bins_cens, high_counts, color='lightslategray',alpha=0.1)
    # res = np.array([low_counts,high_counts])
    # return res
###############################################################################
def plot_eco_hists(mass,bins,dlogM,ax,col,plot_idx):
    """
    Plots the ECO low/high density normalized mass histograms (stepped)
    onto the given subplot.

    Parameters
    ----------
    mass: array-like
        A 1D array with log stellar mass values, ordered by ascending
        density
    bins: array-like
        A 1D array with the values which will be used as the bin edges
    dlogM: float-like
        The log difference between bin edges
    ax: axis-like
        The axis (subplot) the curves are drawn on
    col: integer-like
        Column index (0, 1, or 2) selecting the percentile cut
    plot_idx: integer-like
        Subplot index; the legend is only drawn on subplot 0

    Returns
    -------
    None; draws the two stepped histograms onto ``ax`` in place.
    """
    # Map the column index to the fraction of the sample kept per cut.
    frac_val = {0:2,1:4,2:10}[col]
    # BUG FIX: int() keeps the slice index an integer under true
    # division (consistent with hist_calcs).
    frac_data = int(len(mass)/frac_val)
    # Lowest-density fraction.
    frac_mass = mass[0:frac_data]
    counts, edges = np.histogram(frac_mass,bins)
    bins_cens = .5*(edges[:-1]+edges[1:])
    ax.step(bins_cens, (counts/float(len(frac_mass))/dlogM), color='lime',\
        where='mid',label='Lower')
    # Highest-density fraction.
    frac_mass_2 = mass[-frac_data:]
    counts_2, edges_2 = np.histogram(frac_mass_2,bins)
    ax.step(bins_cens, (counts_2/float(len(frac_mass_2))/dlogM), \
        color='dodgerblue',where='mid',label='Higher')
    if plot_idx == 0:
        ax.legend(loc='best')
###############################################################################
def plot_all_meds(bin_centers,y_vals,ax,plot_idx):
    """
    Draws the median distance to the Nth nearest neighbor per mass bin
    for one mock onto a pre-made subplot. Assumes a previously defined
    figure; best used in a for-loop.

    Parameters
    ----------
    bin_centers: array-like
        An array with the median mass values of the mass bins
    y_vals: array-like
        An array containing the median distance values for each mass bin
    ax: axis-like
        The subplot to draw on
    plot_idx: integer-like
        Index of the subplot; selects the in-plot text label and
        decides whether the x-axis label is drawn

    Returns
    -------
    None; the median-distance trend is drawn onto ``ax`` in place.
    """
    # Neighbor rank shown in each of the six subplot positions.
    neigh_labels = [1,2,3,5,10,20]
    ax.set_xlim(9.1,11.9)
    ax.set_ylim(0,10**1.5)
    ax.set_yscale('symlog')
    ax.set_xticks(np.arange(9.5,12.,0.5))
    ax.set_yticks(np.arange(0,12,1))
    ax.set_yticklabels(np.arange(1,11,2))
    ax.tick_params(axis='x', which='major', labelsize=16)
    label_text = 'n = {0}'.format(neigh_labels[plot_idx])
    ax.text(0.05, 0.95, label_text,horizontalalignment='left',\
        verticalalignment='top',transform=ax.transAxes,fontsize=18)
    # Only the middle-bottom subplot carries the x-axis label.
    if plot_idx == 4:
        ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=20)
    ax.plot(bin_centers,y_vals,color='silver')
#############################################################################
def plot_eco_meds(bin_centers,y_vals,low_lim,up_lim,ax,plot_idx,only=False):
    """
    Returns six subplots showing the median Nth nearest neighbor distance for
    ECO galaxies in each mass bin
    Parameters
    ----------
    bin_centers: array-like
        An array with the medians mass values of the mass bins
    y_vals: array-like
        An array containing the median distance values for each mass bin
    low_lim: array-like
        An array with the lower cut-off of the bootstrap errors for each median
    up_lim: array-like
        An array with the upper cut-off of the bootstrap errors for each median
    ax: axis-like
        A value which specifies which axis each subplot is to be
        plotted to
    plot_idx: integer-like
        Specifies which subplot the figure is plotted to. Used for
        the text label in each subplot
    Optional
    --------
    only == True
        To be used when only plotting the ECO median trends,
        no mocks. Will add in the additional plotting
        specifications that would have been taken care of
        previously in a for-loop which plotted the mocks as well
    Returns
    -------
    Subplots displaying the median distance to Nth nearest neighbor
    trends for each mass bin, with the bootstrap errors
    """
    if only == True:
        # Neighbor rank shown in each subplot position.
        titles = [1,2,3,5,10,20]
        ax.set_ylim(0,10**1.5)
        ax.set_xlim(9.1,11.9)
        ax.set_yscale('symlog')
        ax.set_xticks(np.arange(9.5,12.,0.5))
        ax.tick_params(axis='both', which='major', labelsize=16)
        title_here = 'n = {0}'.format(titles[plot_idx])
        ax.text(0.05, 0.95, title_here,horizontalalignment='left',\
            verticalalignment='top',transform=ax.transAxes,fontsize=18)
        if plot_idx == 4:
            ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=18)
    # NOTE(review): matplotlib's ``lolims``/``uplims`` expect boolean
    # flags marking which points are one-sided limit indicators; passing
    # the bootstrap percentile arrays here (together with a fixed
    # yerr=0.1) looks unintended -- probably yerr=[lower, upper] was
    # meant. Confirm against the matplotlib errorbar documentation
    # before changing.
    ax.errorbar(bin_centers,y_vals,yerr=0.1,lolims=low_lim,\
        uplims=up_lim,color='dodgerblue',label='ECO')
    # if plot_idx == 5:
    #     ax.legend(loc='best')
###############################################################################
def plot_bands(bin_centers,upper,lower,ax):
    """
    Returns an overlayed, fill-between plot, creating a band
    between the different mock catalog values plotted
    Parameters
    ----------
    bin_centers: array-like
        An array with the medians mass values of the mass bins
    upper: array-like
        Array with the max y-values among all the mocks
        for each mass bin
    lower: array-like
        Array with the min y-values among all the mocks
        for each mass bin
    ax: axis-like
        A value which specifies which axis each subplot is to be
        plotted to
    Returns
    -------
    A semi-transparent band overlaying the area of the plot
    bordered by the mocks
    """
    ax.fill_between(bin_centers,upper,lower,color='silver',alpha=0.1)
###############################################################################
def plot_med_range(bin_centers,low_lim,up_lim,ax,alpha,color='gray'):
    """
    Returns a plot with a transparent band highlighting a range of
    values.
    Parameters
    ----------
    bin_centers: array-like
        An array with the medians mass values of the mass bins
    low_lim: array-like
        Array with the min y-values among all the mocks
        for each mass bin
    up_lim: array-like
        Array with the max y-values among all the mocks
        for each mass bin
    ax: axis-like
        A value which specifies which axis each subplot is to be
        plotted to
    alpha: float-like
        A value which will determine the transparency of the band
    color: str
        Any color which Python recognizes; sets the color of the band
    Returns
    -------
    A band spanning from the max y-values to the minimum.
    """
    ax.fill_between(bin_centers,low_lim,up_lim,color=color,alpha=alpha)
##############################################################################
##############################################################################
##############################################################################
# --- Mock-catalog configuration ----------------------------------------
# NOTE: this script is Python 2 (print statements, xrange below).
# NOTE(review): hard-coded Windows path to the mock catalogs -- this only
# runs on the original author's machine; consider a CLI arg or env var.
dirpath = r"C:\Users\Hannah\Desktop\Vanderbilt_REU\Stellar_mass_env_density"
dirpath += r"\Catalogs\Resolve_plk_5001_so_mvir_scatter_ECO_Mocks_"
dirpath += r"scatter_mocks\Resolve_plk_5001_so_mvir_scatter0p1_ECO_Mocks"
# Columns read from the mock .dat files: ra, dec, cz, log stellar mass.
usecols = (0,1,8,13)
# Width of the log-mass bins.
dlogM = 0.2
##Add in column 4, HALO ID
##############################################################################
##############################################################################
##############################################################################
# Read every mock catalog and keep only galaxies above the completeness
# limit (log M* >= 9.1).
ECO_cats = (Index(dirpath,'.dat'))
names = ['ra','dec','cz','logMstar']
PD = [(pd.read_csv(ECO_cats[ii],sep="\s+", usecols= usecols,header=None,\
    skiprows=2,names=names)) for ii in range(len(ECO_cats))]
PD_comp = [(PD[ii][PD[ii].logMstar >= 9.1]) for ii in range(len(ECO_cats))]
# PD_test = [(PD[ii][PD[ii].logMstar >= 11.7]) for ii in range(len(ECO_cats))]
# for ii in range(len(PD_test)):
#     print len(PD_test[ii])
# Collect min/max mass over all mocks so one common set of bin edges can
# be built; shift edges by 0.1 and drop any edge above 11.7.
min_max_mass_arr = []
for ii in range(len(PD_comp)):
    min_max_mass_arr.append(max(PD_comp[ii].logMstar))
    min_max_mass_arr.append(min(PD_comp[ii].logMstar))
min_max_mass_arr = np.array(min_max_mass_arr)
bins = Bins_array_create(min_max_mass_arr,dlogM)
bins+= 0.1
bins_list = list(bins)
for ii in bins:
    if ii > 11.7:
        bins_list.remove(ii)
bins = np.array(bins_list)
num_of_bins = int(len(bins) - 1)
# Unpack the per-mock columns and convert (ra, dec, cz) to Cartesian
# coordinates for the KD-tree searches.
ra_arr = np.array([(np.array(PD_comp[ii])).T[0] \
    for ii in range(len(PD_comp))])
dec_arr = np.array([(np.array(PD_comp[ii])).T[1] \
    for ii in range(len(PD_comp))])
cz_arr = np.array([(np.array(PD_comp[ii])).T[2] \
    for ii in range(len(PD_comp))])
mass_arr = np.array([(np.array(PD_comp[ii])).T[3] \
    for ii in range(len(PD_comp))])
coords_test = np.array([sph_to_cart(ra_arr[vv],dec_arr[vv],cz_arr[vv]) \
    for vv in range(len(ECO_cats))])
# Neighbor ranks of interest; query 21 neighbors per galaxy (index 0 is
# the galaxy itself) and keep only the distances for the ranks above.
neigh_vals = np.array([1,2,3,5,10,20])
nn_arr = [[] for xx in xrange(len(coords_test))]
nn_arr_nn = [[] for yy in xrange(len(neigh_vals))]
for vv in range(len(coords_test)):
    nn_arr[vv] = spatial.cKDTree(coords_test[vv])
    nn_arr[vv] = np.array(nn_arr[vv].query(coords_test[vv],21)[0])
nn_specs = [(np.array(nn_arr).T[ii].T[neigh_vals].T) for ii in \
    range(len(coords_test))]
# Per mock: column 0 is mass, columns 1..6 are neighbor distances.
nn_mass_dist = np.array([(np.column_stack((mass_arr[qq],nn_specs[qq]))) \
    for qq in range(len(coords_test))])
###############################################################################
# Satellite fraction: re-read the files with the central/satellite flag
# column and report the fraction of flag == 0 galaxies (named
# ``sats_num`` here) per mock.
# NOTE(review): range(8) is hard-coded while everything else uses
# len(ECO_cats) -- confirm the number of catalogs.
sat_cols = (13,25)
sat_names = ['logMstar','cent_sat_flag']
SF_PD = [(pd.read_csv(ECO_cats[ii],sep="\s+", usecols= sat_cols,\
    header=None,
    skiprows=2,names=sat_names)) for ii in range(8)]
SF_PD_comp = [(SF_PD[ii][SF_PD[ii].logMstar >= 9.1]) for ii in \
    range(len(ECO_cats))]
sats_num = np.array([(len(SF_PD_comp[ii][SF_PD_comp[ii].cent_sat_flag==0])) \
    for ii in range(len(SF_PD_comp))])
cents_num = np.array([(len(SF_PD_comp[ii][SF_PD_comp[ii].cent_sat_flag==1])) \
    for ii in range(len(SF_PD_comp))])
gal_tot = np.array([(len(SF_PD_comp[ii])) for ii in range(len(SF_PD_comp))])
print 'SAT_FRAC = {0}'.format(sats_num/gal_tot)
###############################################################################
# Per-mock dictionaries: raw neighbor distances, densities derived from
# them, density-ordered mass arrays, and ratio curves from plot_calcs.
nn_dist = {}
nn_dens = {}
mass_dat = {}
ratio_info = {}
mass_freq = [[] for xx in xrange(len(coords_test))]
for ii in range(len(coords_test)):
    nn_dist[ii] = {}
    nn_dens[ii] = {}
    mass_dat[ii] = {}
    ratio_info[ii] = {}
    nn_dist[ii]['mass'] = nn_mass_dist[ii].T[0]
    for jj in range(len(neigh_vals)):
        # Column jj+1 of nn_mass_dist is the distance to the jj-th
        # neighbor rank (column 0 is the mass).
        nn_dist[ii][(neigh_vals[jj])] = np.array(nn_mass_dist[ii].T\
            [range(1,len(neigh_vals)+1)[jj]])
        nn_dens[ii][(neigh_vals[jj])] = np.column_stack((nn_mass_dist[ii].T\
            [0],calc_dens(neigh_vals[jj],\
            nn_mass_dist[ii].T[range(1,len\
            (neigh_vals)+1)[jj]])))
        # Sort galaxies by local density; keep the mass column in that
        # ascending-density order (hist_calcs and the ratio cuts rely
        # on this ordering).
        idx = np.array([nn_dens[ii][neigh_vals[jj]].T[1].argsort()])
        mass_dat[ii][(neigh_vals[jj])] = (nn_dens[ii][neigh_vals[jj]]\
            [idx].T[0])
        bin_centers, mass_freq[ii], ratio_info[ii][neigh_vals[jj]] = \
            plot_calcs(mass_dat[ii][neigh_vals[jj]],bins,dlogM)
# Median neighbor distance per mass bin, for every mock and rank.
all_mock_meds = [[] for xx in range(len(nn_mass_dist))]
for vv in range(len(nn_mass_dist)):
    all_mock_meds[vv] = np.array([bin_func(nn_mass_dist[vv],bins,(jj+1)) \
        for jj in range(len(nn_mass_dist[vv].T)-1)])
# Re-index the medians as [neighbor rank][mock] for plotting.
med_plot_arr = [([[] for yy in xrange(len(nn_mass_dist))]) \
    for xx in xrange(len(neigh_vals))]
for ii in range(len(neigh_vals)):
    for jj in range(len(nn_mass_dist)):
        med_plot_arr[ii][jj] = all_mock_meds[jj][ii]
# for ii in range(len(neigh_vals)):
#     for jj in range(len(nn_mass_dist)):
#         print len(all_mock_meds[jj][ii])
# Envelope (min/max over mocks) of the mass functions, per bin.
mass_freq_plot = (np.array(mass_freq))
max_lim = [[] for xx in range(len(mass_freq_plot.T))]
min_lim = [[] for xx in range(len(mass_freq_plot.T))]
for jj in range(len(mass_freq_plot.T)):
    max_lim[jj] = max(mass_freq_plot.T[jj])
    min_lim[jj] = min(mass_freq_plot.T[jj])
###############################################################################
# ordered_mass = nn_mass_dist[0].T[0][(nn_mass_dist[0].T[0].argsort())]
# dist_cont = [[[[] for zz in xrange(len(bins)-1)] for yy in \
# xrange(len(nn_mass_dist))] for xx in \
# xrange(1,len(nn_mass_dist[0].T))]
# for ii in xrange(len(nn_mass_dist)):
# sorting_test = np.digitize(nn_mass_dist[ii].T[0],bins)
# bin_nums = np.unique(sorting_test)
# bin_nums_list = list(bin_nums)
# # if 13 not in bin_nums:
# # bin_nums_list.append(13)
# # if 14 not in bin_nums:
# # bin_nums_list.append(14)
# # bin_nums = np.array(bin_nums_list)
# # if 14 in bin_nums_list:
# # bin_nums_list.remove(14)
# # bin_nums = np.array(bin_nums_list)
# for dd in range(1,num_of_bins+1):
# if dd not in bin_nums:
# bin_nums_list.append(dd)
# if len(bins) in bin_nums_list:
# bin_nums_list.remove(len(bins))
# bin_nums = np.array(bin_nums_list)
# for jj in xrange(1,len(nn_mass_dist[ii].T)):
# for hh in bin_nums:
# dist_cont[jj-1][ii][hh-1] = (nn_mass_dist[ii].T[jj]\
# [sorting_test==hh])
# if len(dist_cont[jj-1][ii][hh-1]) == 0:
# (dist_cont[jj-1][ii][hh-1]) = list(dist_cont[jj-1][ii][hh-1])
# (dist_cont[jj-1][ii][hh-1]).append(np.zeros\
# (len(dist_cont[1][0][0])))
# (dist_cont[jj-1][ii][hh-1]) = np.array((dist_cont[jj-1][ii]\
# [hh-1]))
# for ii in xrange(len(nn_mass_dist)):
# for jj in xrange(1,len(nn_mass_dist[ii].T)):
# for hh in bin_nums:
# print len(dist_cont[jj-1][ii][hh-1])
###############################################################################
# top_68 = [[[[]for ll in xrange(len(bin_nums))]for yy in \
# xrange(len(nn_mass_dist))] for xx in xrange(len(neigh_vals))]
# low_68 = [[[[]for ll in xrange(len(bin_nums))]for yy in \
# xrange(len(nn_mass_dist))] for xx in xrange(len(neigh_vals))]
# top_95 = [[[[]for ll in xrange(len(bin_nums))]for yy in \
# xrange(len(nn_mass_dist))] for xx in xrange(len(neigh_vals))]
# low_95 = [[[[]for ll in xrange(len(bin_nums))]for yy in \
# xrange(len(nn_mass_dist))] for xx in xrange(len(neigh_vals))]
# med_50 = [[[[]for ll in xrange(len(bin_nums))]for yy in \
# xrange(len(nn_mass_dist))] for xx in xrange(len(neigh_vals))]
# for aa in xrange(len(neigh_vals)):
# for bb in xrange(len(nn_mass_dist)):
# for cc in xrange(len(dist_cont[aa][bb])):
# top_68[aa][bb][cc] = np.percentile(dist_cont[aa][bb][cc],84)
# low_68[aa][bb][cc] = np.percentile(dist_cont[aa][bb][cc],16)
# top_95[aa][bb][cc] = np.percentile(dist_cont[aa][bb][cc],97.5)
# low_95[aa][bb][cc] = np.percentile(dist_cont[aa][bb][cc],2.5)
# med_50[aa][bb][cc] = np.median((dist_cont[aa][bb][cc]))
# top_68 = np.array(top_68)
# low_68 = np.array(low_68)
# top_95 = np.array(top_95)
# low_95 = np.array(low_95)
# med_50 = np.array(med_50)
##not working with 1 dec scatter...
###############################################################################
# Re-index the ratio curves as [neighbor rank][fraction][mock] so the
# plotting loops can walk them directly.
frac_vals = [2,4,10]
nn_plot_arr = [[[] for yy in xrange(len(nn_mass_dist))] for xx in \
    xrange(len(neigh_vals))]
for ii in range(len(neigh_vals)):
    for jj in range(len(nn_mass_dist)):
        nn_plot_arr[ii][jj] = (ratio_info[jj][neigh_vals[ii]])
plot_frac_arr = [[[[] for yy in xrange(len(nn_mass_dist))] \
    for zz in xrange(len(frac_vals))] for xx in \
    xrange(len(nn_plot_arr))]
for jj in range(len(nn_mass_dist)):
    for hh in range(len(frac_vals)):
        for ii in range(len(neigh_vals)):
            plot_frac_arr[ii][hh][jj] = nn_plot_arr[ii][jj][frac_vals[hh]]
###############################################################################
###############################################################################
###############################################################################
# --- ECO survey data ---------------------------------------------------
# NOTE(review): hard-coded Windows path, same portability caveat as the
# mock-catalog path above.
eco_path = r"C:\Users\Hannah\Desktop\Vanderbilt_REU\Stellar_mass_env_density"
eco_path += r"\Catalogs\ECO_true"
# Columns: ra, dec, cz, log stellar mass.
eco_cols = np.array([0,1,2,4])
###############################################################################
###############################################################################
###############################################################################
# Load the real ECO catalog and repeat the mock pipeline: completeness
# cut, Cartesian coordinates, KD-tree neighbor distances, densities,
# density-ordered masses, ratio curves, and bootstrapped medians.
ECO_true = (Index(eco_path,'.txt'))
names = ['ra','dec','cz','logMstar']
PD_eco = pd.read_csv(ECO_true[0],sep="\s+", usecols=(eco_cols),header=None,\
    skiprows=1,names=names)
eco_comp = PD_eco[PD_eco.logMstar >= 9.1]
ra_eco = (np.array(eco_comp)).T[0]
dec_eco = (np.array(eco_comp)).T[1]
cz_eco = (np.array(eco_comp)).T[2]
mass_eco = (np.array(eco_comp)).T[3]
coords_eco = sph_to_cart(ra_eco,dec_eco,cz_eco)
eco_neighbor_tree = spatial.cKDTree(coords_eco)
eco_tree_dist = np.array(eco_neighbor_tree.query(coords_eco,\
    (neigh_vals[-1]+1))[0])
# Column 0 is mass, columns 1..6 the kept neighbor-rank distances.
eco_mass_dist = np.column_stack((mass_eco,eco_tree_dist.T[neigh_vals].T))
##range 1,7 because of the six nearest neighbors (and fact that 0 is mass)
##the jj is there to specify which index in the [1,6] array
eco_dens = ([calc_dens(neigh_vals[jj],\
    (eco_mass_dist.T[range(1,7)[jj]])) for jj in range\
    (len(neigh_vals))])
eco_mass_dens = [(np.column_stack((mass_eco,eco_dens[ii]))) for ii in \
    range(len(neigh_vals))]
eco_idx = [(eco_mass_dens[jj].T[1].argsort()) for jj in \
    range(len(neigh_vals))]
eco_mass_dat = [(eco_mass_dens[jj][eco_idx[jj]].T[0]) for jj in \
    range(len(neigh_vals))]
eco_ratio_info = [[] for xx in xrange(len(eco_mass_dat))]
for qq in range(len(eco_mass_dat)):
    bin_centers, eco_freq, eco_ratio_info[qq] = plot_calcs(eco_mass_dat[qq],\
        bins,dlogM,mass_err=True,ratio_err=True)
eco_medians = [[] for xx in xrange(len(eco_mass_dat))]
for jj in (range(len(eco_mass_dat))):
    eco_medians[jj] = np.array(bin_func(eco_mass_dist,bins,(jj+1),\
        bootstrap=True))
###############################################################################
###############################################################################
# Figure 1: stellar mass functions of all mocks (silver) with their
# min/max envelope, overlaid with the ECO mass function and its errors.
fig,ax = plt.subplots(figsize=(8,8))
ax.set_title('Mass Distribution',fontsize=18)
ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=18)
ax.set_ylabel(r'$\log\ (\frac{N_{gal}}{N_{total}*dlogM_{*}})$',fontsize=20)
ax.set_yscale('log')
ax.set_xlim(9.1,11.9)
ax.tick_params(axis='both', labelsize=14)
for ii in range(len(mass_freq)):
    ax.plot(bin_centers,mass_freq[ii],color='silver')
ax.fill_between(bin_centers,max_lim,min_lim,color='silver',alpha=0.1)
ax.errorbar(bin_centers,eco_freq[0],yerr=eco_freq[1],color='dodgerblue',\
    linewidth=2,label='ECO')
ax.legend(loc='best')
plt.subplots_adjust(left=0.15, bottom=0.1, right=0.85, top=0.94,\
    hspace=0.2,wspace=0.2)
plt.show()
###############################################################################
# Min/max envelope of the ratio curves over all mocks, keyed
# '<neighbor>_<fraction>'. Infinities (from empty-bin divisions) are
# replaced with NaN so nanmax/nanmin ignore them.
A = {}
nn_dict = {1:0,2:1,3:2,5:3,10:4,20:5}
coln_dict = {2:0,4:1,10:2}
# NOTE(review): np.sort(dict.keys()) relies on Python 2 .keys()
# returning a list; under Python 3 this needs list(...) first.
nn_keys = np.sort(nn_dict.keys())
col_keys = np.sort(coln_dict.keys())
zz_num = len(plot_frac_arr[nn_dict[1]][coln_dict[2]])
for nn in nn_keys:
    for coln in col_keys:
        bin_str = '{0}_{1}'.format(nn,coln)
        for cc in range(zz_num):
            zz_arr = np.array(plot_frac_arr[nn_dict[nn]][coln_dict[coln]][cc])
            n_elem = len(zz_arr)
            # Seed the accumulator with a dummy zero column on the
            # first mock; it is deleted again below.
            if cc == 0:
                zz_tot = np.zeros((n_elem,1))
            zz_tot = np.insert(zz_tot,len(zz_tot.T),zz_arr,1)
        zz_tot = np.array(np.delete(zz_tot,0,axis=1))
        for kk in xrange(len(zz_tot)):
            zz_tot[kk][zz_tot[kk] == np.inf] = np.nan
        zz_tot_max = [np.nanmax(zz_tot[kk]) for kk in xrange(len(zz_tot))]
        zz_tot_min = [np.nanmin(zz_tot[kk]) for kk in xrange(len(zz_tot))]
        A[bin_str] = [zz_tot_max,zz_tot_min]
###############################################################################
np.seterr(divide='ignore',invalid='ignore')
nrow_num = int(6)
ncol_num = int(3)
zz = int(0)
# Figure 2: 6x3 grid of ratio curves (rows = neighbor rank, columns =
# percentile cut); mocks in silver, ECO in blue.
fig, axes = plt.subplots(nrows=nrow_num, ncols=ncol_num, \
    figsize=(100,200), sharex= True,sharey=True)
axes_flat = axes.flatten()
fig.text(0.01, 0.5, 'High Density Counts/Lower Density Counts', ha='center', \
    va='center',rotation='vertical',fontsize=20)
# fig.suptitle("Percentile Trends", fontsize=18)
while zz <= 16:
    for ii in range(len(eco_ratio_info)):
        for hh in range(len(eco_ratio_info[0][1])):
            for jj in range(len(nn_mass_dist)):
                # upper = A['{0}_{1}'.format(neigh_vals[ii],frac_vals[hh])][0]
                # lower = A['{0}_{1}'.format(neigh_vals[ii],frac_vals[hh])][1]
                # plot_bands(bin_centers,upper,lower,axes_flat[zz] )
                plot_all_rats(bin_centers,(plot_frac_arr[ii][hh][jj]),\
                    neigh_vals[ii],axes_flat[zz],hh,zz)
            plot_eco_rats(bin_centers,(eco_ratio_info[ii]),neigh_vals[ii],\
                axes_flat[zz],hh,zz)
            zz += 1
plt.subplots_adjust(left=0.04, bottom=0.09, right=0.98, top=0.98,\
    hspace=0,wspace=0)
plt.show()
###############################################################################
# Min/max envelope of the median-distance curves over all mocks, keyed
# by the neighbor-rank index as a string.
B = {}
yy_num = len(med_plot_arr[0])
for nn in range(len(med_plot_arr)):
    for ii in range(yy_num):
        med_str = '{0}'.format(nn)
        yy_arr = med_plot_arr[nn][ii]
        n_y_elem = len(yy_arr)
        # Dummy zero seed column, deleted again after the loop.
        if ii == 0:
            yy_tot = np.zeros((n_y_elem,1))
        yy_tot = np.insert(yy_tot,len(yy_tot.T),yy_arr,1)
    yy_tot = np.array(np.delete(yy_tot,0,axis=1))
    yy_tot_max = [np.nanmax(yy_tot[kk]) for kk in xrange(len(yy_tot))]
    yy_tot_min = [np.nanmin(yy_tot[kk]) for kk in xrange(len(yy_tot))]
    B[med_str] = [yy_tot_max,yy_tot_min]
###############################################################################
# Figure 3: 2x3 grid of median Nth-neighbor distances vs mass; mocks in
# silver, ECO with bootstrap errors in blue.
nrow_num_mass = int(2)
ncol_num_mass = int(3)
fig, axes = plt.subplots(nrows=nrow_num_mass, ncols=ncol_num_mass, \
    figsize=(100,200), sharex= True, sharey = True)
axes_flat = axes.flatten()
fig.text(0.01, 0.5, 'Distance to Nth Neighbor (Mpc)', ha='center', \
    va='center',rotation='vertical',fontsize=20)
zz = int(0)
while zz <=4:
    for ii in range(len(med_plot_arr)):
        for vv in range(len(nn_mass_dist)):
            # lower_m = B['{0}'.format(ii)][0]
            # upper_m = B['{0}'.format(ii)][1]
            # plot_med_range(bin_centers,top_95[ii][vv],low_95[ii][vv],\
            #     axes_flat[zz],0.05,color='lightsteelblue')
            # plot_med_range(bin_centers,top_68[ii][vv],low_68[ii][vv],\
            #     axes_flat[zz],0.15,color='gainsboro')
            # plot_bands(bin_centers,upper_m,lower_m,axes_flat[zz])
            plot_all_meds(bin_centers,med_plot_arr[ii][vv],axes_flat[zz],\
                zz)
        plot_eco_meds(bin_centers,eco_medians[ii][0],\
            eco_medians[ii][1],eco_medians[ii][2],\
            axes_flat[zz],zz)
        zz += 1
plt.subplots_adjust(left=0.05, bottom=0.09, right=0.98, top=0.98,\
    hspace=0,wspace=0)
plt.show()
###############################################################################
# Histograms of the low/high density mass distributions for every mock
# and neighbor rank, then their min/max envelopes (C = low-density cut,
# D = high-density cut).
hist_low_info = {}
hist_high_info = {}
for ii in xrange(len(coords_test)):
    hist_low_info[ii] = {}
    hist_high_info[ii] = {}
    for jj in range(len(neigh_vals)):
        hist_low_info[ii][neigh_vals[jj]],hist_high_info[ii][neigh_vals[jj]] \
            = hist_calcs(mass_dat[ii][neigh_vals[jj]],bins,dlogM)
frac_vals = [2,4,10]
# Re-index as [neighbor rank][mock], then [rank][fraction][mock].
hist_low_arr = [[[] for yy in xrange(len(nn_mass_dist))] for xx in \
    xrange(len(neigh_vals))]
hist_high_arr = [[[] for yy in xrange(len(nn_mass_dist))] for xx in \
    xrange(len(neigh_vals))]
for ii in range(len(neigh_vals)):
    for jj in range(len(nn_mass_dist)):
        hist_low_arr[ii][jj] = (hist_low_info[jj][neigh_vals[ii]])
        hist_high_arr[ii][jj] = (hist_high_info[jj][neigh_vals[ii]])
plot_low_hist = [[[[] for yy in xrange(len(nn_mass_dist))] \
    for zz in xrange(len(frac_vals))] for xx in \
    xrange(len(hist_low_arr))]
plot_high_hist = [[[[] for yy in xrange(len(nn_mass_dist))] \
    for zz in xrange(len(frac_vals))] for xx in \
    xrange(len(hist_high_arr))]
for jj in range(len(nn_mass_dist)):
    for hh in range(len(frac_vals)):
        for ii in range(len(neigh_vals)):
            plot_low_hist[ii][hh][jj] = hist_low_arr[ii][jj][frac_vals[hh]]
            plot_high_hist[ii][hh][jj] = hist_high_arr[ii][jj][frac_vals[hh]]
###############################################################################
# Envelope construction, same accumulator pattern as dictionary A above.
C = {}
D = {}
nn_dict = {1:0,2:1,3:2,5:3,10:4,20:5}
coln_dict = {2:0,4:1,10:2}
nn_keys = np.sort(nn_dict.keys())
col_keys = np.sort(coln_dict.keys())
vv_num = len(plot_low_hist[nn_dict[1]][coln_dict[2]])
for nn in nn_keys:
    for coln in col_keys:
        bin_str = '{0}_{1}'.format(nn,coln)
        for cc in range(vv_num):
            vv_arr = np.array(plot_low_hist[nn_dict[nn]][coln_dict[coln]][cc])
            n_elem = len(vv_arr)
            if cc == 0:
                vv_tot = np.zeros((n_elem,1))
            vv_tot = np.insert(vv_tot,len(vv_tot.T),vv_arr,1)
        vv_tot = np.array(np.delete(vv_tot,0,axis=1))
        for kk in xrange(len(vv_tot)):
            vv_tot[kk][vv_tot[kk] == np.inf] = np.nan
        vv_tot_max = [np.nanmax(vv_tot[kk]) for kk in xrange(len(vv_tot))]
        vv_tot_min = [np.nanmin(vv_tot[kk]) for kk in xrange(len(vv_tot))]
        C[bin_str] = [vv_tot_max,vv_tot_min]
hh_num = len(plot_high_hist[nn_dict[1]][coln_dict[2]])
for nn in nn_keys:
    for coln in col_keys:
        bin_str = '{0}_{1}'.format(nn,coln)
        for cc in range(hh_num):
            hh_arr = np.array(plot_high_hist[nn_dict[nn]][coln_dict[coln]][cc])
            n_elem = len(hh_arr)
            if cc == 0:
                hh_tot = np.zeros((n_elem,1))
            hh_tot = np.insert(hh_tot,len(hh_tot.T),hh_arr,1)
        hh_tot = np.array(np.delete(hh_tot,0,axis=1))
        for kk in xrange(len(hh_tot)):
            hh_tot[kk][hh_tot[kk] == np.inf] = np.nan
        hh_tot_max = [np.nanmax(hh_tot[kk]) for kk in xrange(len(hh_tot))]
        hh_tot_min = [np.nanmin(hh_tot[kk]) for kk in xrange(len(hh_tot))]
        D[bin_str] = [hh_tot_max,hh_tot_min]
###############################################################################
# Figure 4: 6x3 grid of low/high density histograms; mocks in gray,
# ECO stepped curves in lime (lower) and blue (higher).
nrow_num = int(6)
ncol_num = int(3)
fig, axes = plt.subplots(nrows=nrow_num, ncols=ncol_num, \
    figsize=(150,200), sharex= True,sharey=True)
axes_flat = axes.flatten()
fig.text(0.02, 0.5,r'$\log\ (\frac{N_{gal}}{N_{total}*dlogM_{*}})$', ha='center',
    va='center',rotation='vertical',fontsize=20)
for ii in range(len(mass_dat)):
    zz = 0
    for jj in range(len(neigh_vals)):
        for hh in range(3):
            # upper = C['{0}_{1}'.format(neigh_vals[jj],frac_vals[hh])][0]
            # lower = C['{0}_{1}'.format(neigh_vals[jj],frac_vals[hh])][1]
            # upper_2 = D['{0}_{1}'.format(neigh_vals[jj],frac_vals[hh])][0]
            # lower_2 = D['{0}_{1}'.format(neigh_vals[jj],frac_vals[hh])][1]
            # plot_bands(bin_centers,upper,lower,axes_flat[zz])
            # plot_bands(bin_centers,upper_2,lower_2,axes_flat[zz])
            plot_hists(mass_dat[ii][neigh_vals[jj]],neigh_vals[jj],bins,dlogM,\
                axes_flat[zz], hh, zz)
            # ECO overlay only needs to be drawn once (first mock pass).
            if ii == 0:
                plot_eco_hists(eco_mass_dat[jj],bins,dlogM,\
                    axes_flat[zz],hh,zz)
            zz += int(1)
plt.subplots_adjust(left=0.07, bottom=0.09, right=0.98, top=0.98,\
    hspace=0, wspace=0)
plt.show()
###############################################################################
def schechter_log_func(stellar_mass,phi_star,alpha,m_star):
    """
    Evaluate the Schechter function (log-mass form) used for the
    stellar mass functions of galaxies.

    Parameters
    ----------
    stellar_mass: array-like
        Unlogged stellar mass values; the independent variable the
        function is evaluated against (and eventually plotted over)
    phi_star: float-like
        Normalization constant; moves the curve up and down
    alpha: float-like (typically negative)
        Faint-end / low-mass slope describing the power-law portion
        of the curve
    m_star: float-like
        Unlogged characteristic stellar mass -- the "knee" where the
        power law gives way to the exponential cutoff

    Returns
    -------
    res: array-like
        Schechter function values, prepared to be plotted on a log
        scale
    """
    ratio_log = np.log10(stellar_mass/m_star)
    prefactor = phi_star * np.log(10)
    res = prefactor * 10**((alpha+1) * ratio_log) * np.exp(-10**ratio_log)
    return res
###############################################################################
# Fit a Schechter function to each mock's overall SMF (panels 0-7) and to
# the ECO SMF (last panel, weighted by its errors), plotting each fit over
# the data in a 3x3 grid.
xdata = 10**bin_centers
p0 = (1,-1.05,10**10.64)
param_arr = [[] for ii in range(len(mass_freq)+1)]
fig,axes = plt.subplots(nrows=3,ncols=3,sharex=True,sharey=True,\
    figsize=(150,200))
axes_flat = axes.flatten()
##rather than having mass_freq go out of index, just used if statement and
##directed it to the eco info
for ii in range(len(mass_freq)+1):
    if ii == range(len(mass_freq)+1)[-1]:
        # Final panel: ECO data, fit using its errors as sigma.
        ydata = eco_freq[0]
        opt_v, est_cov = optimize.curve_fit(schechter_log_func,xdata,ydata,\
            p0=p0,sigma=eco_freq[1])
    else:
        ydata = (mass_freq[ii])
        opt_v, est_cov = optimize.curve_fit(schechter_log_func,xdata,ydata,\
            p0=p0)
    schech_vals = schechter_log_func(10**bin_centers,opt_v[0],opt_v[1],\
        opt_v[2])
    param_arr[ii] = opt_v
    # NOTE(review): converting param_arr to an ndarray inside the loop
    # looks suspicious (re-wraps on every iteration) -- confirm intended.
    param_arr = np.array(param_arr)
    ax = axes_flat[ii]
    ax.set_yscale('log')
    ax.set_ylim([10**-3,10**0])
    ax.set_xlim([9.1,11.9])
    ax.set_yticks([10**-2,10**-1,10**0])
    ax.plot(bin_centers,schech_vals,label='Schechter',color='silver')
    if ii == 8:
        ax.errorbar(bin_centers,ydata,yerr=eco_freq[1],color='dodgerblue',\
            label='ECO')
    else:
        ax.plot(bin_centers,ydata,label='Mock',color='darkorchid')
    if ii == 0 or ii == 8:
        ax.legend(loc='best')
    if ii == 7:
        ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=18)
plt.subplots_adjust(left=0.03, bottom=0.08, right=0.99, top=0.99,\
    hspace=0,wspace=0)
plt.show()
###############################################################################
# Percentile-cut SMFs for ECO: one low/high dictionary pair per nn option,
# computed by the module-level hist_calcs helper.
eco_low = {}
eco_high = {}
for jj in range(len(neigh_vals)):
    eco_low[neigh_vals[jj]] = {}
    eco_high[neigh_vals[jj]] = {}
    eco_low[neigh_vals[jj]], eco_high[neigh_vals[jj]] = hist_calcs\
        (eco_mass_dat[jj],bins,dlogM,eco=True)
###############################################################################
# fig,ax = plt.subplots()
# ax.set_yscale('log')
# ax.plot(bin_centers,eco_low[1][2])
# plt.show()
###############################################################################
def param_finder(hist_counts,bin_centers):
    """
    Fit a Schechter function to one stellar mass function.

    Parameters
    ----------
    hist_counts: array-like
        Stellar mass function values used in the Schechter function
        parameterization
    bin_centers: array-like
        Log-mass bin centers (same length as hist_counts); unlogged
        internally to serve as the independent variable

    Returns
    -------
    opt_v: array-like
        Three best-fit values: phi_star, alpha, and M_star
    res_arr: array-like
        Two values: alpha and log10(M_star)
    """
    guess = (1,-1.05,10**10.64)
    opt_v, est_cov = optimize.curve_fit(schechter_log_func,10**bin_centers,\
        hist_counts,p0=guess)
    res_arr = np.array([opt_v[1], np.log10(opt_v[2])])
    return opt_v, res_arr
###############################################################################
###Test that param_finder is working
# opt_v,test_arr = param_finder(eco_low[1][2],bin_centers)
# schech_vals = schechter_log_func(10**bin_centers,opt_v[0],opt_v[1],\
# opt_v[2])
# ####THE error isn't working. Stops after 800 iterations.
# # opt_v,est_v = optimize.curve_fit(schechter_log_func,10**bin_centers,
# # eco_low[1][2],p0 = (1,-1.05,10**10.64),sigma=eco_low[1]['low_err'][0],\
# # absolute_sigma=True)
# fig,ax = plt.subplots()
# ax.set_yscale('log')
# ax.plot(bin_centers,eco_low[1][2])
# ax.plot(bin_centers,schech_vals)
# plt.show()
###############################################################################
def perc_calcs(mass,bins,dlogM):
    """
    Build a normalized stellar mass function for one sample: histogram
    the masses over *bins*, then divide by the sample size and by the
    bin width *dlogM* so the result is a fraction per dex.
    """
    counts, _ = np.histogram(mass,bins)
    return counts/float(len(mass))/dlogM
###############################################################################
def deciles(mass):
    """
    Split *mass* into 10 consecutive, equally sized chunks; the
    remainder from the integer division is absorbed into the last
    decile. Assumes the caller has already ordered the input.
    """
    chunk = int(len(mass)/10)
    pieces = []
    for idx in range(10):
        start = idx*chunk
        if idx == 9:
            pieces.append(mass[start:])
        else:
            pieces.append(mass[start:start+chunk])
    return pieces
###############################################################################
# Decile SMFs for ECO: split each nn option's mass list into tenths,
# build each tenth's SMF, then fit Schechter parameters to it.
eco_dec = {}
for cc in range(len(eco_mass_dat)):
    eco_dec[neigh_vals[cc]] = deciles(eco_mass_dat[cc])
# for ii in range(len(eco_dec[1])):
#     print len(eco_dec[1][ii])
eco_dec_smf = {}
for ss in neigh_vals:
    eco_dec_smf[ss] = {}
    for tt in range(len(eco_dec[ss])):
        eco_dec_smf[ss][tt] = perc_calcs(eco_dec[ss][tt],bins,dlogM)
eco_dec_alpha = {}
eco_dec_logMstar = {}
for oo in neigh_vals:
    eco_dec_alpha[oo] = []
    eco_dec_logMstar[oo] = []
    for pp in range(len(eco_dec[oo])):
        opt_v, temp_res_arr = param_finder(eco_dec_smf[oo][pp],bin_centers)
        eco_dec_alpha[oo].append(temp_res_arr[0])
        eco_dec_logMstar[oo].append(temp_res_arr[1])
# x-axis positions for the ten deciles.
ten_x = range(1,11)
# fig,ax = plt.subplots()
# for ii in neigh_vals:
#     ax.plot(ten_x,eco_dec_alpha[ii])
# ax.set_xlim(0,11)
# plt.show()
###############################################################################
def plot_deciles(dec_num,y_vals,ax,plot_idx,eco=False,logMstar=False,\
    color='gray'):
    """
    Plot one decile trend (alpha or log M*) on the given axis.

    When *eco* is True the panel is also decorated (axis limits, ticks,
    an "n = ..." label, and an x-label on panel 4) and the curve is drawn
    with markers; otherwise a faint plain line is drawn for a mock.
    """
    if eco:
        panel_nn = [1,2,3,5,10,20]
        ax.set_xlim(0,11)
        if logMstar:
            ax.set_ylim(10,12)
            ax.set_yticks(np.arange(10,12,0.5))
        else:
            ax.set_ylim(-1.25,-1.)
            ax.set_yticks(np.arange(-1.25,-1.,0.05))
        ax.set_xticks(range(1,11))
        ax.text(0.05, 0.95, 'n = {0}'.format(panel_nn[plot_idx]),\
            horizontalalignment='left',verticalalignment='top',\
            transform=ax.transAxes,fontsize=18)
        if plot_idx == 4:
            ax.set_xlabel('Decile',fontsize=18)
        ax.plot(dec_num,y_vals,marker='o',color=color,linewidth=2.5,\
            markeredgecolor=color)
    else:
        ax.plot(dec_num,y_vals,color=color,alpha=0.5)
###############################################################################
# nrow_num_mass = int(2)
# ncol_num_mass = int(3)
# fig, axes = plt.subplots(nrows=nrow_num_mass, ncols=ncol_num_mass, \
# figsize=(100,200), sharex= True, sharey = True)
# axes_flat = axes.flatten()
# zz = int(0)
# while zz <=5:
# ii = neigh_vals[zz]
# plot_deciles(ten_x,eco_dec_logMstar[ii],axes_flat[zz],zz,eco=True,\
# logMstar=True)
# zz += 1
# plt.subplots_adjust(left=0.05, bottom=0.09, right=1.00, top=1.00,\
# hspace=0,wspace=0)
# plt.show()
###############################################################################
def quartiles(mass):
    """
    Split *mass* into 4 consecutive, equally sized chunks; the
    remainder from the integer division is absorbed into the last
    quartile. Assumes the caller has already ordered the input.
    """
    chunk = int(len(mass)/4)
    pieces = []
    for idx in range(4):
        start = idx*chunk
        pieces.append(mass[start:] if idx == 3 else mass[start:start+chunk])
    return pieces
###############################################################################
# Quartile SMFs for ECO, analogous to the decile calculation above.
eco_quarts = {}
for cc in range(len(eco_mass_dat)):
    eco_quarts[neigh_vals[cc]] = quartiles(eco_mass_dat[cc])
eco_quarts_smf = {}
for ss in neigh_vals:
    eco_quarts_smf[ss] = {}
    for tt in range(len(eco_quarts[ss])):
        eco_quarts_smf[ss][tt] = perc_calcs(eco_quarts[ss][tt],bins,dlogM)
eco_quarts_alpha = {}
eco_quarts_logMstar = {}
for oo in neigh_vals:
    eco_quarts_alpha[oo] = []
    eco_quarts_logMstar[oo] = []
    for pp in range(len(eco_quarts[oo])):
        opt_v, temp_res_arr = param_finder(eco_quarts_smf[oo][pp],bin_centers)
        eco_quarts_alpha[oo].append(temp_res_arr[0])
        eco_quarts_logMstar[oo].append(temp_res_arr[1])
# x-axis positions for the four quartiles.
quart_x = range(1,5)
# fig,ax = plt.subplots()
# for ii in neigh_vals:
#     ax.plot(quart_x,eco_quarts_alpha[ii])
# ax.set_xlim(0,5)
# plt.show()
###############################################################################
def plot_quartiles(quart_num,y_vals,ax,plot_idx,eco=False,logMstar=False,\
    color='gray'):
    """
    Plot one quartile trend (alpha or log M*) on the given axis.

    When *eco* is True the panel is also decorated (axis limits, ticks,
    an "n = ..." label, and an x-label on panel 4) and the curve is drawn
    with markers; otherwise a plain line is drawn for a mock.
    """
    if eco:
        panel_nn = [1,2,3,5,10,20]
        ax.set_xlim(0,5)
        if logMstar:
            ax.set_ylim(10,12)
            ax.set_yticks(np.arange(10,12,0.5))
        else:
            ax.set_ylim(-1.2,-1.)
            ax.set_yticks(np.arange(-1.2,-1.,0.04))
        ax.set_xticks(range(1,5))
        ax.text(0.05, 0.95, 'n = {0}'.format(panel_nn[plot_idx]),\
            horizontalalignment='left',verticalalignment='top',\
            transform=ax.transAxes,fontsize=18)
        if plot_idx == 4:
            ax.set_xlabel('Quartiles',fontsize=18)
        ax.plot(quart_num,y_vals,marker='o',color=color,linewidth=2,\
            markeredgecolor=color)
    else:
        ax.plot(quart_num,y_vals,color=color)
###############################################################################
# nrow_num_mass = int(2)
# ncol_num_mass = int(3)
# fig, axes = plt.subplots(nrows=nrow_num_mass, ncols=ncol_num_mass, \
# figsize=(100,200), sharex= True, sharey = True)
# axes_flat = axes.flatten()
# zz = int(0)
# while zz <=5:
# ii = neigh_vals[zz]
# plot_quartiles(quart_x,eco_quarts_logMstar[ii],axes_flat[zz],zz,eco=True,\
# logMstar=True)
# zz += 1
# plt.subplots_adjust(left=0.05, bottom=0.09, right=1.00, top=1.00,\
# hspace=0,wspace=0)
# plt.show()
###############################################################################
def quart_finder(mass,bins,dlogM,neigh_vals):
    """
    For each nn option, split the masses into quartiles, build each
    quartile's SMF, and fit Schechter parameters to it.

    NOTE(review): reads the module-level ``bin_centers`` global -- TODO
    confirm it is defined wherever this is called.

    Returns
    -------
    (quarts_alpha, quarts_logMstar): dict, dict
        nn -> list of fitted alpha / log10(M*), one entry per quartile.
    """
    split_mass = {nn: quartiles(mass[nn]) for nn in neigh_vals}
    smf = {}
    for nn in neigh_vals:
        smf[nn] = {}
        for qq in range(len(split_mass[nn])):
            smf[nn][qq] = perc_calcs(split_mass[nn][qq],bins,dlogM)
    alpha_out = {}
    logmstar_out = {}
    for nn in neigh_vals:
        alpha_out[nn] = []
        logmstar_out[nn] = []
        for qq in range(len(split_mass[nn])):
            opt_v, fit_pair = param_finder(smf[nn][qq],bin_centers)
            alpha_out[nn].append(fit_pair[0])
            logmstar_out[nn].append(fit_pair[1])
    return alpha_out, logmstar_out
###############################################################################
# Quartile Schechter parameters for every mock catalog.
mock_quarts_alpha_dict = {}
mock_quarts_logMstar_dict = {}
for jj in range(len(mass_dat)):
    mock_quarts_alpha_dict[jj], mock_quarts_logMstar_dict[jj] = quart_finder\
        (mass_dat[jj],bins,dlogM,neigh_vals)
###############################################################################
def dec_finder(mass,bins,dlogM,neigh_vals):
    """
    For each nn option, split the masses into deciles, build each
    decile's SMF, and fit Schechter parameters to it.

    NOTE(review): reads the module-level ``bin_centers`` global -- TODO
    confirm it is defined wherever this is called.

    Returns
    -------
    (decs_alpha, decs_logMstar): dict, dict
        nn -> list of fitted alpha / log10(M*), one entry per decile.
    """
    split_mass = {nn: deciles(mass[nn]) for nn in neigh_vals}
    smf = {}
    for nn in neigh_vals:
        smf[nn] = {}
        for dd in range(len(split_mass[nn])):
            smf[nn][dd] = perc_calcs(split_mass[nn][dd],bins,dlogM)
    alpha_out = {}
    logmstar_out = {}
    for nn in neigh_vals:
        alpha_out[nn] = []
        logmstar_out[nn] = []
        for dd in range(len(split_mass[nn])):
            opt_v, fit_pair = param_finder(smf[nn][dd],bin_centers)
            alpha_out[nn].append(fit_pair[0])
            logmstar_out[nn].append(fit_pair[1])
    return alpha_out, logmstar_out
###############################################################################
# Decile Schechter parameters for every mock catalog.
mock_dec_alpha_dict = {}
mock_dec_logMstar_dict = {}
for jj in range(len(mass_dat)):
    mock_dec_alpha_dict[jj], mock_dec_logMstar_dict[jj] = dec_finder\
        (mass_dat[jj],bins,dlogM,neigh_vals)
###############################################################################
###quartiles logMstar
# Figure 1: quartile log M* trends -- mocks in gray, ECO in crimson.
nrow_num_mass = int(2)
ncol_num_mass = int(3)
fig, axes = plt.subplots(nrows=nrow_num_mass, ncols=ncol_num_mass, \
    figsize=(100,200), sharex= True, sharey = True)
axes_flat = axes.flatten()
fig.text(0.01, 0.5, '$\log\ (M_{*}/M_{\odot})$', ha='center', \
    va='center',rotation='vertical',fontsize=20)
zz = int(0)
while zz <=5:
    ii = neigh_vals[zz]
    for ff in range(len(mass_dat)):
        plot_quartiles(quart_x,mock_quarts_logMstar_dict[ff][ii],axes_flat[zz],\
            zz,logMstar=True)
    # ECO curve last so it sits on top and decorates the panel.
    plot_quartiles(quart_x,eco_quarts_logMstar[ii],axes_flat[zz],zz,\
        logMstar=True,color='crimson',eco=True)
    zz += 1
plt.subplots_adjust(left=0.05, bottom=0.09, right=1.00, top=1.00,\
    hspace=0,wspace=0)
plt.show()
###############################################################################
# Figure 2: decile log M* trends.
nrow_num_mass = int(2)
ncol_num_mass = int(3)
fig, axes = plt.subplots(nrows=nrow_num_mass, ncols=ncol_num_mass, \
    figsize=(100,200), sharex= True, sharey = True)
axes_flat = axes.flatten()
fig.text(0.01, 0.5, '$\log\ (M_{*}/M_{\odot})$', ha='center', \
    va='center',rotation='vertical',fontsize=20)
zz = int(0)
while zz <=5:
    ii = neigh_vals[zz]
    for ff in range(len(mass_dat)):
        plot_deciles(ten_x,mock_dec_logMstar_dict[ff][ii],axes_flat[zz],\
            zz,logMstar=True)
    plot_deciles(ten_x,eco_dec_logMstar[ii],axes_flat[zz],zz,\
        logMstar=True,color='crimson',eco=True)
    zz += 1
plt.subplots_adjust(left=0.05, bottom=0.09, right=1.00, top=1.00,\
    hspace=0,wspace=0)
plt.show()
###############################################################################
# Figure 3: decile alpha trends.
nrow_num_mass = int(2)
ncol_num_mass = int(3)
fig, axes = plt.subplots(nrows=nrow_num_mass, ncols=ncol_num_mass, \
    figsize=(100,200), sharex= True, sharey = True)
axes_flat = axes.flatten()
fig.text(0.01, 0.5, r'$\alpha$', ha='center', \
    va='center',rotation='vertical',fontsize=25)
zz = int(0)
while zz <=5:
    ii = neigh_vals[zz]
    for ff in range(len(mass_dat)):
        plot_deciles(ten_x,mock_dec_alpha_dict[ff][ii],axes_flat[zz],zz,\
            logMstar=False)
    plot_deciles(ten_x,eco_dec_alpha[ii],axes_flat[zz],zz,\
        logMstar=False,color='crimson',eco=True)
    zz += 1
plt.subplots_adjust(left=0.05, bottom=0.09, right=1.00, top=1.00,\
    hspace=0,wspace=0)
plt.show()
###############################################################################
# Figure 4: quartile alpha trends.
nrow_num_mass = int(2)
ncol_num_mass = int(3)
fig, axes = plt.subplots(nrows=nrow_num_mass, ncols=ncol_num_mass, \
    figsize=(100,200), sharex= True, sharey = True)
axes_flat = axes.flatten()
fig.text(0.01, 0.5, r'$\alpha$', ha='center', \
    va='center',rotation='vertical',fontsize=25)
zz = int(0)
while zz <=5:
    ii = neigh_vals[zz]
    for ff in range(len(mass_dat)):
        plot_quartiles(quart_x,mock_quarts_alpha_dict[ff][ii],axes_flat[zz],zz,\
            logMstar=False)
    plot_quartiles(quart_x,eco_quarts_alpha[ii],axes_flat[zz],zz,\
        logMstar=False,color='crimson',eco=True)
    zz += 1
plt.subplots_adjust(left=0.05, bottom=0.09, right=1.00, top=1.00,\
    hspace=0,wspace=0)
plt.show()
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
##Creating dictionaries through for loops to house the parameters for each of
#ECO's 18 different options (6 nn and 3 density cuts)
#One dictionary for the lower portion of the cuts and one for the higher
# param_dict_low = {}
# param_dict_high = {}
# for dd in neigh_vals:
# param_dict_low[dd] = {}
# param_dict_high[dd] = {}
# for ee in frac_vals:
# param_dict_low[dd][ee] = {}
# param_dict_high[dd][ee] = {}
# opt_v, param_dict_low[dd][ee] = param_finder(eco_low[dd][ee],\
# bin_centers)
# opt_v, param_dict_high[dd][ee] = param_finder(eco_high[dd][ee],\
# bin_centers)
# #### Putting the percentile cuts in order, as seen below
# #10,25,low_50,high_50,75,90
# over_alpha_dict = {}
# over_log_m_star = {}
# for dd in neigh_vals:
# temp_list_alpha = []
# temp_list_logMstar = []
# over_alpha_dict[dd] = {}
# over_log_m_star[dd] = {}
# low_idx = np.array(list(reversed(np.sort(param_dict_low[dd].keys()))))
# high_idx = np.sort(param_dict_high[dd].keys())
# for ff in range(len(low_idx)):
# temp_list_alpha.append(param_dict_low[dd][low_idx[ff]][0])
# temp_list_logMstar.append(param_dict_low[dd][low_idx[ff]][1])
# for ff in range(len(high_idx)):
# temp_list_alpha.append(param_dict_high[dd][high_idx[ff]][0])
# temp_list_logMstar.append(param_dict_high[dd][high_idx[ff]][1])
# over_alpha_dict[dd] = temp_list_alpha
# over_log_m_star[dd] = temp_list_logMstar
# perc_arr = (10,25,49,51,75,90)
# fig,ax = plt.subplots()
# for jj in neigh_vals:
# ax.plot(perc_arr,over_log_m_star[jj],marker='o',label='{0}'.format(jj), \
# linestyle='--')
# ax.set_xlim([0,100])
# ax.legend(loc='best', numpoints=1)
# ax.set_xlabel('Percentile')
# ax.set_ylabel(r'$\log\ M_{*}$')
# plt.show()
# fig,ax = plt.subplots()
# for jj in neigh_vals:
# ax.plot(perc_arr,over_alpha_dict[jj],marker='o',label='{0}'.format(jj), \
# linestyle='--')
# ax.set_xlim([0,100])
# ax.legend(loc='best', numpoints=1)
# ax.set_xlabel('Percentile')
# ax.set_ylabel(r'$\alpha$')
# plt.show()
### moving around the parameters so that I can find the differences, rather
#than just plotting them straight-up
# diff_dict_m_star = {}
# diff_dict_alpha = {}
# for dd in neigh_vals:
# diff_dict_m_star[dd] = {}
# diff_dict_alpha[dd] = {}
# for jj in frac_vals:
# temp_list_diff_m_star = []
# temp_list_diff_alpha = []
# diff_dict_alpha[dd][jj] = {}
# diff_dict_m_star[dd][jj] = {}
# temp_list_diff_m_star.append((param_dict_high[dd][jj][1] - \
# param_dict_low[dd][jj][1]))
# temp_list_diff_alpha.append(((param_dict_high[dd][jj][0]-\
# param_dict_low[dd][jj][0])/param_dict_high[dd][jj][0] * 100))
# diff_dict_alpha[dd][jj] = np.array(temp_list_diff_alpha)
# diff_dict_m_star[dd][jj] = np.array(temp_list_diff_m_star)
# dict_revamp_mstar = {}
# for dd in neigh_vals:
# dict_revamp_mstar[dd] = []
# for jj in frac_vals:
# dict_revamp_mstar[dd].append(diff_dict_m_star[dd][jj])
# dict_revamp_alpha = {}
# for dd in neigh_vals:
# dict_revamp_alpha[dd] = []
# for jj in frac_vals:
# dict_revamp_alpha[dd].append(diff_dict_alpha[dd][jj])
# discrete_x = np.array([1,2,3])
# fig,ax = plt.subplots()
# for ii in neigh_vals:
# ax.plot(discrete_x,dict_revamp_mstar[ii],marker='o',\
# linestyle= '--',label='{0}'.format(ii))
# ax.set_xlim(0,4)
# ax.set_xlabel('Fractional Cut',fontsize=18)
# ax.set_xticks([1,2,3])
# ax.set_ylabel('Difference in $\log\ M_{*}$, h-l',fontsize=18)
# ax.legend(loc='best',numpoints=1)
# ax.text(1,0.5,'50/50 Cut',horizontalalignment='center')
# ax.text(2,0.6,'25/75 Cut',horizontalalignment='center')
# ax.text(3,0.75,'10/90 Cut',horizontalalignment='center')
# plt.show()
# ######
# fig,ax = plt.subplots()
# for ii in neigh_vals:
# ax.plot(discrete_x,dict_revamp_alpha[ii],marker='o',\
# linestyle= '--',label='{0}'.format(ii))
# ax.set_xlim(0,4)
# ax.set_xlabel('Fractional Cut',fontsize=18)
# ax.set_xticks([1,2,3])
# ax.set_ylabel(r'Difference in $\alpha$, (h-l)/h',fontsize=18)
# ax.legend(loc='best',numpoints=1)
# ax.text(1,-7,'50/50 Cut',horizontalalignment='center')
# ax.text(2,-7,'25/75 Cut',horizontalalignment='center')
# ax.text(3,-7,'10/90 Cut',horizontalalignment='center')
# plt.show()
#50/50, 25/75, 10/90
# mocks_high_alpha = {}
# mocks_high_logMstar = {}
# mocks_low_alpha = {}
# mocks_low_logMstar = {}
# for rr in xrange(len(hist_high_info)):
# mocks_high_alpha[rr] = {}
# mocks_high_logMstar[rr] = {}
# mocks_low_alpha[rr] = {}
# mocks_low_logMstar[rr] = {}
# for ss in neigh_vals:
# mocks_high_alpha[rr][ss] = {}
# mocks_high_logMstar[rr][ss] = {}
# mocks_low_alpha[rr][ss] = {}
# mocks_low_logMstar[rr][ss] = {}
# for tt in frac_vals:
# opt_v, temp_res_high = param_finder(hist_high_info[rr][ss][tt],\
# bin_centers)
# opt_v, temp_res_low = param_finder(hist_low_info[rr][ss][tt],\
# bin_centers)
# mocks_high_alpha[rr][ss][tt] = temp_res_high[0]
# mocks_high_logMstar[rr][ss][tt] = temp_res_high[1]
# mocks_low_alpha[rr][ss][tt] = temp_res_low[0]
# mocks_low_logMstar[rr][ss][tt] = temp_res_low[1]
|
hrichstein/Stellar_mass_env_Density
|
Codes/Scripts/clean_copy.py
|
Python
|
mit
| 75,233
|
[
"Galaxy"
] |
09348955b895a575a069ec6799a9dab8897f28a52de2af4f515fb241ef79df1d
|
# -*- coding: utf-8 -*-
#
# test_3d_gauss.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
NEST Topology Module
EXPERIMENTAL example of 3d layer.
3d layers are currently not supported, use at your own risk!
Hans Ekkehard Plesser, UMB
This example uses the function GetChildren, which is deprecated. A deprecation
warning is therefore issued. For details about deprecated functions, see
documentation.
'''
import nest
import pylab
import random
import nest.topology as topo
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
pylab.ion()
nest.ResetKernel()
# generate list of 1000 (x,y,z) triplets
pos = [[random.uniform(-0.5, 0.5), random.uniform(-0.5, 0.5),
        random.uniform(-0.5, 0.5)]
       for j in range(1000)]
l1 = topo.CreateLayer(
    {'extent': [1.5, 1.5, 1.5],  # must specify 3d extent AND center
     'center': [0., 0., 0.],
     'positions': pos,
     'elements': 'iaf_psc_alpha'})
# visualize
# xext, yext = nest.GetStatus(l1, 'topology')[0]['extent']
# xctr, yctr = nest.GetStatus(l1, 'topology')[0]['center']
# l1_children is a work-around until NEST 3.0 is released
l1_children = nest.hl_api.GetChildren(l1)[0]
# extract position information, transpose to list of x, y and z positions
xpos, ypos, zpos = zip(*topo.GetPosition(l1_children))
# 3d scatter of all neuron positions
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xpos, ypos, zpos, s=15, facecolor='b', edgecolor='none')
# Gaussian connections in full volume [-0.75,0.75]**3
topo.ConnectLayers(l1, l1,
                   {'connection_type': 'divergent', 'allow_autapses': False,
                    'mask': {'volume': {'lower_left': [-0.75, -0.75, -0.75],
                                        'upper_right': [0.75, 0.75, 0.75]}},
                    'kernel': {'gaussian': {'p_center': 1., 'sigma': 0.25}}})
# show connections from center element
# sender shown in red, targets in green
ctr = topo.FindCenterElement(l1)
xtgt, ytgt, ztgt = zip(*topo.GetTargetPositions(ctr, l1)[0])
xctr, yctr, zctr = topo.GetPosition(ctr)[0]
ax.scatter([xctr], [yctr], [zctr], s=40, facecolor='r', edgecolor='none')
ax.scatter(xtgt, ytgt, ztgt, s=40, facecolor='g', edgecolor='g')
# histogram of distances from the center element to its targets
tgts = topo.GetTargetNodes(ctr, l1)[0]
d = topo.Distance(ctr, tgts)
plt.figure()
plt.hist(d, 25)
# plt.show()
|
hakonsbm/nest-simulator
|
topology/examples/test_3d_gauss.py
|
Python
|
gpl-2.0
| 2,931
|
[
"Gaussian"
] |
fe4e45d2082d5237afd265a130f53c22891702f6d5a419b8684c4098ea5be1f5
|
#!/usr/bin/env python3
import itertools
import logging
import operator
import unicodedata
import regex as re
from somajo import doubly_linked_list
from somajo import utils
from somajo.token import Token
class Tokenizer():
_supported_languages = set(["de", "de_CMC", "en", "en_PTB"])
_default_language = "de_CMC"
def __init__(self, split_camel_case=False, token_classes=False, extra_info=False, language="de_CMC"):
    """Create a Tokenizer object.

    If split_camel_case is set to True, tokens written in CamelCase
    will be split. If token_classes is set to True, the tokenizer
    will output the token class for each token (if it is a number,
    an XML tag, an abbreviation, etc.). If extra_info is set to
    True, the tokenizer will output information about the original
    spelling of the tokens. Unsupported values for `language` fall
    back to the class-level default language.
    """
    self.split_camel_case = split_camel_case
    self.token_classes = token_classes
    self.extra_info = extra_info
    # BUGFIX: fall back to the class attribute `_default_language`;
    # the previously referenced `self.default_language` does not
    # exist, so an unsupported language raised an AttributeError
    # instead of falling back.
    self.language = language if language in self._supported_languages else self._default_language
    self.spaces = re.compile(r"\s+")
    self.spaces_or_empty = re.compile(r"^\s*$")
    self.controls = re.compile(r"[\u0000-\u001F\u007F-\u009F]")
    self.stranded_variation_selector = re.compile(r" \uFE0F")
    # soft hyphen (00AD), zero-width space (200B), zero-width
    # non-joiner (200C), zero-width joiner (200D), Arabic letter
    # mark (061C), left-to-right mark (200E), right-to-left mark
    # (200F), word joiner (2060), left-to-right isolate (2066),
    # right-to-left isolate (2067), first strong isolate (2068),
    # pop directional isolate (2069), l-t-r/r-t-l embedding (202A,
    # 202B), l-t-r/r-t-l override (202D, 202E), pop directional
    # formatting (202C), zero-width no-break space (FEFF)
    self.other_nasties = re.compile(r"[\u00AD\u061C\u200B-\u200F\u202A-\u202E\u2060\u2066-\u2069\uFEFF]")
    # TAGS, EMAILS, URLs
    self.xml_declaration = re.compile(r"""<\?xml
                     (?:                # This group permits zero or more attributes
                       \s+              # Whitespace to separate attributes
                       [_:A-Z][-.:\w]*  # Attribute name
                       \s*=\s*          # Attribute name-value delimiter
                       (?: "[^"]*"      # Double-quoted attribute value
                       | '[^']*'        # Single-quoted attribute value
                       )
                     )*
                     \s*                # Permit trailing whitespace
                     \?>""", re.VERBOSE | re.IGNORECASE)
    # self.tag = re.compile(r'<(?!-)(?:/[^> ]+|[^>]+/?)(?<!-)>')
    # taken from Regular Expressions Cookbook
    self.tag = re.compile(r"""
                     <
                     (?:                  # Branch for opening tags:
                       ([_:A-Z][-.:\w]*)  # Capture the opening tag name to backreference 1
                       (?:                # This group permits zero or more attributes
                         \s+              # Whitespace to separate attributes
                         [_:A-Z][-.:\w]*  # Attribute name
                         \s*=\s*          # Attribute name-value delimiter
                         (?: "[^"]*"      # Double-quoted attribute value
                         | '[^']*'        # Single-quoted attribute value
                         )
                       )*
                       \s*                # Permit trailing whitespace
                       /?                 # Permit self-closed tags
                     |                    # Branch for closing tags:
                       /
                       ([_:A-Z][-.:\w]*)  # Capture the closing tag name to backreference 2
                       \s*                # Permit trailing whitespace
                     )
                     >
                     """, re.VERBOSE | re.IGNORECASE)
    # regex for email addresses taken from:
    # http://www.regular-expressions.info/email.html
    # self.email = re.compile(r"\b[\w.%+-]+@[\w.-]+\.\p{L}{2,}\b")
    self.email = re.compile(r"\b[\w.%+-]+(?:@| \[at\] )[\w.-]+(?:\.| \[?dot\]? )\p{L}{2,}\b")
    # simple regex for urls that start with http or www
    # TODO: allow a closing bracket at the end if an opening one occurred after http etc.
    self.simple_url_with_brackets = re.compile(r'\b(?:(?:https?|ftp|svn)://|(?:https?://)?www\.)\S+?\(\S*?\)\S*(?=$|[\'. "!?,;])', re.IGNORECASE)
    self.simple_url = re.compile(r'\b(?:(?:https?|ftp|svn)://|(?:https?://)?www\.)\S+[^\'. "!?,;:)]', re.IGNORECASE)
    self.doi = re.compile(r'\bdoi:10\.\d+/\S+', re.IGNORECASE)
    self.doi_with_space = re.compile(r'(?<=\bdoi: )10\.\d+/\S+', re.IGNORECASE)
    # regex for ISBNs adapted from:
    # https://www.oreilly.com/library/view/regular-expressions-cookbook/9781449327453/ch04s13.html
    self.isbn = re.compile(r"""\b
                     (?:
                       (?<=ISBN(?:-1[03])?:?[ ]?)  # Either preceded by ISBN identifier
                     |                             # or,
                       (?<![0-9][ -])              # if there is no ISBN identifier, not preceded by [0-9][ -].
                     )
                     (?:
                       (?:[0-9]{9}[0-9X])          # ISBN-10 without separators.
                     |
                       (?:(?=[0-9X -]{13}\b)       # ISBN-10 with separators.
                         [0-9]{1,5}([ -])          # 1-5 digit group identifier.
                         [0-9]{1,7}\1              # Publisher identifier.
                         [0-9]{1,7}\1              # Title identifier.
                         [0-9X])                   # Check digit.
                     |
                       (?:97[89][0-9]{10})         # ISBN-13 without separators.
                     |
                       (?:(?=[0-9X -]{17}\b)       # ISBN-13 with separators
                         97[89]([ -])              # ISBN-13 prefix.
                         [0-9]{1,5}\2              # 1-5 digit group identifier.
                         [0-9]{1,7}\2              # Publisher identifier.
                         [0-9]{1,7}\2              # Title identifier.
                         [0-9])                    # Check digit.
                     )
                     (?!\w|[ -][0-9])""", re.VERBOSE | re.IGNORECASE)
    # we also allow things like tagesschau.de-App
    self.url_without_protocol = re.compile(r'\b[\w./-]+\.(?:de|com|tv|me|net|us|org|at|cc|ly|be|ch|info|live|eu|edu|gov|jpg|png|gif|log|txt|xlsx?|docx?|pptx?|pdf)(?:-\w+)?\b', re.IGNORECASE)
    self.reddit_links = re.compile(r'(?<!\w)/?[rlu](?:/\w+)+/?(?!\w)', re.IGNORECASE)
    # XML entities
    self.entity = re.compile(r"""&(?:
                     quot|amp|apos|lt|gt  # named entities
                     |
                     \#\d+                # decimal entities
                     |
                     \#x[0-9a-f]+         # hexadecimal entities
                     );""", re.VERBOSE | re.IGNORECASE)
    # EMOTICONS
    emoticon_set = set(["(-.-)", "(T_T)", "(♥_♥)", ")':", ")-:",
                        "(-:", ")=", ")o:", ")x", ":'C", ":/",
                        ":<", ":C", ":[", "=(", "=)", "=D", "=P",
                        ">:", "\\:", "]:", "x(", "^^", "o.O",
                        "\\O/", "\\m/", ":;))", "_))", "*_*",
                        "._.", ">_<", "*<:-)", ":!:", ":;-))",
                        "x'D", ":^)", "(>_<)", ":->", "\\o/",
                        "B-)", ":-$", "O:-)", "=-O", ":O", ":-!",
                        ":-x", ":-|", ":-\\", ":-[", ">:-(",
                        "^.^"])
    # From https://textfac.es/
    textfaces_space = set(['⚆ _ ⚆', '˙ ͜ʟ˙', '◔ ⌣ ◔', '( ゚ヮ゚)', '(• ε •)',
                           '(づ ̄ ³ ̄)づ', '♪~ ᕕ(ᐛ)ᕗ', '\\ (•◡•) /', '( ಠ ͜ʖರೃ)',
                           '( ⚆ _ ⚆ )', '(▀̿Ĺ̯▀̿ ̿)', '༼ つ ◕_◕ ༽つ', '༼ つ ಥ_ಥ ༽つ',
                           '( ͡° ͜ʖ ͡°)', '( ͡°╭͜ʖ╮͡° )', '(╯°□°)╯︵ ┻━┻',
                           '( ͡ᵔ ͜ʖ ͡ᵔ )', '┬──┬ ノ( ゜-゜ノ)', '┬─┬ノ( º _ ºノ)',
                           '(ง ͠° ͟ل͜ ͡°)ง', '(͡ ͡° ͜ つ ͡͡°)', "﴾͡๏̯͡๏﴿ O'RLY?",
                           '(╯°□°)╯︵( .o.)', '(° ͡ ͜ ͡ʖ ͡ °)', '┬─┬ ︵ /(.□. )',
                           '(/) (°,,°) (/)', '| (• ◡•)| (❍ᴥ❍ʋ)',
                           '༼ つ ͡° ͜ʖ ͡° ༽つ', '(╯°□°)╯︵ ʞooqǝɔɐɟ', '┻━┻ ︵ヽ(`Д´)ノ︵ ┻━┻',
                           '┬┴┬┴┤ ͜ʖ ͡°) ├┬┴┬┴', '(ó ì_í)=óò=(ì_í ò)',
                           '(•_•) ( •_•)>⌐■-■ (⌐■_■)', '(ノ◕ヮ◕)ノ*:・゚✧ ✧゚・: *ヽ(◕ヮ◕ヽ)',
                           '[̲̅$̲̅(̲̅ ͡° ͜ʖ ͡°̲̅)̲̅$̲̅]', '/╲/\\╭( ͡° ͡° ͜ʖ ͡° ͡°)╮/\\╱\\',
                           '( ͡°( ͡° ͜ʖ( ͡° ͜ʖ ͡°)ʖ ͡°) ͡°)', '(._.) ( l: ) ( .-. ) ( :l ) (._.)',
                           "̿ ̿ ̿'̿'\\̵͇̿̿\\з=(•_•)=ε/̵͇̿̿/'̿'̿ ̿", '༼ ºل͟º ༼ ºل͟º ༼ ºل͟º ༽ ºل͟º ༽ ºل͟º ༽',
                           "̿'̿'\\̵͇̿̿\\з=( ͠° ͟ʖ ͡°)=ε/̵͇̿̿/'̿̿ ̿ ̿ ̿ ̿ ̿",
                           "̿̿ ̿̿ ̿̿ ̿'̿'\\̵͇̿̿\\з= ( ▀ ͜͞ʖ▀) =ε/̵͇̿̿/’̿’̿ ̿ ̿̿ ̿̿ ̿̿",
                           # From Signal:
                           "ヽ(°◇° )ノ", "■-■¬ <(•_•)"])
    textfaces_emoji = set(['♥‿♥', '☼.☼', '≧☉_☉≦', '(°ロ°)☝', '(☞゚∀゚)☞', '☜(˚▽˚)☞', '☜(⌒▽⌒)☞', '(☞ຈل͜ຈ)☞', 'ヾ(⌐■_■)ノ♪', '(☞゚ヮ゚)☞', '☜(゚ヮ゚☜)'])
    textfaces_wo_emoji = set(['=U', 'ಠ_ಠ', '◉_◉', 'ಥ_ಥ', ":')", 'ಠ⌣ಠ', 'ಠ~ಠ', 'ಠ_ಥ', 'ಠ‿↼', 'ʘ‿ʘ', 'ಠoಠ', 'ರ_ರ', '◔̯◔', '¬_¬', 'ب_ب', '°Д°', '^̮^', '^̮^', '^̮^', '>_>', '^̮^', '^̮^', 'ಠ╭╮ಠ', '(>ლ)', 'ʕ•ᴥ•ʔ', '(ಥ﹏ಥ)', '(ᵔᴥᵔ)', '(¬‿¬)', '⌐╦╦═─', '(•ω•)', '(¬_¬)', '。◕‿◕。', '(ʘ‿ʘ)', '٩◔̯◔۶', '(>人<)', '(~_^)', '(^̮^)', '(・.◤)', '(◕‿◕✿)', '。◕‿‿◕。', '(─‿‿─)', '(;一_一)', "(ʘᗩʘ')", '(✿´‿`)', 'ლ(ಠ益ಠლ)', '~(˘▾˘~)', '(~˘▾˘)~', '(。◕‿◕。)', '(っ˘ڡ˘ς)', 'ლ(´ڡ`ლ)', 'ƪ(˘⌣˘)ʃ', '(´・ω・`)', '(ღ˘⌣˘ღ)', '(▰˘◡˘▰)', '〆(・∀・@)', '༼ʘ̚ل͜ʘ̚༽', 'ᕙ(⇀‸↼‶)ᕗ', 'ᕦ(ò_óˇ)ᕤ', '(。◕‿‿◕。)', 'ヽ༼ຈل͜ຈ༽ノ', '(ง°ل͜°)ง', '╚(ಠ_ಠ)=┐', '(´・ω・)っ由', 'Ƹ̵̡Ӝ̵̨̄Ʒ', '¯\\_(ツ)_/¯', '▄︻̷̿┻̿═━一', "(ง'̀-'́)ง", '¯\\(°_o)/¯', '。゜(`Д´)゜。', '(づ。◕‿‿◕。)づ', '(;´༎ຶД༎ຶ`)', '(ノಠ益ಠ)ノ彡┻━┻', 'ლ,ᔑ•ﺪ͟͠•ᔐ.ლ', '(ノ◕ヮ◕)ノ*:・゚✧', '┬┴┬┴┤(・_├┬┴┬┴', '[̲̅$̲̅(̲̅5̲̅)̲̅$̲̅]'])
    self.textfaces_space = re.compile(r"|".join([re.escape(_) for _ in sorted(textfaces_space, key=len, reverse=True)]))
    self.textfaces_emoji = re.compile(r"|".join([re.escape(_) for _ in sorted(textfaces_emoji, key=len, reverse=True)]))
    textfaces_signal = set(["\\(ˆ˚ˆ)/", "(╥﹏╥)", "(╯°□°)╯︵", "┻━┻", "┬─┬", "ノ(°–°ノ)", "(^._.^)ノ", "ฅ^•ﻌ•^ฅ", "(•_•)", "(■_■¬)", "ƪ(ړײ)ƪ"])
    emoticon_list = sorted(emoticon_set | textfaces_wo_emoji | textfaces_signal, key=len, reverse=True)
    self.emoticon = re.compile(r"""(?:(?:[:;]|(?<!\d)8)           # a variety of eyes, alt.: [:;8]
                                    [-'oO]?                       # optional nose or tear
                                    (?: \)+ | \(+ | [*] | ([DPp])\1*(?!\w)))  # a variety of mouths
                               """ +
                               r"|" +
                               r"(?:\b[Xx]D+\b)" +
                               r"|" +
                               r"(?:\b(?:D'?:|oO)\b)" +
                               r"|" +
                               r"(?:(?<!\b\d{1,2}):\w+:(?!\d{2}\b))" +  # Textual representations of emojis: :smile:, etc. We don't want to match times: 08:30:00
                               r"|" +
                               r"|".join([re.escape(_) for _ in emoticon_list]), re.VERBOSE)
    # Avoid matching phone numbers like "Tel: ( 0049)" or "Tel: (+49)"
    self.space_emoticon = re.compile(r"""([:;])              # eyes
                                         [ ]                 # space between eyes and mouth
                                         ([()])              # mouths
                                         (?![ ]?(?:00|[+])\d)  # not followed by, e.g., 0049 or +49 (issue #12)
                                     """, re.VERBOSE)
    # ^3 is an emoticon, unless it is preceded by a number (with
    # optional whitespace between number and ^3)
    # ^\^3 # beginning of line, no leading characters
    # ^\D\^3 # beginning of line, one leading character
    # (?<=\D[ ])\^3 # two leading characters, non-number + space
    # (?<=.[^\d ])\^3 # two leading characters, x + non-space-non-number
    # (?<=[<^]3[ ]?)\^3 # leading heart with optional space
    self.heart_emoticon = re.compile(r"(?:^|^\D|(?<=\D[ ])|(?<=.[^\d ])|(?<=[<^]3[ ]?))[<^]3(?!\d)")
    # U+2600..U+26FF Miscellaneous Symbols
    # U+2700..U+27BF Dingbats
    # U+FE0E..U+FE0F text and emoji variation selectors
    # U+1F300..U+1F5FF Miscellaneous Symbols and Pictographs
    # -> U+1F3FB..U+1F3FF Emoji modifiers (skin tones)
    # U+1F600..U+1F64F Emoticons
    # U+1F680..U+1F6FF Transport and Map Symbols
    # U+1F900..U+1F9FF Supplemental Symbols and Pictographs
    # self.unicode_symbols = re.compile(r"[\u2600-\u27BF\uFE0E\uFE0F\U0001F300-\U0001f64f\U0001F680-\U0001F6FF\U0001F900-\U0001F9FF]")
    self.symbols_and_dingbats = re.compile(r"[\u2600-\u27BF]")
    self.unicode_flags = re.compile(r"\p{Regional_Indicator}{2}\uFE0F?")
    # special tokens containing + or &
    tokens_with_plus_or_ampersand = utils.read_abbreviation_file("tokens_with_plus_or_ampersand.txt")
    plus_amp_simple = [(pa, re.search(r"^\w+[&+]\w+$", pa)) for pa in tokens_with_plus_or_ampersand]
    self.simple_plus_ampersand = set([pa[0].lower() for pa in plus_amp_simple if pa[1]])
    self.simple_plus_ampersand_candidates = re.compile(r"\b\w+[&+]\w+\b")
    tokens_with_plus_or_ampersand = [pa[0] for pa in plus_amp_simple if not pa[1]]
    # self.token_with_plus_ampersand = re.compile(r"(?<!\w)(?:\L<patokens>)(?!\w)", re.IGNORECASE, patokens=tokens_with_plus_or_ampersand)
    self.token_with_plus_ampersand = re.compile(r"(?<!\w)(?:" + r"|".join([re.escape(_) for _ in tokens_with_plus_or_ampersand]) + r")(?!\w)", re.IGNORECASE)
    # camelCase
    self.emoji = re.compile(r'\bemojiQ\p{L}{3,}\b')
    camel_case_token_list = utils.read_abbreviation_file("camel_case_tokens.txt")
    cc_alnum = [(cc, re.search(r"^\w+$", cc)) for cc in camel_case_token_list]
    self.simple_camel_case_tokens = set([cc[0] for cc in cc_alnum if cc[1]])
    self.simple_camel_case_candidates = re.compile(r"\b\w*\p{Ll}\p{Lu}\w*\b")
    camel_case_token_list = [cc[0] for cc in cc_alnum if not cc[1]]
    # things like ImmobilienScout24.de are already covered by URL detection
    # self.camel_case_url = re.compile(r'\b(?:\p{Lu}[\p{Ll}\d]+){2,}\.(?:de|com|org|net|edu)\b')
    self.camel_case_token = re.compile(r"\b(?:" + r"|".join([re.escape(_) for _ in camel_case_token_list]) + r"|:Mac\p{Lu}\p{Ll}*)\b")
    # self.camel_case_token = re.compile(r"\b(?:\L<cctokens>|Mac\p{Lu}\p{Ll}*)\b", cctokens=camel_case_token_set)
    self.in_and_innen = re.compile(r'\b\p{L}+\p{Ll}In(?:nen)?\p{Ll}*\b')
    self.camel_case = re.compile(r'(?<=\p{Ll}{2})(\p{Lu})(?!\p{Lu}|\b)')
    # GENDER MARKER
    self.gender_marker = re.compile(r'\b\p{L}+[*:/]in(?:nen)?\p{Ll}*\b', re.IGNORECASE)
    # ABBREVIATIONS
    self.single_letter_ellipsis = re.compile(r"(?<![\w.])(?P<a_letter>\p{L})(?P<b_ellipsis>\.{3})(?!\.)")
    self.and_cetera = re.compile(r"(?<![\w.&])&c\.(?!\p{L}{1,3}\.)")
    self.str_abbreviations = re.compile(r'(?<![\w.])([\p{L}-]+str\.)(?!\p{L})', re.IGNORECASE)
    self.nr_abbreviations = re.compile(r"(?<![\w.])(\w+\.-?Nr\.)(?!\p{L}{1,3}\.)", re.IGNORECASE)
    self.single_letter_abbreviation = re.compile(r"(?<![\w.])\p{L}\.(?!\p{L}{1,3}\.)")
    # abbreviations with multiple dots that constitute tokens
    single_token_abbreviation_list = utils.read_abbreviation_file("single_token_abbreviations_%s.txt" % self.language[:2])
    self.single_token_abbreviation = re.compile(r"(?<![\w.])(?:" + r'|'.join([re.escape(_) for _ in single_token_abbreviation_list]) + r')(?!\p{L})', re.IGNORECASE)
    self.ps = re.compile(r"(?<!\d[ ])\bps\.", re.IGNORECASE)
    self.multipart_abbreviation = re.compile(r'(?:\p{L}+\.){2,}')
    # only abbreviations that are not matched by (?:\p{L}\.)+
    abbreviation_list = utils.read_abbreviation_file("abbreviations_%s.txt" % self.language[:2], to_lower=True)
    # abbrev_simple = [(a, re.search(r"^\p{L}{2,}\.$", a)) for a in abbreviation_list]
    # self.simple_abbreviations = set([a[0].lower() for a in abbrev_simple if a[1]])
    # self.simple_abbreviation_candidates = re.compile(r"(?<![\w.])\p{L}{2,}\.(?!\p{L}{1,3}\.)")
    # abbreviation_list = [a[0] for a in abbrev_simple if not a[1]]
    self.abbreviation = re.compile(r"(?<![\p{L}.])(?:" +
                                   r"(?:(?:\p{L}\.){2,})" +
                                   r"|" +
                                   # r"(?i:" +  # this part should be case insensitive
                                   r'|'.join([re.escape(_) for _ in abbreviation_list]) +
                                   # r"))+(?!\p{L}{1,3}\.)", re.V1)
                                   r")+(?!\p{L}{1,3}\.)", re.IGNORECASE)
    # MENTIONS, HASHTAGS, ACTION WORDS, UNDERLINE
    self.mention = re.compile(r'[@]\w+(?!\w)')
    self.hashtag_sequence = re.compile(r'(?<!\w)(?:[#]\w(?:[\w-]*\w)?)+(?!\w)')
    self.single_hashtag = re.compile(r'[#]\w(?:[\w-]*\w)?(?!\w)')
    self.action_word = re.compile(r'(?<!\w)(?P<a_open>[*+])(?P<b_middle>[^\s*]+)(?P<c_close>[*])(?!\w)')
    # a pair of underscores can be used to "underline" some text
    self.underline = re.compile(r"(?<!\w)(?P<left>_)(?P<middle>\w[^_]+\w)(?P<right>_)(?!\w)")
    # DATE, TIME, NUMBERS
    self.three_part_date_year_first = re.compile(r'(?<![\d.]) (?P<a_year>\d{4}) (?P<b_month_or_day>([/-])\d{1,2}) (?P<c_day_or_month>\3\d{1,2}) (?![\d.])', re.VERBOSE)
    self.three_part_date_dmy = re.compile(r'(?<![\d.]) (?P<a_day>(?:0?[1-9]|1[0-9]|2[0-9]|3[01])([./-])) (?P<b_month>(?:0?[1-9]|1[0-2])\2) (?P<c_year>(?:\d\d){1,2}) (?![\d.])', re.VERBOSE)
    self.three_part_date_mdy = re.compile(r'(?<![\d.]) (?P<a_month>(?:0?[1-9]|1[0-2])([./-])) (?P<b_day>(?:0?[1-9]|1[0-9]|2[0-9]|3[01])\2) (?P<c_year>(?:\d\d){1,2}) (?![\d.])', re.VERBOSE)
    self.two_part_date = re.compile(r'(?<![\d.]) (?P<a_day_or_month>\d{1,2}([./-])) (?P<b_day_or_month>\d{1,2}\2) (?![\d.])', re.VERBOSE)
    self.time = re.compile(r'(?<!\w)\d{1,2}(?:(?::\d{2}){1,2}){1,2}(?![\d:])')
    self.en_time = re.compile(r'(?<![\w])(?P<a_time>\d{1,2}(?:(?:[.:]\d{2})){0,2}) ?(?P<b_am_pm>(?:[ap]m\b|[ap]\.m\.(?!\w)))', re.IGNORECASE)
    self.en_us_phone_number = re.compile(r"(?<![\d-])(?:[2-9]\d{2}[/-])?\d{3}-\d{4}(?![\d-])")
    self.en_numerical_identifiers = re.compile(r"(?<![\d-])\d+-(?:\d+-)+\d+(?![\d-])|(?<![\d/])\d+/(?:\d+/)+\d+(?![\d/])")
    self.en_us_zip_code = re.compile(r"(?<![\d-])\d{5}-\d{4}(?![\d-])")
    self.ordinal = re.compile(r'(?<![\w.])(?:\d{1,3}|\d{5,}|[3-9]\d{3})\.(?!\d)')
    self.english_ordinal = re.compile(r'\b(?:\d+(?:,\d+)*)?(?:1st|2nd|3rd|\dth)\b')
    self.english_decades = re.compile(r"\b(?:[12]\d)?\d0['’]?s\b")
    self.fraction = re.compile(r'(?<!\w)\d+/\d+(?![\d/])')
    self.calculation = re.compile(r"(?P<arg1>\d+(?:[,.]\d+)?)(?P<op>[+*x×÷−])(?P<arg2>\d+(?:[,.]\d+)?)")
    self.amount = re.compile(r'(?<!\w)(?:\d+[\d,.]*-)(?!\w)')
    self.semester = re.compile(r'(?<!\w)(?P<a_semester>[WS]S|SoSe|WiSe)(?P<b_jahr>\d\d(?:/\d\d)?)(?!\w)', re.IGNORECASE)
    self.measurement = re.compile(r'(?<!\w)(?P<a_amount>[−+-]?\d*[,.]?\d+) ?(?P<b_unit>(?:mm|cm|dm|m|km)(?:\^?[23])?|bit|cent|eur|f|ft|g|gbit/s|ghz|h|hz|kg|l|lb|mbit/s|min|ml|qm|s|sek)(?!\w)', re.IGNORECASE)
    # also Web2.0
    self.number_compound = re.compile(r'(?<!\w) (?:\d+-?[\p{L}@][\p{L}@-]* | [\p{L}@][\p{L}@-]*-?\d+(?:\.\d)?) (?!\w)', re.VERBOSE)
    self.number = re.compile(r"""(?<!\w|\d[.,]?)
                                 (?:[−+-]?               # optional sign
                                   (?:\d*                # optional digits before decimal point
                                   [.,])?                # optional decimal point
                                   \d+                   # digits
                                   (?:[eE][−+-]?\d+)?    # optional exponent
                                 |
                                   \d{1,3}(?:[.]\d{3})+(?:,\d+)?  # dot for thousands, comma for decimals: 1.999,95
                                 |
                                   \d{1,3}(?:,\d{3})+(?:[.]\d+)?  # comma for thousands, dot for decimals: 1,999.95
                                 )
                                 (?![.,]?\d)""", re.VERBOSE)
    self.ipv4 = re.compile(r"(?<!\w|\d[.,]?)(?:\d{1,3}[.]){3}\d{1,3}(?![.,]?\d)")
    self.section_number = re.compile(r"(?<!\w|\d[.,]?)(?:\d+[.])+\d+[.]?(?![.,]?\d)")
    # PUNCTUATION
    self.quest_exclam = re.compile(r"([!?]+)")
    # arrows
    self.arrow = re.compile(r'(-+[ ]?>|<[ ]?-+|[\u2190-\u21ff])')
    # parens
    self.all_parens = re.compile(r"""(
                                   (?:(?<=\w)            # alphanumeric character
                                   [(]                   # opening paren
                                   (?!inn?[)])) |        # not followed by "in)" or "inn)"
                                   (?:(?<!\w)            # no alphanumeric character
                                   [(]) |                # opening paren
                                   (?:(?<!.[(]in|[(]inn) # not preceded by "(in" or "(inn"
                                   [)]) |                # closing paren
                                   [][{}]                # curly and square brackets
                                   # (?:(?<!\w)          # no alphanumeric character
                                   # [[{(]               # opening paren
                                   # (?=\w)) |           # alphanumeric character
                                   # (?:(?<=\w)          # alphanumeric character
                                   # []})]               # closing paren
                                   # (?!\w)) |           # no alphanumeric character
                                   # (?:(?:(?<=\s)|^)    # space or start of string
                                   # []})]               # closing paren
                                   # (?=\w)) |           # alphanumeric character
                                   # (?:(?<=\w-)         # hyphen
                                   # [)]                 # closing paren
                                   # (?=\w))             # alphanumeric character
                                   )""", re.VERBOSE | re.IGNORECASE)
    self.de_slash = re.compile(r'(/+)(?!in(?:nen)?|en)')
    # English possessive and contracted forms
    self.en_trailing_apos = re.compile(r"(?<=[sx])(['’])(?![\w'])")
    self.en_dms = re.compile(r"(?<=\w)(['’][dms])\b", re.IGNORECASE)
    self.en_llreve = re.compile(r"(?<=\w)(['’](?:ll|re|ve))\b", re.IGNORECASE)
    self.en_not = re.compile(r"(?<=\w)(n['’]t)\b", re.IGNORECASE)
    en_twopart_contractions = [r"\b(?P<p1>a)(?P<p2>lot)\b", r"\b(?P<p1>gon)(?P<p2>na)\b", r"\b(?P<p1>got)(?P<p2>ta)\b", r"\b(?P<p1>lem)(?P<p2>me)\b",
                               r"\b(?P<p1>out)(?P<p2>ta)\b", r"\b(?P<p1>wan)(?P<p2>na)\b", r"\b(?P<p1>c'm)(?P<p2>on)\b",
                               r"\b(?P<p1>more)(?P<p2>['’]n)\b", r"\b(?P<p1>d['’])(?P<p2>ye)\b", r"(?<!\w)(?P<p1>['’]t)(?P<p2>is)\b",
                               r"(?<!\w)(?P<p1>['’]t)(?P<p2>was)\b", r"\b(?P<p1>there)(?P<p2>s)\b", r"\b(?P<p1>i)(?P<p2>m)\b",
                               r"\b(?P<p1>you)(?P<p2>re)\b", r"\b(?P<p1>he)(?P<p2>s)\b", r"\b(?P<p1>she)(?P<p2>s)\b",
                               r"\b(?P<p1>ai)(?P<p2>nt)\b", r"\b(?P<p1>are)(?P<p2>nt)\b", r"\b(?P<p1>is)(?P<p2>nt)\b",
                               r"\b(?P<p1>do)(?P<p2>nt)\b", r"\b(?P<p1>does)(?P<p2>nt)\b", r"\b(?P<p1>did)(?P<p2>nt)\b",
                               r"\b(?P<p1>i)(?P<p2>ve)\b", r"\b(?P<p1>you)(?P<p2>ve)\b", r"\b(?P<p1>they)(?P<p2>ve)\b",
                               r"\b(?P<p1>have)(?P<p2>nt)\b", r"\b(?P<p1>has)(?P<p2>nt)\b", r"\b(?P<p1>can)(?P<p2>not)\b",
                               r"\b(?P<p1>ca)(?P<p2>nt)\b", r"\b(?P<p1>could)(?P<p2>nt)\b", r"\b(?P<p1>wo)(?P<p2>nt)\b",
                               r"\b(?P<p1>would)(?P<p2>nt)\b", r"\b(?P<p1>you)(?P<p2>ll)\b", r"\b(?P<p1>let)(?P<p2>s)\b"]
    en_threepart_contractions = [r"\b(?P<p1>du)(?P<p2>n)(?P<p3>no)\b", r"\b(?P<p1>wha)(?P<p2>dd)(?P<p3>ya)\b", r"\b(?P<p1>wha)(?P<p2>t)(?P<p3>cha)\b", r"\b(?P<p1>i)(?P<p2>'m)(?P<p3>a)\b"]
    # w/o, w/out, b/c, b/t, l/c, w/, d/c, u/s
    self.en_slash_words = re.compile(r"\b(?:w/o|w/out|b/t|l/c|b/c|d/c|u/s)\b|\bw/(?!\w)", re.IGNORECASE)
    # word--word
    self.en_twopart_contractions = [re.compile(contr, re.IGNORECASE) for contr in en_twopart_contractions]
    self.en_threepart_contractions = [re.compile(contr, re.IGNORECASE) for contr in en_threepart_contractions]
    # English hyphenated words (the word lists are only available for English)
    if self.language == "en" or self.language == "en_PTB":
        nonbreaking_prefixes = utils.read_abbreviation_file("non-breaking_prefixes_%s.txt" % self.language[:2])
        nonbreaking_suffixes = utils.read_abbreviation_file("non-breaking_suffixes_%s.txt" % self.language[:2])
        nonbreaking_words = utils.read_abbreviation_file("non-breaking_hyphenated_words_%s.txt" % self.language[:2])
        self.en_nonbreaking_prefixes = re.compile(r"(?<![\w-])(?:" + r'|'.join([re.escape(_) for _ in nonbreaking_prefixes]) + r")-[\w-]+", re.IGNORECASE)
        self.en_nonbreaking_suffixes = re.compile(r"\b[\w-]+-(?:" + r'|'.join([re.escape(_) for _ in nonbreaking_suffixes]) + r")(?![\w-])", re.IGNORECASE)
        self.en_nonbreaking_words = re.compile(r"\b(?:" + r'|'.join([re.escape(_) for _ in nonbreaking_words]) + r")\b", re.IGNORECASE)
    self.en_hyphen = re.compile(r"(?<=\w)-+(?=\w)")
    self.en_no = re.compile(r"\b(no\.)\s*(?=\d)", re.IGNORECASE)
    self.en_degree = re.compile(r"(?<=\d ?)°(?:F|C|Oe)\b", re.IGNORECASE)
    # quotation marks
    # L'Enfer, d'accord, O'Connor
    self.letter_apostrophe_word = re.compile(r"\b([dlo]['’]\p{L}+)\b", re.IGNORECASE)
    self.double_latex_quote = re.compile(r"(?:(?<!`)``(?!`))|(?:(?<!')''(?!'))")
    self.paired_single_latex_quote = re.compile(r"(?<!`)(?P<left>`)(?P<middle>[^`']+)(?P<right>')(?!')")
    self.paired_single_quot_mark = re.compile(r"(?<!\p{L})(?P<left>['])(?P<middle>[^']+)(?P<right>['])(?!\p{L})")
    # Musical notes, two programming languages
    self.letter_sharp = re.compile(r"\b[acdfg]#(?:-\p{L}+)?(?!\w)", re.IGNORECASE)
    self.other_punctuation = re.compile(r'([#<>%‰€$£₤¥°@~*„“”‚‘"»«›‹,;:+×÷±≤≥=&–—])')
    self.en_quotation_marks = re.compile(r'([„“”‚‘’"»«›‹])')
    self.en_other_punctuation = re.compile(r'([#<>%‰€$£₤¥°@~*,;:+×÷±≤≥=&/–—-]+)')
    self.ellipsis = re.compile(r'\.{2,}|…+(?:\.{2,})?')
    self.dot_without_space = re.compile(r'(?<=\p{Ll}{2})(\.)(?=\p{Lu}\p{Ll}{2})')
    # self.dot = re.compile(r'(?<=[\w)])(\.)(?![\w])')
    self.dot = re.compile(r'(\.)')
def _split_on_boundaries(self, node, boundaries, token_class, *, lock_match=True, delete_whitespace=False):
    """Replace `node` with the sequence of tokens cut at `boundaries`.

    `boundaries` is a list of ``(start, end, replacement)`` tuples of
    character offsets into ``node.value.text``. For every boundary,
    the text between the previous boundary and `start` (if non-empty)
    is emitted as a "regular" token, followed by ``text[start:end]``
    as a token of class `token_class`. A non-None `replacement`
    substitutes the matched text; the original text is then kept as
    `original_spelling`. Any text after the last boundary becomes a
    final "regular" token. All new tokens are inserted to the left of
    `node`, which is removed at the end.

    NOTE(review): assumes the offsets in `boundaries` are sorted and
    non-overlapping — confirm against callers.
    """
    n = len(boundaries)
    if n == 0:
        return
    token_dll = node.list
    prev_end = 0
    for i, (start, end, replacement) in enumerate(boundaries):
        original_spelling = None
        left_space_after, match_space_after = False, False
        # text before the match / the match itself
        left = node.value.text[prev_end:start]
        match = node.value.text[start:end]
        if replacement is not None:
            if match != replacement:
                original_spelling = match
                match = replacement
        # text after the match (only emitted after the last boundary)
        right = node.value.text[end:]
        prev_end = end
        # Whitespace adjacent to a cut means the token left of the cut
        # is followed by whitespace in the original text.
        if left.endswith(" ") or match.startswith(" "):
            left_space_after = True
        if match.endswith(" ") or right.startswith(" "):
            match_space_after = True
        elif right == "":
            # last piece: inherit whitespace status of the original token
            match_space_after = node.value.space_after
        left = left.strip()
        match = match.strip()
        right = right.strip()
        if delete_whitespace:
            # e.g. obfuscated email addresses may contain spaces
            match_wo_spaces = match.replace(" ", "")
            if match_wo_spaces != match:
                if original_spelling is None:
                    original_spelling = match
                match = match_wo_spaces
        # propagate sentence-boundary flags from the original token
        first_in_sentence, match_last_in_sentence, right_last_in_sentence = False, False, False
        if i == 0:
            first_in_sentence = node.value.first_in_sentence
        if i == n - 1:
            match_last_in_sentence = node.value.last_in_sentence
            if right != "":
                # trailing text exists, so it closes the sentence instead
                match_last_in_sentence = False
                right_last_in_sentence = node.value.last_in_sentence
        if left != "":
            token_dll.insert_left(Token(left, token_class="regular", space_after=left_space_after, first_in_sentence=first_in_sentence), node)
            # the left piece consumed the first_in_sentence flag
            first_in_sentence = False
        token_dll.insert_left(Token(match, locked=lock_match,
                                    token_class=token_class,
                                    space_after=match_space_after,
                                    original_spelling=original_spelling,
                                    first_in_sentence=first_in_sentence,
                                    last_in_sentence=match_last_in_sentence),
                              node)
        if i == n - 1 and right != "":
            token_dll.insert_left(Token(right, token_class="regular", space_after=node.value.space_after, last_in_sentence=right_last_in_sentence), node)
    # all pieces have been inserted; drop the original node
    token_dll.remove(node)
def _split_matches(self, regex, node, token_class="regular", repl=None, split_named_subgroups=True, delete_whitespace=False):
    """Turn all matches of `regex` in `node` into separate tokens.

    If the pattern defines named subgroups and `split_named_subgroups`
    is True, each participating subgroup becomes its own token;
    otherwise the whole match is one token, optionally rewritten via
    the template `repl`.
    """
    use_subgroups = split_named_subgroups and len(regex.groupindex) > 0
    named_groups = sorted(regex.groupindex.values())
    spans = []
    for match in regex.finditer(node.value.text):
        if use_subgroups:
            spans.extend((match.start(g), match.end(g), None)
                         for g in named_groups
                         if match.span(g) != (-1, -1))
        elif repl is None:
            spans.append((match.start(), match.end(), None))
        else:
            spans.append((match.start(), match.end(), match.expand(repl)))
    self._split_on_boundaries(node, spans, token_class, delete_whitespace=delete_whitespace)
def _split_emojis(self, node, token_class="emoticon"):
    """Turn every emoji grapheme cluster in `node` into a token.

    Multi-character clusters also count the emoji variation selector
    (U+FE0F) as evidence of an emoji; single characters must match an
    emoji property on their own.
    """
    multi_char = r"[\p{Extended_Pictographic}\p{Emoji_Presentation}\uFE0F]"
    single_char = r"[\p{Extended_Pictographic}\p{Emoji_Presentation}]"
    spans = []
    for grapheme in re.finditer(r"\X", node.value.text):
        pattern = multi_char if grapheme.end() - grapheme.start() > 1 else single_char
        if re.search(pattern, grapheme.group()):
            spans.append((grapheme.start(), grapheme.end(), None))
    self._split_on_boundaries(node, spans, token_class)
def _split_set(self, regex, node, items, token_class="regular", to_lower=False):
    """Turn those matches of `regex` in `node` that occur in `items`
    into tokens, optionally lowercasing the match before the lookup.
    """
    spans = []
    for match in regex.finditer(node.value.text):
        candidate = match.group(0)
        if to_lower:
            candidate = candidate.lower()
        if candidate in items:
            spans.append((match.start(), match.end(), None))
    self._split_on_boundaries(node, spans, token_class)
def _split_left(self, regex, node):
    """Split the text of `node` immediately before every match of `regex`.

    Each piece ends where the next match *starts*, i.e. the match
    itself stays attached to the following piece. The resulting
    tokens are unlocked and carry no token class.
    """
    cut = 0
    spans = []
    for match in regex.finditer(node.value.text):
        spans.append((cut, match.start(), None))
        cut = match.start()
    self._split_on_boundaries(node, spans, token_class=None, lock_match=False)
def _split_all_matches(self, regex, token_dll, token_class="regular", *, repl=None, split_named_subgroups=True, delete_whitespace=False):
    """Turn the matches of `regex` in every unprocessed token into tokens.

    Markup tokens and tokens already locked by earlier passes are
    skipped.
    """
    for node in token_dll:
        token = node.value
        if not (token.markup or token._locked):
            self._split_matches(regex, node, token_class, repl, split_named_subgroups, delete_whitespace)
def _split_all_matches_in_match(self, regex1, regex2, token_dll, token_class="regular", *, delete_whitespace=False):
    """Find all matches for `regex1` and turn all matches for `regex2`
    within those matches into tokens. Markup and locked tokens are
    skipped.
    """
    for node in token_dll:
        if node.value.markup or node.value._locked:
            continue
        spans = []
        for outer in regex1.finditer(node.value.text):
            # inner offsets are relative to the outer match
            offset = outer.start()
            spans.extend((inner.start() + offset, inner.end() + offset, None)
                         for inner in regex2.finditer(outer.group(0)))
        self._split_on_boundaries(node, spans, token_class, delete_whitespace=delete_whitespace)
def _split_all_emojis(self, token_dll, token_class="emoticon"):
    """Turn all emoji sequences in unprocessed tokens into tokens.

    First splits off the textface emoticons that contain emoji
    characters, then all remaining emoji grapheme clusters.
    """
    # FIX: honor the token_class argument; it was previously ignored
    # for the textface pass (hard-coded "emoticon"). Backward
    # compatible: the default and the in-file caller both pass
    # "emoticon".
    self._split_all_matches(self.textfaces_emoji, token_dll, token_class)
    for t in token_dll:
        if t.value.markup or t.value._locked:
            continue
        self._split_emojis(t, token_class)
def _split_all_set(self, token_dll, regex, items, token_class="regular", to_lower=False):
    """Turn all elements from `items` into separate tokens.

    Note: all elements need to be matched by `regex`. Optionally the
    matches are lowercased before the comparison; to_lower does not
    modify the elements of `items`, so setting to_lower=True only
    makes sense if those elements are already lowercase. Markup and
    locked tokens are skipped.
    """
    for node in token_dll:
        token = node.value
        if not (token.markup or token._locked):
            self._split_set(regex, node, items, token_class, to_lower)
def _split_all_left(self, regex, token_dll):
    """Split every unprocessed token to the left of each match of `regex`."""
    for node in token_dll:
        token = node.value
        if not (token.markup or token._locked):
            self._split_left(regex, node)
def _split_abbreviations(self, token_dll, split_multipart_abbrevs=True):
    """Turn instances of abbreviations into tokens.

    Specific abbreviation patterns are applied first; afterwards the
    generic abbreviation list is matched. Multipart abbreviations
    (e.g. "z.B.") are optionally split into their dot-terminated
    parts ("z." and "B.").
    """
    self._split_all_matches(self.single_letter_ellipsis, token_dll, "abbreviation")
    self._split_all_matches(self.and_cetera, token_dll, "abbreviation")
    self._split_all_matches(self.str_abbreviations, token_dll, "abbreviation")
    self._split_all_matches(self.nr_abbreviations, token_dll, "abbreviation")
    self._split_all_matches(self.single_token_abbreviation, token_dll, "abbreviation")
    self._split_all_matches(self.single_letter_abbreviation, token_dll, "abbreviation")
    self._split_all_matches(self.ps, token_dll, "abbreviation")
    for t in token_dll:
        if t.value.markup or t.value._locked:
            continue
        boundaries = []
        for m in self.abbreviation.finditer(t.value.text):
            instance = m.group(0)
            if split_multipart_abbrevs and self.multipart_abbreviation.fullmatch(instance):
                # emit one boundary per dot-terminated part: each part
                # runs from the previous dot (exclusive) up to and
                # including the current dot
                start, end = m.span(0)
                s = start
                for i, c in enumerate(instance, start=1):
                    if c == ".":
                        boundaries.append((s, start + i, None))
                        s = start + i
            else:
                # keep the whole abbreviation as one token
                boundaries.append((m.start(), m.end(), None))
        self._split_on_boundaries(t, boundaries, "abbreviation")
def _split_paired(self, regex, token_dll, token_class="regular"):
    """Split off paired elements captured by the groups named "left"
    and "right". Currently, this only operates on a single segment.
    """
    for node in token_dll:
        if node.value.markup or node.value._locked:
            continue
        spans = []
        for match in regex.finditer(node.value.text):
            for group in ("left", "right"):
                spans.append((match.start(group), match.end(group), None))
        self._split_on_boundaries(node, spans, token_class)
def _remove_empty_tokens(self, token_dll):
    """Drop empty or whitespace-only tokens from the list.

    When a removed token opened or closed a sentence, the flag is
    handed over to the nearest non-markup neighbour so that sentence
    boundaries survive the removal.
    """
    is_markup = operator.attrgetter("value.markup")
    for node in token_dll:
        token = node.value
        if token.markup or token._locked:
            continue
        if not self.spaces_or_empty.search(token.text):
            continue
        if token.first_in_sentence:
            successor = token_dll.next_matching(node, is_markup, False)
            if successor is not None:
                successor.value.first_in_sentence = True
        if token.last_in_sentence:
            predecessor = token_dll.previous_matching(node, is_markup, False)
            if predecessor is not None:
                predecessor.value.last_in_sentence = True
        token_dll.remove(node)
def _tokenize(self, token_dll):
    """Tokenize paragraph (may contain newlines) according to the
    guidelines of the EmpiriST 2015 shared task on automatic
    linguistic annotation of computer-mediated communication /
    social media.

    :param token_dll: doubly linked list of Token objects; tokens
        flagged as markup or locked are left untouched by every pass.
    :return: the final flat token list (token_dll.to_list()).

    NOTE: the order of the splitting passes below is significant --
    patterns that may legitimately contain whitespace or joiners
    (XML tags, emoji, e-mail addresses, URLs) must be extracted
    before whitespace-based splitting and before the more general
    patterns that would otherwise break them apart.
    """
    # Normalization pass over every unlocked, non-markup token.
    for t in token_dll:
        if t.value.markup or t.value._locked:
            continue
        # convert to Unicode normal form C (NFC)
        t.value.text = unicodedata.normalize("NFC", t.value.text)
        # normalize whitespace
        t.value.text = self.spaces.sub(" ", t.value.text)
        # get rid of control characters
        t.value.text = self.controls.sub("", t.value.text)
        # get rid of isolated variation selectors
        t.value.text = self.stranded_variation_selector.sub("", t.value.text)
        # normalize whitespace
        t.value.text = self.spaces.sub(" ", t.value.text)
    # Some tokens are allowed to contain whitespace. Get those out
    # of the way first.
    # - XML tags
    self._split_all_matches(self.xml_declaration, token_dll, "XML_tag")
    self._split_all_matches(self.tag, token_dll, "XML_tag")
    # Emoji sequences can contain zero-width joiners. Get them out
    # of the way next
    # First textfaces that contain whitespace:
    self._split_all_matches(self.textfaces_space, token_dll, "emoticon")
    # Then flags:
    self._split_all_matches(self.unicode_flags, token_dll, "emoticon")
    # Then all other emojis
    self._split_all_emojis(token_dll, "emoticon")
    for t in token_dll:
        if t.value.markup or t.value._locked:
            continue
        # get rid of other junk characters
        t.value.text = self.other_nasties.sub("", t.value.text)
        # normalize whitespace
        t.value.text = self.spaces.sub(" ", t.value.text)
    # Remove empty tokens
    self._remove_empty_tokens(token_dll)
    # Some emoticons contain erroneous spaces. We fix this.
    self._split_all_matches(self.space_emoticon, token_dll, "emoticon", repl=r'\1\2')
    # obfuscated email addresses can contain spaces
    self._split_all_matches(self.email, token_dll, "email_address", delete_whitespace=True)
    # urls
    self._split_all_matches(self.simple_url_with_brackets, token_dll, "URL")
    self._split_all_matches(self.simple_url, token_dll, "URL")
    self._split_all_matches(self.doi, token_dll, "URL")
    self._split_all_matches(self.doi_with_space, token_dll, "URL")
    self._split_all_matches(self.url_without_protocol, token_dll, "URL")
    self._split_all_matches(self.reddit_links, token_dll, "URL")
    # XML entities
    self._split_all_matches(self.entity, token_dll, "XML_entity")
    # emoticons
    self._split_all_matches(self.heart_emoticon, token_dll, "emoticon")
    self._split_all_matches(self.emoticon, token_dll, "emoticon")
    self._split_all_matches(self.symbols_and_dingbats, token_dll, "emoticon")
    # mentions, hashtags
    self._split_all_matches(self.mention, token_dll, "mention")
    self._split_all_matches_in_match(self.hashtag_sequence, self.single_hashtag, token_dll, "hashtag")
    # action words
    self._split_all_matches(self.action_word, token_dll, "action_word")
    # underline
    self._split_paired(self.underline, token_dll)
    # textual representations of emoji
    self._split_all_matches(self.emoji, token_dll, "emoticon")
    # tokens with + or &
    self._split_all_matches(self.token_with_plus_ampersand, token_dll)
    self._split_all_set(token_dll, self.simple_plus_ampersand_candidates, self.simple_plus_ampersand, to_lower=True)
    # camelCase
    if self.split_camel_case:
        self._split_all_matches(self.camel_case_token, token_dll)
        self._split_all_set(token_dll, self.simple_camel_case_candidates, self.simple_camel_case_tokens)
        self._split_all_matches(self.in_and_innen, token_dll)
        self._split_all_left(self.camel_case, token_dll)
    # gender marker
    self._split_all_matches(self.gender_marker, token_dll)
    # English possessive and contracted forms
    if self.language == "en" or self.language == "en_PTB":
        self._split_all_matches(self.english_decades, token_dll, "number_compound")
        self._split_all_matches(self.en_dms, token_dll)
        self._split_all_matches(self.en_llreve, token_dll)
        self._split_all_matches(self.en_not, token_dll)
        self._split_all_left(self.en_trailing_apos, token_dll)
        for contraction in self.en_twopart_contractions:
            self._split_all_matches(contraction, token_dll)
        for contraction in self.en_threepart_contractions:
            self._split_all_matches(contraction, token_dll)
        self._split_all_matches(self.en_no, token_dll)
        self._split_all_matches(self.en_degree, token_dll)
        self._split_all_matches(self.en_nonbreaking_words, token_dll)
        self._split_all_matches(self.en_nonbreaking_prefixes, token_dll)
        self._split_all_matches(self.en_nonbreaking_suffixes, token_dll)
    # remove known abbreviations
    # (multipart abbreviations are only split apart for non-English models)
    split_abbreviations = False if self.language == "en" or self.language == "en_PTB" else True
    self._split_abbreviations(token_dll, split_multipart_abbrevs=split_abbreviations)
    # DATES AND NUMBERS
    self._split_all_matches(self.isbn, token_dll, "number", delete_whitespace=True)
    # dates
    split_dates = False if self.language == "en" or self.language == "en_PTB" else True
    self._split_all_matches(self.three_part_date_year_first, token_dll, "date", split_named_subgroups=split_dates)
    self._split_all_matches(self.three_part_date_dmy, token_dll, "date", split_named_subgroups=split_dates)
    self._split_all_matches(self.three_part_date_mdy, token_dll, "date", split_named_subgroups=split_dates)
    self._split_all_matches(self.two_part_date, token_dll, "date", split_named_subgroups=split_dates)
    # time
    if self.language == "en" or self.language == "en_PTB":
        self._split_all_matches(self.en_time, token_dll, "time")
    self._split_all_matches(self.time, token_dll, "time")
    # US phone numbers and ZIP codes
    if self.language == "en" or self.language == "en_PTB":
        self._split_all_matches(self.en_us_phone_number, token_dll, "number")
        self._split_all_matches(self.en_us_zip_code, token_dll, "number")
        self._split_all_matches(self.en_numerical_identifiers, token_dll, "number")
    # ordinals
    if self.language == "de" or self.language == "de_CMC":
        self._split_all_matches(self.ordinal, token_dll, "ordinal")
    elif self.language == "en" or self.language == "en_PTB":
        self._split_all_matches(self.english_ordinal, token_dll, "ordinal")
    # fractions
    self._split_all_matches(self.fraction, token_dll, "number")
    # calculations
    self._split_all_matches(self.calculation, token_dll, "number")
    # amounts (1.000,-)
    self._split_all_matches(self.amount, token_dll, "amount")
    # semesters
    self._split_all_matches(self.semester, token_dll, "semester")
    # measurements
    self._split_all_matches(self.measurement, token_dll, "measurement")
    # number compounds
    self._split_all_matches(self.number_compound, token_dll, "number_compound")
    # numbers
    self._split_all_matches(self.number, token_dll, "number")
    self._split_all_matches(self.ipv4, token_dll, "number")
    self._split_all_matches(self.section_number, token_dll, "number")
    # (clusters of) question marks and exclamation marks
    self._split_all_matches(self.quest_exclam, token_dll, "symbol")
    # arrows
    self._split_all_matches(self.arrow, token_dll, "symbol", delete_whitespace=True)
    # parens
    self._split_all_matches(self.all_parens, token_dll, "symbol")
    # slash
    if self.language == "en" or self.language == "en_PTB":
        self._split_all_matches(self.en_slash_words, token_dll, "regular")
    if self.language == "de" or self.language == "de_CMC":
        self._split_all_matches(self.de_slash, token_dll, "symbol")
    # O'Connor and French omitted vocals: L'Enfer, d'accord
    self._split_all_matches(self.letter_apostrophe_word, token_dll)
    # LaTeX-style quotation marks
    self._split_all_matches(self.double_latex_quote, token_dll, "symbol")
    self._split_paired(self.paired_single_latex_quote, token_dll, "symbol")
    # single quotation marks, apostrophes
    self._split_paired(self.paired_single_quot_mark, token_dll, "symbol")
    # other punctuation symbols
    # paragraph = self._replace_regex(paragraph, self.dividing_line, "symbol")
    self._split_all_matches(self.letter_sharp, token_dll, "regular")
    if self.language == "en" or self.language == "en_PTB":
        self._split_all_matches(self.en_hyphen, token_dll, "symbol")
        self._split_all_matches(self.en_quotation_marks, token_dll, "symbol")
        self._split_all_matches(self.en_other_punctuation, token_dll, "symbol")
    else:
        self._split_all_matches(self.other_punctuation, token_dll, "symbol")
    # ellipsis
    self._split_all_matches(self.ellipsis, token_dll, "symbol")
    # dots
    self._split_all_matches(self.dot_without_space, token_dll, "symbol")
    self._split_all_matches(self.dot, token_dll, "symbol")
    # Split on whitespace
    # Everything left over is split on whitespace; only the last piece
    # inherits the original token's space_after flag.
    for t in token_dll:
        if t.value.markup or t.value._locked:
            continue
        wt = t.value.text.split()
        n_wt = len(wt)
        for i, tok in enumerate(wt):
            if i == n_wt - 1:
                token_dll.insert_left(Token(tok, token_class="regular", space_after=t.value.space_after), t)
            else:
                token_dll.insert_left(Token(tok, token_class="regular", space_after=True), t)
        token_dll.remove(t)
    return token_dll.to_list()
def _convert_to_legacy(self, tokens):
if self.token_classes and self.extra_info:
tokens = [(t.text, t.token_class, t.extra_info) for t in tokens]
elif self.token_classes:
tokens = [(t.text, t.token_class) for t in tokens]
elif self.extra_info:
tokens = [(t.text, t.extra_info) for t in tokens]
else:
tokens = [t.text for t in tokens]
return tokens
def tokenize(self, paragraph):
    """Deprecated alias for tokenize_paragraph (kept for API compatibility)."""
    deprecation = "Since version 2.0.0, somajo.Tokenizer.tokenize() is deprecated. Please use somajo.SoMaJo.tokenize_text() instead. For more details see https://github.com/tsproisl/SoMaJo/blob/master/doc/build/markdown/somajo.md"
    logging.warning(deprecation)
    return self.tokenize_paragraph(paragraph)
def tokenize_file(self, filename, parsep_empty_lines=True):
    """Tokenize a UTF-8 text file, yielding one token list per non-empty paragraph.

    Deprecated since 2.0.0 in favour of somajo.SoMaJo.tokenize_text_file().
    """
    logging.warning("Since version 2.0.0, somajo.Tokenizer.tokenize_file() is deprecated. Please use somajo.SoMaJo.tokenize_text_file() instead. For more details see https://github.com/tsproisl/SoMaJo/blob/master/doc/build/markdown/somajo.md")
    # Paragraphs are delimited either by empty lines or by single newlines.
    separator = "empty_lines" if parsep_empty_lines else "single_newlines"
    with open(filename, encoding="utf-8") as fh:
        for paragraph in utils.get_paragraphs_str(fh, paragraph_separator=separator):
            tokenized = self.tokenize_paragraph(paragraph)
            if tokenized:
                yield tokenized
def tokenize_paragraph(self, paragraph):
    """Tokenize a single paragraph string (may contain newlines) following
    the EmpiriST 2015 shared task guidelines for computer-mediated
    communication / social media.

    Deprecated since 2.0.0 in favour of somajo.SoMaJo.tokenize_text().
    """
    logging.warning("Since version 2.0.0, somajo.Tokenizer.tokenize_paragraph() is deprecated. Please use somajo.SoMaJo.tokenize_text() instead. For more details see https://github.com/tsproisl/SoMaJo/blob/master/doc/build/markdown/somajo.md")
    # The whole paragraph starts out as one token spanning a full sentence.
    dll = doubly_linked_list.DLL([Token(paragraph, first_in_sentence=True, last_in_sentence=True)])
    return self._convert_to_legacy(self._tokenize(dll))
def tokenize_xml(self, xml, is_file=True, eos_tags=None):
    """Tokenize an XML file or XML string following the EmpiriST 2015
    shared task guidelines for computer-mediated communication /
    social media.

    Deprecated since 2.0.0 in favour of somajo.SoMaJo.tokenize_xml().
    """
    logging.warning("Since version 2.0.0, somajo.Tokenizer.tokenize_xml() is deprecated. Please use somajo.SoMaJo.tokenize_xml() instead. For more details see https://github.com/tsproisl/SoMaJo/blob/master/doc/build/markdown/somajo.md")
    result = []
    for chunk in utils.xml_chunk_generator(xml, is_file, eos_tags):
        tokens = self._tokenize(doubly_linked_list.DLL(chunk))
        tokens = utils.escape_xml_tokens(tokens)
        result.extend(self._convert_to_legacy(tokens))
    return result
|
tsproisl/SoMaJo
|
somajo/tokenizer.py
|
Python
|
gpl-3.0
| 53,985
|
[
"FEFF"
] |
c3a3918c21fe97c77c6469deaa66194a7235ab227f5a5339a5032a7ca64fc493
|
import numpy as np
import pytest
import psi4
from .utils import compare_values
# Reference gradients generated from Psi's dfmp2 module for the water
# test geometry below. Keys: ae = all-electron, fc = frozen core,
# fv = frozen virtual (4 frozen unoccupied orbitals).
data = {
    "df-mp2 ae": np.array([
        [0.0, 0.0, 9.62190509e-03],
        [0.0, 5.49835030e-03, -4.81095255e-03],
        [0.0, -5.49835030e-03, -4.81095255e-03],
    ]),
    "df-mp2 fc": np.array([
        [0.0, 0.0, 1.02432654e-02],
        [0.0, 5.88581965e-03, -5.12163268e-03],
        [0.0, -5.88581965e-03, -5.12163268e-03],
    ]),
    "df-mp2 fv": np.array([
        [0.0, 0.0, 1.22918833e-02],
        [0.0, 5.52107556e-03, -6.14594166e-03],
        [0.0, -5.52107556e-03, -6.14594166e-03],
    ]),
    "df-mp2 fc/fv": np.array([
        [0.0, 0.0, 1.25984187e-02],
        [0.0, 5.71563223e-03, -6.29920936e-03],
        [0.0, -5.71563223e-03, -6.29920936e-03],
    ]),
}
@pytest.mark.slow
# TODO: That "true" needs to be a string is silly. Convert it to a boolean when you can do that without incurring a NaN energy.
@pytest.mark.parametrize("inp", [
    pytest.param({'name': 'mp2', 'options': {'mp2_type': 'df'}, 'ref': data["df-mp2 ae"]}, id='df-mp2 ae'),
    pytest.param({'name': 'mp2', 'options': {'mp2_type': 'df', 'freeze_core': 'true'}, 'ref': data["df-mp2 fc"]}, id='df-mp2 fc'),
    pytest.param({'name': 'mp2', 'options': {'mp2_type': 'df', 'num_frozen_uocc': 4}, 'ref': data["df-mp2 fv"]}, id='df-mp2 fv'),
    # id fixed from 'df-omp2 fc/fv': this case runs plain df-mp2, like its siblings
    pytest.param({'name': 'mp2', 'options': {'mp2_type': 'df', 'freeze_core': 'true', 'num_frozen_uocc': 4}, 'ref': data["df-mp2 fc/fv"]}, id='df-mp2 fc/fv')
]
)
def test_gradient(inp):
    """Check analytic DF-MP2 gradients for water against finite-difference
    gradients and against stored reference values (5 decimal places)."""
    h2o = psi4.geometry("""
    O
    H 1 0.958
    H 1 0.958 2 104.5
""")
    psi4.set_options({'basis': 'aug-cc-pvdz', 'points': 5})
    psi4.set_options(inp['options'])
    # Analytic (dertype=1) vs. finite-difference (dertype=0) gradients.
    analytic_gradient = psi4.gradient(inp['name'], dertype=1)
    findif_gradient = psi4.gradient(inp['name'], dertype=0)
    reference_gradient = inp["ref"]
    assert compare_values(findif_gradient, analytic_gradient, 5, "analytic vs. findif gradient")
    assert compare_values(reference_gradient, analytic_gradient.np, 5, "analytic vs. reference gradient")
|
jgonthier/psi4
|
tests/pytests/test_gradients.py
|
Python
|
lgpl-3.0
| 2,081
|
[
"Psi4"
] |
288972dcc920481b1db104a59396298e732892d3990212ba538533992e160ce6
|
# IWDEV Pi Traffic lights
# Python 2 script: cycles red, amber and green LEDs on BCM pins 2, 3 and 4.
# NOTE(review): the leading-zero literals (02, 03, 04) are Python-2 octal
# notation equal to 2, 3 and 4 here; they are a SyntaxError under Python 3.
import RPi.GPIO as GPIO
import time
# set GPIO Mode
GPIO.setmode(GPIO.BCM)
# set pin mode
# BCM pin 2 drives the red LED, pin 3 amber, pin 4 green (see prints below).
GPIO.setup(02,GPIO.OUT)
GPIO.setup(03,GPIO.OUT)
GPIO.setup(04,GPIO.OUT)
# set inital pin state
GPIO.output(02,False)
GPIO.output(03,False)
GPIO.output(04,False)
# start a loop to control the lights.
# NOTE(review): loops forever; GPIO.cleanup() is never called, so pins keep
# their last state on Ctrl-C -- confirm whether that is intended.
while True:
    print "Red on"
    GPIO.output(02,True)
    time.sleep(1)
    print "Red off"
    GPIO.output(02,False)
    time.sleep(1)
    print "Amber on"
    GPIO.output(03,True)
    time.sleep(1)
    print "Amber off"
    GPIO.output(03,False)
    time.sleep(1)
    print "Green on"
    GPIO.output(04,True)
    time.sleep(1)
    print "Green off"
    GPIO.output(04,False)
    time.sleep(1)
|
qubecad/Pi_traffic_lights_IWDEV
|
gpio_traffic_lights.py
|
Python
|
apache-2.0
| 739
|
[
"Amber"
] |
60e8d5b7d8c3cd8cd5908438a67b90aace96ea72658540f6aac54d99456cf955
|
from pymol.wizard import Wizard
from pymol import cmd
import pymol
import traceback
# Name prefixes for the wizard's temporary PyMOL objects: atom selections
# ("_pf_s_*", suffixed 'b' for mobile and 'a' for target atoms), distance
# dash objects ("_pf_d_*"), and the indicator selection highlighting the
# most recently picked atom.
sele_prefix = "_pf_s_"
sele_prefix_len = len(sele_prefix)
dist_prefix = "_pf_d_"
indi_sele = "_indicate_pf"
class Pair_fit(Wizard):
    """PyMOL wizard for pair fitting: the user alternately picks a mobile
    atom and a target atom; each such pair is stored as two temporary
    selections. fit() then superimposes the mobile object onto the target
    object using cmd.pair_fit over all collected pairs.

    NOTE(review): Python-2 era code -- `print` statements, `apply()` and
    list-returning `filter()` are used; porting to Python 3 would require
    changes in get_sele_list(), fit() and do_pick().
    """
    def __init__(self,_self=cmd):
        Wizard.__init__(self,_self)
        self.memory = 0  # NOTE(review): never read elsewhere in this class
        self.n_pair = 0  # number of completed (mobile, target) pairs
        self.status = 0 # 0 no atoms selections, 1 atom selected
        self.message = None  # extra line shown in the prompt, if any
        # Remember the user's mouse selection mode so cleanup() can restore it.
        self.selection_mode = cmd.get_setting_legacy("mouse_selection_mode")
        cmd.set("mouse_selection_mode",0) # set selection mode to atomic
        cmd.deselect() # disable the active selection (if any)
    def get_panel(self):
        # Wizard side-panel: [type, label, action] rows (1 = title, 2 = button).
        return [
            [ 1, 'Pair Fitting',''],
            [ 2, 'Fit %d Pairs'%self.n_pair,'cmd.get_wizard().fit()'],
            [ 2, 'Delete Last Pair','cmd.get_wizard().remove_last()'],
            [ 2, 'Redraw','cmd.get_wizard().update_dashes()'],
            [ 2, 'Clear','cmd.get_wizard().clear()'],
            [ 2, 'Done','cmd.set_wizard()'],
            ]
    def cleanup(self):
        # Called when the wizard is dismissed.
        self.clear()
        cmd.set("mouse_selection_mode",self.selection_mode) # restore selection mode
    def clear(self):
        # Delete all temporary selections/dashes and reset the state machine.
        cmd.delete(sele_prefix+"*")
        cmd.delete(dist_prefix+"*")
        cmd.delete(indi_sele)
        lst = cmd.get_names('selections')  # NOTE(review): result unused
        self.n_pair = 0
        self.status = 0
        self.message = None
        cmd.unpick()
        cmd.refresh_wizard()
    def get_prompt(self):
        # Prompt text depends on whether we are waiting for a mobile or
        # a target atom; an error/status message is appended when present.
        self.prompt = None
        if self.status==0:
            self.prompt = [ 'Pick the mobile atom...']
        elif self.status==1:
            self.prompt = [ 'Pick the target atom...' ]
        if self.message!=None:
            self.prompt.append(self.message)
        return self.prompt
    def set_status(self,status):
        self.status = status
        cmd.refresh_wizard()
    def get_sele_list(self,mode='all'):
        # Return the wizard's own selections, sorted; optionally only the
        # mobile ('...b') or target ('...a') halves of the pairs.
        lst = cmd.get_names('selections')
        lst = filter(lambda x:x[0:sele_prefix_len]==sele_prefix,lst)
        lst.sort()
        if mode == 'mobile': # mobile
            lst=filter(lambda x:x[-1:]=='b',lst)
        elif mode == 'target': # target
            lst=filter(lambda x:x[-1:]=='a',lst)
        return lst
    def fit(self):
        # build up the pair-wise list of selections
        cmd.delete(dist_prefix+"*")
        lst = self.get_sele_list()
        c = 0
        args = []
        # Pop selections two at a time; sorting guarantees they alternate
        # target ('a') / mobile ('b') within each pair index.
        while 1:
            if not len(lst): break
            a = lst.pop()
            if not len(lst): break
            b = lst.pop()
            args.append(a)
            args.append(b)
        # do the fit
        if len(args):
            cmd.push_undo(args[0])
            dist = apply(cmd.pair_fit,args)  # RMS distance after fitting
            self.message = "RMS over %d pairs = %5.3f"%(self.n_pair,dist)
            cmd.refresh_wizard()
        self.update_dashes()
    def remove_last(self):
        # build up the pair-wise list of selections
        cmd.delete(dist_prefix+"*")
        lst = self.get_sele_list()
        # Delete up to two selections (the most recent pair); only a
        # completed pair decrements the pair counter.
        if len(lst):
            cmd.delete(lst.pop())
        if len(lst):
            cmd.delete(lst.pop())
            self.n_pair = self.n_pair - 1
        self.update_dashes()
        self.status=0
        cmd.refresh_wizard()
    def update_dashes(self):
        # Redraw one unlabeled dashed distance object per pair.
        cmd.delete(dist_prefix+"*")
        lst = self.get_sele_list()
        c = 0
        while 1:
            if not len(lst): break
            a = lst.pop()
            if not len(lst): break
            b = lst.pop()
            name = dist_prefix+str(c)
            cmd.dist(name,a,b,width=7,length=0.05,gap=0.05)
            cmd.hide('label',name)
            cmd.enable(name)
            c = c + 1
    def check_same_object(self,lst,sele):
        # True (1) if sele is in the same molecular object as lst[0],
        # or if there is nothing to compare against yet.
        if not len(lst):
            return 1
        else:
            if cmd.count_atoms("((byobj %s) and %s)"%(lst[0],sele),quiet=1):
                return 1
        return 0
    def check_different_object(self,lst,sele):
        # True (1) if sele is NOT in the same object as lst[0],
        # or if there is nothing to compare against yet.
        if not len(lst):
            return 1
        else:
            if not cmd.count_atoms("((byobj %s) and %s)"%(lst[0],sele),quiet=1):
                return 1
        return 0
    def do_select(self,name): # map selects into picks
        cmd.unpick()
        try:
            cmd.edit(name + " and not " + sele_prefix + "*") # note, using new object name wildcards
            cmd.delete(name)
            self.do_pick(0)
        except pymol.CmdException:
            traceback.print_exc()
            pass
    def do_pick(self,bondFlag):
        # State machine for atom picks: status 0 expects a mobile atom
        # (selection suffix 'b'), status 1 a target atom (suffix 'a').
        if bondFlag:
            self.message = "Error: please select an atom, not a bond."
            print self.message
        else:
            if self.status==0:
                lst = self.get_sele_list(mode='mobile')
                # All mobile atoms must come from one object.
                if not self.check_same_object(lst,"(pk1)"):
                    self.message = "Error: must select an atom in the same object as before."
                    print self.message
                else:
                    name = sele_prefix + "%02db"%self.n_pair # mobile end in 'b'
                    cmd.select(name,"(pk1)")
                    cmd.unpick()
                    cmd.select(indi_sele,name)
                    cmd.enable(indi_sele)
                    self.status = 1
                    self.message = None
            elif self.status==1:
                lst = self.get_sele_list(mode='target')
                # All target atoms must come from one object...
                if not self.check_same_object(lst,"(pk1)"):
                    self.message = "Error: must select an atom in the same object as before."
                    print self.message
                else:
                    lst = self.get_sele_list(mode='mobile')
                    # ...and that object must differ from the mobile one.
                    if not self.check_different_object(lst,"(pk1)"):
                        self.message = "Error: target atom must be in a distinct object."
                        print self.message
                    else:
                        name = sele_prefix + "%02da"%self.n_pair # target end in 'a'
                        cmd.select(name,"(pk1)")
                        cmd.unpick()
                        cmd.select(indi_sele,name)
                        cmd.enable(indi_sele)
                        self.n_pair = self.n_pair + 1
                        self.status = 0
                        self.update_dashes()
        cmd.refresh_wizard()
|
gratefulfrog/lib
|
python/pymol/wizard/pair_fit.py
|
Python
|
gpl-2.0
| 6,453
|
[
"PyMOL"
] |
1a4d3d07bda26b2752868834cacf9137750d07a93690e69da2d59975787d77ff
|
# -*- encoding: utf-8 -*-
"""Functions for controlling family-wise error rate (FWER) using random
field theory (RFT) techniques and drawing diagnostic plots.
Author: Tuomas Puoliväli
Email: tuomas.puolivali@helsinki.fi
Last modified: 26th July 2018
License: Revised 3-clause BSD
Source: https://github.com/puolival/multipy/blob/master/rft.py
References:
Brett M, Penny W, Kiebel S (2003): An introduction to random field theory.
URL: https://www.fil.ion.ucl.ac.uk/spm/doc/books/hbf2/pdfs/Ch14.pdf
Worsley KJ, Evans AC, Marrett S, Neelin P (1992): A three-dimensional
statistical analysis for CBF activation studies in human brain. Journal of
Cerebral Blood Flow and Metabolism 12:900-918.
http://matthew-brett.github.io/teaching/random_fields.html
WARNING: These functions have not been entirely validated yet.
"""
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy.stats import norm, zscore
from scipy.ndimage.filters import gaussian_filter
from skimage.measure import label
def _n_resels(X, fwhm):
"""Function for estimating the number of resolution elements or resels.
Input arguments:
================
X : ndarray of floats
Two-dimensional data array containing the analyzed data.
fwhm : float
Size of the smoothing kernel measured as full-width at half maximum
(FWHM).
"""
"""Estimate the number of resolution elements. Here the idea is to simply
compute how many FWHM sized blocsk are needed to cover the entire area of
X. TODO: Replace with a better (proper?) method."""
nx, ny = np.shape(X)
R = float(nx * ny) / float(fwhm ** 2)
return R
def _expected_ec_2d(R, Z):
"""Function for computing the expected value of the Euler characteristic
of a Gaussian field.
Input arguments:
================
R : float
The number of resels or resolution elements.
Z : float
The applied Z-score threshold.
"""
EC = R * (4*np.log(2)) * (2*np.pi)**(-3/2.) * Z*np.exp(-1/2.*Z**2)
return EC
def _threshold(X, Z):
"""Function for thresholding smoothed data. The data is z-scored before
thresholding.
Input arguments:
================
X : ndarray of floats
The thresholded array.
Z : float
The Z-score threshold.
"""
Y = np.zeros(np.shape(X))
# Z-score X
X = X - np.mean(X)
X = X / np.std(X)
# Threshold
Y[X > Z] = 1
return Y
def _ec_2d(X):
    """Function for computing the empirical Euler characteristic of a given
    thresholded data array.

    Input arguments:
    ================
    X : ndarray of floats
      The thresholded image. Ones correspond to activated regions.

    Output arguments:
    =================
    ec : float
      The empirical Euler characteristic (here: number of connected
      activated components).
    """
    # TODO: check for holes in the activated regions.
    # The deprecated `neighbors` keyword was removed from
    # skimage.measure.label; connectivity=2 (8-connectivity) alone
    # expresses the same setting.
    _, ec = label(X, background=0, return_num=True, connectivity=2)
    return ec
def plot_ec(X, fwhm, Z_low=0, Z_high=5):
    """Plot expected and empirical Euler characteristics (ECs) for a range
    of different Z-scores.

    Input arguments:
    ================
    X : ndarray of floats
      The analyzed two-dimensional data.
    fwhm : float
      Width of the spatial smoothing kernel described as full-width at
      half maximum (FWHM).
    Z_low, Z_high : float, float
      The lowest and highest z-scores for which to compute the EC.
    """
    """Estimate number of resels."""
    R = _n_resels(X, fwhm)
    """Compute the expected Euler characteristic for Z-scores in the
    desired range."""
    Z = np.linspace(Z_low, Z_high, 100)
    expected_ec = np.asarray([_expected_ec_2d(R, z) for z in Z])
    """Compute empirical Euler characteristics for the same Z-scores."""
    # The smoothing does not depend on the threshold, so do it once
    # outside the loop (previously recomputed for every Z-score).
    Y = _smooth(X, fwhm)
    empirical_ec = np.zeros(np.shape(Z))
    for i, z in enumerate(Z):
        Yt = _threshold(Y, z)
        empirical_ec[i] = _ec_2d(Yt)
    """Plot the data."""
    sns.set_style('darkgrid')
    fig = plt.figure(figsize=(8, 6))
    ax = fig.add_subplot(111)
    ax.plot(Z, expected_ec, linewidth=2)
    ax.plot(Z, empirical_ec, linewidth=2)
    ax.set_xlabel('Z-score threshold')
    ax.set_ylabel('Euler characteristic')
    ax.legend(['Expected', 'Empirical'])
    fig.tight_layout()
    plt.show()
def plot_expected_ec(R, Z_low=0, Z_high=5):
    """Draw the expected Euler characteristic E[EC] as a function of the
    Z-score threshold.

    Input arguments:
    ================
    R : float
      The number of resels or resolution elements.
    Z_low : float
      The lowest z-score for which to plot E[EC].
    Z_high : float
      The highest z-score for which to plot E[EC].
    """
    # Expected EC over the requested threshold range.
    thresholds = np.linspace(Z_low, Z_high, 100)
    expected = np.asarray([_expected_ec_2d(R, z) for z in thresholds])
    # Draw the plot.
    sns.set_style('darkgrid')
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(thresholds, expected)
    ax.set_xlabel('Z-score threshold')
    ax.set_ylabel('Expected Euler characteristic')
    ax.set_xlim([np.min(thresholds)-0.1, np.max(thresholds)+0.1])
    # +- 2%
    pad = 2*np.max(expected)/100
    ax.set_ylim([-pad, np.max(expected)+pad])
    fig.tight_layout()
    plt.show()
def _smooth(X, fwhm):
"""Function for spatial smoothing using a Gaussian kernel.
Input arguments:
================
X : ndarray of floats
The analyzed two-dimensional data.
fwhm : float
The width of the smoothing kernel described as full-width at half
maximum (FWHM).
"""
"""Compute standard deviation corresponding to the given
full-width at half maximum value."""
sd = fwhm / np.sqrt(8*np.log(2))
"""Smooth the data."""
# TODO: consider which filter mode should be used
Y = gaussian_filter(X, sigma=sd, mode='wrap')
return Y
def rft_2d(X, fwhm, alpha=0.05, verbose=True):
    """Control the FWER using random field theory (RFT) for
    two-dimensional data (e.g. time-frequency maps or single-slice
    anatomical data).

    Input arguments:
    ================
    X : ndarray of floats
      The analyzed two-dimensional statistical map.
    fwhm : float
      Size of the smoothing kernel measured as full-width at half maximum
      (FWHM).
    alpha : float
      The desired critical level (default 0.05).
    verbose : bool
      Whether to print data on intermediate estimation results.

    Output arguments:
    =================
    X_thr : ndarray of floats
      Array with the same shape as X; ones mark statistically significant
      elements, zeros non-significant ones.
    X_smooth : ndarray of floats
      The analyzed data after spatial smoothing.
    ec : float
      The empirical Euler characteristic.
    """
    # For reference only: what Bonferroni correction would require.
    n_comparisons = np.prod(np.shape(X))
    bonferroni_z = norm.ppf(1 - alpha / n_comparisons)
    if verbose:
        print('The Bonferroni threshold is Z = %2.2f' % bonferroni_z)
    # Estimate the number of resolution elements.
    resels = _n_resels(X, fwhm)
    if verbose:
        print('The estimated number of resels is %d' % resels)
    # Find the smallest candidate Z whose expected EC drops below alpha.
    candidate_z = np.linspace(1, 8, 1000)
    ec_curve = _expected_ec_2d(resels, candidate_z)
    z_threshold = candidate_z[ec_curve < alpha][0]
    if verbose:
        print('The Z-score threshold for FWER of %1.3f is %1.3f' %
              (alpha, z_threshold))
    # Smooth and threshold the data array.
    X_smooth = _smooth(X, fwhm=fwhm)
    X_thr = X_smooth > z_threshold
    # Empirical Euler characteristic of the significant regions.
    ec = _ec_2d(X_thr)
    if verbose:
        print('The empirical Euler characteristic is %d' % ec)
    return X_thr, X_smooth, ec
def plot_rft_2d(X, X_smooth, X_significant):
    """Visualize raw and smoothed data side by side, with significant
    regions overlaid in red.

    Input arguments:
    X : ndarray of floats
      The original analyzed data array.
    X_smooth : ndarray of floats
      The data after spatial smoothing has been applied.
    X_significant : ndarray of floats
      The thresholded data indicating which elements are significant.

    TODO: let user provide data colormaps and plot only the boundaries of
    the thresholded regions.
    """
    sns.set_style('dark')
    fig = plt.figure(figsize=(10, 6))
    left_ax = fig.add_subplot(121)
    left_ax.imshow(X, cmap='gray', origin='lower')
    left_ax.imshow(X_significant, cmap='Reds', origin='lower', alpha=0.5)
    left_ax.set_xlabel('Pixel X-coordinate')
    left_ax.set_ylabel('Pixel Y-coordinate')
    left_ax.set_title('Raw data')
    right_ax = fig.add_subplot(122)
    right_ax.imshow(X_smooth, cmap='gray', origin='lower')
    right_ax.imshow(X_significant, cmap='Reds', origin='lower', alpha=0.5)
    right_ax.set_title('Smoothed data')
    fig.tight_layout()
    plt.show()
|
puolival/multipy
|
multipy/rft.py
|
Python
|
bsd-3-clause
| 9,184
|
[
"Gaussian"
] |
008f2c18f73c40efd79faa26a15725fe87280227bd69afc83f97a213e7c9762d
|
from gevent import monkey
monkey.patch_all()
import os
import logging
import logging.config
from gevent.pool import Pool
from lxml import etree
from restkit import request
from restkit.globals import set_manager
from restkit.manager.mgevent import GeventManager
from urlparse import urljoin
from urlparse import urlparse
from urlparse import urlunparse
# set the gevent connection manager
set_manager(GeventManager(timeout=200))
def url_join(base, link):
    """Resolve *link* against *base*, normalizing the path and dropping
    any fragment. Links that are already absolute are returned as-is.
    """
    if urlparse(link).netloc:
        # Already absolute -- nothing to resolve.
        return link
    resolved = urlparse(urljoin(base, link))
    normalized_path = os.path.normpath(resolved.path)
    # Strip hashes, bug in restkit
    return urlunparse((resolved.scheme, resolved.netloc, normalized_path,
                       resolved.params, resolved.query, None))
def get_urls(base, html):
    """Given a string of html, return all the urls that are linked"""
    anchors = etree.HTML(html).iterfind(".//a[@href]")
    return [url_join(base, anchor.attrib["href"].strip()) for anchor in anchors]
# logging.config.dictConfig schema: the file handler records everything to
# travel.log, the console handler shows only errors. The custom fields
# (status, url, source) referenced in the formats are supplied via the
# `extra=` argument on each log call in Salesman.visit().
LOG_CONFIG = {
    "version": 1,
    "formatters": {
        "salesman": {
            "format": ("[%(levelname)s] - %(status)s %(url)s "
                       "%(source)s %(message)s"),
        },
        "pretty-salesman": {
            "format": ("%(levelname)s\tHTTP %(status)s\nURL\t%(url)s\nSOURCE"
                       "\t%(source)s\n\n"),
        },
    },
    "handlers": {
        "file": {
            "level": "DEBUG",
            "class": "logging.FileHandler",
            "formatter": "salesman",
            "filename": "travel.log",
        },
        "console": {
            "level": "ERROR",
            "class": "logging.StreamHandler",
            "formatter": "pretty-salesman",
            "stream": "ext://sys.stderr"
        },
    },
    "loggers": {
        "salesman": {
            "level": "DEBUG",
            "handlers": ["file", "console"],
        },
    }
}
class Salesman(object):
    """Link checker / crawler built on gevent greenlets and restkit.

    verify() checks that every link on the given pages responds; explore()
    crawls the whole site breadth-first, never leaving the base domain.
    NOTE(review): Python-2 era code (urlparse module, restkit).
    """
    def __init__(self, externals=False, logger=None, log_config=None):
        """
        :param externals: If True, check that links to external URLs are valid
        :param logger: The logger to use; when None, one is configured from
            log_config (default: module-level LOG_CONFIG)
        :param log_config: The log_config dictionary to use
        """
        if log_config is None:
            log_config = LOG_CONFIG
        if logger is not None:
            self.logger = logger
        else:
            logging.config.dictConfig(log_config)
            self.logger = logging.getLogger("salesman")
        self.externals = externals  # NOTE(review): never read in this class
        self.visited_urls = set()   # all URLs requested so far (incl. redirects)
        self.next_urls = []         # NOTE(review): never used in this class
    def verify(self, *vurls):
        # Fetch each given page, then request every link found on it once.
        # Links of those pages are deliberately NOT followed further.
        for url in vurls:
            self.base = urlparse(url)
            urls = self.visit(url)
            # Limit Pool size to 100 to prevent HTTP timeouts
            pool = Pool(100)
            def visit(url, source):
                if not self.is_invalid(url):
                    self.visit(url, source)
            for vurl, source in urls:
                pool.spawn(visit, vurl, source)
            pool.join()
    def visit(self, url, source=None):
        """Request *url* (following redirects), log the outcome and return
        the links found on the page.

        :param source: URL of the page that linked here (for logging).
        :return: list of (linked_url, url) pairs; empty for request errors,
            off-domain pages, and unparsable bodies.
        """
        self.visited_urls.add(url)
        try:
            response = request(url, follow_redirect=True)
        except Exception as e:
            # Request failed entirely; treat as a dead end.
            return []
        # Reset the url for redirects
        url = response.final_url
        # Add the new url to the set as well
        self.visited_urls.add(url)
        o = urlparse(url)
        level = logging.INFO if response.status_int < 400 else logging.ERROR
        plans = "VISIT" if o.netloc == self.base.netloc else "STOP"
        # Extra fields consumed by the formatters in LOG_CONFIG.
        d = {
            "status": response.status,
            "url": url,
            "source": source,
        }
        self.logger.log(level, "%s", plans, extra=d)
        if o.netloc != self.base.netloc:
            return []
        try:
            return [(u, url) for u in get_urls(url, response.body_string())]
        except Exception as e:
            # Body could not be read/parsed as HTML.
            return []
    def is_invalid(self, url):
        # Skip already-visited URLs and non-HTTP link schemes.
        return (url in self.visited_urls
                or url.startswith("mailto:")
                or url.startswith("javascript:"))
    def explore(self, url):
        """Crawl the whole site starting at *url*, staying on its domain."""
        self.visited_urls = set()
        self.base = urlparse(url)
        # Limit Pool size to 100 to prevent HTTP timeouts
        pool = Pool(100)
        def visit(target, source):
            if not self.is_invalid(target):
                # Recursively schedule every discovered link.
                for url, source in self.visit(target, source):
                    pool.apply_async(visit, args=[url, source])
        pool.apply_async(visit, args=[url, None])
        pool.join()
|
kyleconroy/salesman
|
salesman/__init__.py
|
Python
|
mit
| 4,734
|
[
"VisIt"
] |
8c3b050c9171f02153163b31b0f4c0492b961fd64cee3cdeafeb2b12a25d0059
|
"""Contains the various config storage classes"""
# config_storage.py
# Mission Pinball Framework Wizard
# Written by Brian Madden, Gabe Knuth & John Marsh
# Released under the MIT License. (See license info at the end of this file.)
from mpf.system.utility_functions import Util
class MPFConfigFile():
    """A config file node: its own parsed config plus any child config
    files, which can be merged recursively into one dictionary."""
    def __init__(self, filename, config):
        self.filename = filename      # path of this config file
        self.config = config          # parsed config of this file alone
        self.child_files = dict()     # filename -> MPFConfigFile
    def add_child_file(self, child_file):
        """Register *child_file* (an MPFConfigFile) under its filename."""
        self.child_files[child_file.filename] = child_file
    def get_merged_config(self):
        """Return this file's config with all children merged in,
        depth-first, via Util.dict_merge."""
        merged = self.config
        for child in self.child_files.values():
            merged = Util.dict_merge(merged, child.get_merged_config())
        return merged
# The MIT License (MIT)
# Copyright (c) 2013-2016 Brian Madden, Gabe Knuth and the AUTHORS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
|
missionpinball/mpf-wizard
|
mpfwiz/config_storage.py
|
Python
|
mit
| 1,964
|
[
"Brian"
] |
9b64287fa12cc5a3eac354669c36e72dcb6754e3c2de9388b1c97cc8aaedadea
|
#!/usr/bin/env python
##############################################################################################
#
#
# regrid_emissions_N96e.py
#
#
# Requirements:
# Iris 1.10, time, cf_units, numpy
#
#
# This Python script has been written by N.L. Abraham as part of the UKCA Tutorials:
# http://www.ukca.ac.uk/wiki/index.php/UKCA_Chemistry_and_Aerosol_Tutorials_at_vn10.4
#
# Copyright (C) 2015 University of Cambridge
#
# This is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# It is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You find a copy of the GNU Lesser General Public License at <http://www.gnu.org/licenses/>.
#
# Written by N. Luke Abraham 2016-10-20 <nla27@cam.ac.uk>
# Modified by Marcus Koehler 2018-01-05 <mok21@cam.ac.uk>
#
#
##############################################################################################
# preamble
import time
import iris
import cf_units
import numpy

# --- CHANGE THINGS BELOW THIS LINE TO WORK WITH YOUR FILES ETC. ---

# name of file containing an ENDGame grid, e.g. your model output
# NOTE: all the fields in the file should be on the same horizontal
#       grid, as the field used MAY NOT be the first in order of STASH
grid_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/um/archer/ag542/apm.pp/ag542a.pm1988dec'
#
# name of emissions file
# NOTE: We use the fluxes from the Gregorian calendar file also for the 360_day emission files
emissions_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/emissions/OXBUDS/0.5x0.5/v3/combined_sources_n-butane_1960-2020_v3_greg.nc'

# --- BELOW THIS LINE, NOTHING SHOULD NEED TO BE CHANGED ---

species_name='n-C4H10'

# Mid-month time points on the 360-day calendar: 732 monthly values from
# 1960-Jan (day 15) to 2020-Dec (day 21945), in days since 1960-01-01.
# This is an exact arithmetic sequence with step 30, so it is generated
# here instead of the original hand-typed 732-element literal (which was
# duplicated twice in the script and error-prone to maintain).
time_points = numpy.arange(15, 21946, 30)

# this is the grid we want to regrid to, e.g. N96 ENDGame
grd=iris.load(grid_file)[0]
grd.coord(axis='x').guess_bounds()
grd.coord(axis='y').guess_bounds()

# This is the original data
ems=iris.load_cube(emissions_file)

# make intersection between 0 and 360 longitude to ensure that
# the data is regridded correctly
nems = ems.intersection(longitude=(0, 360))

# make sure that we use the same coordinate system, otherwise regrid won't work
nems.coord(axis='x').coord_system=grd.coord_system()
nems.coord(axis='y').coord_system=grd.coord_system()

# now guess the bounds of the new grid prior to regridding
nems.coord(axis='x').guess_bounds()
nems.coord(axis='y').guess_bounds()

# now regrid (area-weighted, so the emitted mass is conserved)
ocube=nems.regrid(grd,iris.analysis.AreaWeighted())

# now add correct attributes and names to netCDF file
ocube.var_name='emissions_'+str.strip(species_name)
ocube.long_name='n-butane surface emissions'
ocube.standard_name='tendency_of_atmosphere_mass_content_of_butane_due_to_emission'
ocube.units=cf_units.Unit('kg m-2 s-1')
ocube.attributes['vertical_scaling']='surface'
ocube.attributes['tracer_name']=str.strip(species_name)

# global attributes, so don't set in local_keys
# NOTE: all these should be strings, including the numbers!
# basic emissions type
ocube.attributes['emission_type']='1' # time series
ocube.attributes['update_type']='1' # same as above
ocube.attributes['update_freq_in_hours']='120' # i.e. 5 days
ocube.attributes['um_version']='10.6' # UM version
ocube.attributes['source']='combined_sources_n-butane_1960-2020_v3_greg.nc'
ocube.attributes['title']='Time-varying monthly surface emissions of n-butane from 1960 to 2020.'
ocube.attributes['File_version']='v3'
ocube.attributes['File_creation_date']=time.ctime(time.time())
ocube.attributes['grid']='regular 1.875 x 1.25 degree longitude-latitude grid (N96e)'
ocube.attributes['history']=time.ctime(time.time())+': '+__file__+' \n'+ocube.attributes['history']
ocube.attributes['institution']='Centre for Atmospheric Science, Department of Chemistry, University of Cambridge, U.K.'
ocube.attributes['reference']='Granier et al., Clim. Change, 2011; Lamarque et al., Atmos. Chem. Phys., 2010; Helmig et al., Atmos. Environ., 2014.'

# drop attributes inherited from the input file (replaced above by
# 'File_creation_date' and the other global attributes)
del ocube.attributes['file_creation_date']
del ocube.attributes['description']

# rename and set time coord - mid-month from 1960-Jan to 2020-Dec
ocube.coord(axis='t').var_name='time'
ocube.coord(axis='t').standard_name='time'
ocube.coords(axis='t')[0].units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='360_day')
ocube.coord(axis='t').points=time_points

# make z-direction.
zdims=iris.coords.DimCoord(numpy.array([0]),standard_name = 'model_level_number',
                           units='1',attributes={'positive':'up'})
ocube.add_aux_coord(zdims)
ocube=iris.util.new_axis(ocube, zdims)

# now transpose cube to put Z 2nd
ocube.transpose([1,0,2,3])

# make coordinates 64-bit
ocube.coord(axis='x').points=ocube.coord(axis='x').points.astype(dtype='float64')
ocube.coord(axis='y').points=ocube.coord(axis='y').points.astype(dtype='float64')
#ocube.coord(axis='z').points=ocube.coord(axis='z').points.astype(dtype='float64') # integer
ocube.coord(axis='t').points=ocube.coord(axis='t').points.astype(dtype='float64')
# for some reason, longitude_bounds are double, but latitude_bounds are float
ocube.coord('latitude').bounds=ocube.coord('latitude').bounds.astype(dtype='float64')

# add forecast_period & forecast_reference_time
# forecast_reference_time: the same mid-month points as the time coordinate
frt=time_points.astype(dtype='float64')
frt_dims=iris.coords.AuxCoord(frt,standard_name = 'forecast_reference_time',
                              units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='360_day'))
ocube.add_aux_coord(frt_dims,data_dims=0)
ocube.coord('forecast_reference_time').guess_bounds()
# forecast_period
fp=numpy.array([-360],dtype='float64')
fp_dims=iris.coords.AuxCoord(fp,standard_name = 'forecast_period',
                             units=cf_units.Unit('hours'),bounds=numpy.array([-720,0],dtype='float64'))
ocube.add_aux_coord(fp_dims,data_dims=None)

# add-in cell_methods
ocube.cell_methods = [iris.coords.CellMethod('mean', 'time')]

# set _FillValue
fillval=1e+20
ocube.data = numpy.ma.array(data=ocube.data, fill_value=fillval, dtype='float32')

# output file name, based on species
outpath='ukca_emiss_nC4H10.nc'

# don't want time to be cattable, as is a periodic emissions file
iris.FUTURE.netcdf_no_unlimited=True

# annoying hack to set a missing_value attribute as well as a _FillValue attribute
dict.__setitem__(ocube.attributes, 'missing_value', fillval)

# now write-out to netCDF
saver = iris.fileformats.netcdf.Saver(filename=outpath, netcdf_format='NETCDF3_CLASSIC')
saver.update_global_attributes(Conventions=iris.fileformats.netcdf.CF_CONVENTIONS_VERSION)
saver.write(ocube, local_keys=['vertical_scaling', 'missing_value','um_stash_source','tracer_name'])

# end of script
|
acsis-project/emissions
|
emissions/python/timeseries_1960-2020/regrid_nC4H10_emissions_n96e_360d.py
|
Python
|
gpl-3.0
| 17,154
|
[
"NetCDF"
] |
d6d8971d5b70135c5855870c6fe65a7d592611a6612dafdba029aca80bca888a
|
"""Setup tu build panISa program
Derived from setuptools based setup module
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))

# Read the long description from the README.  (The `long_description`
# argument in setup() below is intentionally left commented out, matching
# the original behaviour.)
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

# Package metadata for PyPI; see
# https://packaging.python.org/specifications/core-metadata/ for field docs.
setup(
    name='panisa',
    version='0.1.4',
    description='panISa is a software to search insertion sequence (IS) on resequencing data (bam file)',
    # long_description=long_description,
    url='https://github.com/bvalot/panISa',
    author='Benoit Valot',
    author_email='benoit.valot@univ-fcomte.fr',
    # Trove classifiers: https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
    ],
    keywords='bioinformatic IS bacteria',
    packages=find_packages('.', exclude=['script', 'validate']),
    install_requires=['pysam>=0.9', 'requests>=2.12'],
    package_data={
        'sample': ['test.bam'],
    },
    # Entry-point command-line tools shipped with the package.
    scripts=['panISa.py', 'ISFinder_search.py']
)
|
bvalot/panISa
|
setup.py
|
Python
|
gpl-3.0
| 6,696
|
[
"VisIt",
"pysam"
] |
b91d2983e86fcf8f49c1c3576a0523838301152e2ad551f9bbc891557c225345
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 Satpy developers
#
# satpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# satpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with satpy. If not, see <http://www.gnu.org/licenses/>.
"""EUMETSAT EPS-SG Visible/Infrared Imager (VII) Level 1B products reader.
The ``vii_l1b_nc`` reader reads and calibrates EPS-SG VII L1b image data in netCDF format. The format is explained
in the `EPS-SG VII Level 1B Product Format Specification V4A`_.
This version is applicable for the vii test data V2 to be released in Jan 2022.
.. _EPS-SG VII Level 1B Product Format Specification V4A: https://www.eumetsat.int/media/44393
"""
import logging
import numpy as np
from satpy.readers.vii_base_nc import ViiNCBaseFileHandler
from satpy.readers.vii_utils import C1, C2, MEAN_EARTH_RADIUS
logger = logging.getLogger(__name__)
class ViiL1bNCFileHandler(ViiNCBaseFileHandler):
    """Reader class for VII L1B products in netCDF format."""

    def __init__(self, filename, filename_info, filetype_info, **kwargs):
        """Read the calibration data and prepare the class for dataset reading."""
        super().__init__(filename, filename_info, filetype_info, **kwargs)
        # Read the variables which are required for the calibration.
        # These are per-channel coefficient arrays, indexed later by the
        # chan_thermal_index / chan_solar_index values from dataset_info.
        self._bt_conversion_a = self['data/calibration_data/bt_conversion_a'].values
        self._bt_conversion_b = self['data/calibration_data/bt_conversion_b'].values
        self._channel_cw_thermal = self['data/calibration_data/channel_cw_thermal'].values
        self._integrated_solar_irradiance = self['data/calibration_data/Band_averaged_solar_irradiance'].values
        # Computes the angle factor for reflectance calibration as inverse of cosine of solar zenith angle
        # (the values in the product file are on tie points and in degrees,
        # therefore interpolation and conversion to radians are required)
        solar_zenith_angle = self['data/measurement_data/solar_zenith']
        solar_zenith_angle_on_pixels = self._perform_interpolation(solar_zenith_angle)
        solar_zenith_angle_on_pixels_radians = np.radians(solar_zenith_angle_on_pixels)
        self.angle_factor = 1.0 / (np.cos(solar_zenith_angle_on_pixels_radians))

    def _perform_calibration(self, variable, dataset_info):
        """Perform the calibration.

        Args:
            variable: xarray DataArray containing the dataset to calibrate.
            dataset_info: dictionary of information about the dataset.

        Returns:
            DataArray: array containing the calibrated values and all the original metadata.

        Raises:
            ValueError: if dataset_info requests an unknown calibration name.
        """
        calibration_name = dataset_info['calibration']
        if calibration_name == 'brightness_temperature':
            # Extract the values of calibration coefficients for the current channel
            chan_index = dataset_info['chan_thermal_index']
            cw = self._channel_cw_thermal[chan_index]
            a = self._bt_conversion_a[chan_index]
            b = self._bt_conversion_b[chan_index]
            # Perform the calibration
            calibrated_variable = self._calibrate_bt(variable, cw, a, b)
            # Re-attach the original metadata, lost in the arithmetic above.
            calibrated_variable.attrs = variable.attrs
        elif calibration_name == 'reflectance':
            # Extract the values of calibration coefficients for the current channel
            chan_index = dataset_info['chan_solar_index']
            isi = self._integrated_solar_irradiance[chan_index]
            # Perform the calibration
            calibrated_variable = self._calibrate_refl(variable, self.angle_factor.data, isi)
            calibrated_variable.attrs = variable.attrs
        elif calibration_name == 'radiance':
            # Radiance is the native unit of the L1B data: no conversion needed.
            calibrated_variable = variable
        else:
            raise ValueError("Unknown calibration %s for dataset %s" % (calibration_name, dataset_info['name']))
        return calibrated_variable

    def _perform_orthorectification(self, variable, orthorect_data_name):
        """Perform the orthorectification.

        Args:
            variable: xarray DataArray containing the dataset to correct for orthorectification.
            orthorect_data_name: name of the orthorectification correction data in the product.

        Returns:
            DataArray: array containing the corrected values and all the original metadata.
        """
        try:
            orthorect_data = self[orthorect_data_name]
            # Convert the orthorectification delta values from meters to degrees
            # based on the simplified formula using mean Earth radius
            variable += np.degrees(orthorect_data / MEAN_EARTH_RADIUS)
        except KeyError:
            # Best-effort: orthorectification is optional, so a missing
            # correction dataset only downgrades accuracy, not availability.
            logger.warning('Required dataset %s for orthorectification not available, skipping', orthorect_data_name)
        return variable

    @staticmethod
    def _calibrate_bt(radiance, cw, a, b):
        """Perform the calibration to brightness temperature.

        Args:
            radiance: numpy ndarray containing the radiance values.
            cw: center wavelength [μm].
            a: temperature coefficient [-].
            b: temperature coefficient [K].

        Returns:
            numpy ndarray: array containing the calibrated brightness temperature values.
        """
        # Inverse Planck relation using the C1/C2 radiation constants from
        # vii_utils, plus the linear (a, b) band-correction coefficients.
        log_expr = np.log(1.0 + C1 / ((cw ** 5) * radiance))
        bt_values = b + (a * C2 / (cw * log_expr))
        return bt_values

    @staticmethod
    def _calibrate_refl(radiance, angle_factor, isi):
        """Perform the calibration to reflectance.

        Args:
            radiance: numpy ndarray containing the radiance values.
            angle_factor: numpy ndarray containing the inverse of cosine of solar zenith angle [-].
            isi: integrated solar irradiance [W/(m2 * μm)].

        Returns:
            numpy ndarray: array containing the calibrated reflectance values (in percent, hence the 100 factor).
        """
        refl_values = (np.pi / isi) * angle_factor * radiance * 100.0
        return refl_values
|
pytroll/satpy
|
satpy/readers/vii_l1b_nc.py
|
Python
|
gpl-3.0
| 6,435
|
[
"NetCDF"
] |
f39b5fafaaf3f3ce18c715eb3d183d07e014e60d43e46af78c888ea8968215c0
|
import circulartextlayout
import ui
# Layout grid for circulartextlayout.BuildView: one character per cell.
# NOTE(review): '*' appears to mark empty cells, 'b' button cells and 'i'
# image-view cells — presumably per circulartextlayout's conventions; confirm.
layout_text = '''
************
************
bbbbbbbbbbbb
************
i*i*i*i*i*i*
************
************
'''

# Built-in Pythonista ui images used for the six image views.
image_list = [ ui.Image.named(i) for i in 'Rabbit_Face Mouse_Face Cat_Face Dog_Face Octopus Cow_Face'.split()]

# Per-month alpha values for the twelve buttons (increasing opacity).
_range_12 = (.3, .34, .38, .42, .46, .5, .55, .6, .63, .7, .85, 1.0)

def button_action(sender):
    # Demo handler: just report which month button was tapped.
    print('Button {} was pressed.'.format(sender.title))

titles = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()

# Attribute dictionaries keyed by layout character: one dict per 'b' (button)
# cell and per 'i' (image view) cell, applied in order of appearance.
attributes = {'b': [{'action':button_action, 'font' :('Helvetica', 20),
                     'bg_color':'orange', 'alpha':_range_12[i],
                     'border_width':.5, 'text_color':'black', 'tint_color':'black',
                     'title':j } for i, j in enumerate(titles)],
              'i': [{'image':i, 'bg_color':'gray'} for i in image_list ]
              }

v = circulartextlayout.BuildView(layout_text, width=600, height=600, view_name='Counter',
                                 attributes=attributes).build_view()

# Make every button and image view circular (corner radius = half its width).
for i in range(1, len(titles)+1):
    v['button'+str(i)].corner_radius = v['button'+str(i)].width*.5
for i in range(1, len(image_list)+1):
    v['imageview'+str(i)].corner_radius = v['imageview'+str(i)].width*.5

v.present('popover')
|
balachandrana/textlayout
|
circular_test4.py
|
Python
|
mit
| 1,277
|
[
"Octopus"
] |
cfb86190242b7a30d67abf3f32fb39f3b03084697c3d71924fd870a30cd78e45
|
import url_file_read as provided
import math
import random
import project4
import matplotlib.pyplot as plt
protein_human = provided.read_protein(provided.HUMAN_EYELESS_URL)
protein_fly = provided.read_protein(provided.FRUITFLY_EYELESS_URL)
scoring_matrix = provided.read_scoring_matrix(provided.PAM50_URL)
"""
caluclate the score of alignment of human and fruit fly eyeless protein
"""
def align_human_fly_protein():
    """Locally align the human and fruit-fly eyeless proteins.

    Returns the (score, aligned_human, aligned_fly) tuple from
    project4.compute_local_alignment, using the module-level sequences
    and PAM50 scoring matrix.
    """
    matrix = project4.compute_alignment_matrix(
        protein_human, protein_fly, scoring_matrix, False)
    return project4.compute_local_alignment(
        protein_human, protein_fly, scoring_matrix, matrix)
#score, human_protein_aligned, fly_protein_aligned = align_human_fly_protein()
#print score
"""
calculate the ratio of similar the human of fly compare to consensus protein
"""
def calculate_similar_ratio():
result = align_human_fly_protein()
sequence_human = result[1].replace('-', '')
sequence_fly = result[2].replace('-', '')
protein_consensus = provided.read_protein(provided.CONSENSUS_PAX_URL)
alignment_matrix = project4.compute_alignment_matrix(sequence_human, protein_consensus, scoring_matrix, True)
result = project4.compute_global_alignment(sequence_human, protein_consensus, scoring_matrix, alignment_matrix)
mark = 0
for idx in range(len(result[1])):
if result[1][idx] == result[2][idx]:
mark += 1
print mark / float(len(result[1]))
protein_consensus = provided.read_protein(provided.CONSENSUS_PAX_URL)
alignment_matrix = project4.compute_alignment_matrix(sequence_fly, protein_consensus, scoring_matrix, True)
result = project4.compute_global_alignment(sequence_fly, protein_consensus, scoring_matrix, alignment_matrix)
mark = 0
for idx in range(len(result[1])):
if result[1][idx] == result[2][idx]:
mark += 1
print mark / float(len(result[1]))
#calculate_diff_ratio()
"""
ploting histogram about score of alignment to the disorder sequence
"""
def save_dict(dictionary):
    """Persist a {score: count} dict to distribution.csv, one 'key,value'
    line per entry. The file is closed even if a write fails."""
    # `with` guarantees the handle is released on error (original leaked it).
    with open("distribution.csv", "w") as dict_file:
        for key, value in dictionary.items():
            dict_file.write(str(key) + "," + str(value) + "\n")
def read_dict(fname):
    """Read a 'key,value'-per-line CSV written by save_dict and return it
    as an {int: int} dict. Blank lines are skipped; the file is closed."""
    dictionary = {}
    # Original never closed the handle; `with` fixes the leak.
    with open(fname, "r") as dict_file:
        for line in dict_file:
            line = line.strip()
            if not line:
                continue  # tolerate trailing blank lines
            key, value = line.split(",")
            dictionary[int(key)] = int(value)
    return dictionary
def generate_null_distribution(seq_x, seq_y, scoring_matrix, num_trials):
    """
    Score num_trials local alignments of seq_x against shuffled copies of
    seq_y. Returns {score: count} and also saves it via save_dict.
    """
    distribution = {}
    # BUGFIX: `progressbar` was referenced without ever being imported, so
    # calling this raised NameError. Import lazily and degrade gracefully;
    # also size the bar by num_trials instead of the hard-coded 1000.
    try:
        import progressbar
        bar = progressbar.ProgressBar(max_value=num_trials)
    except ImportError:
        bar = None
    for progress in range(num_trials):
        if bar is not None:
            bar.update(progress)
        rand_y = list(seq_y)
        random.shuffle(rand_y)
        alignment_matrix = project4.compute_alignment_matrix(seq_x, rand_y, scoring_matrix, False)
        score = project4.compute_local_alignment(seq_x, rand_y, scoring_matrix, alignment_matrix)[0]
        distribution[score] = distribution.get(score, 0) + 1
    save_dict(distribution)
    return distribution
def plot_histogram():
    """Plot the null distribution of local-alignment scores as a bar chart."""
    # Toggle: read the cached distribution from disk instead of regenerating
    # it (regeneration runs 1000 local alignments and is slow).
    READ = True
    if READ:
        dist =read_dict("distribution.csv")
    else:
        dist = generate_null_distribution(protein_human, protein_fly, scoring_matrix, 1000)
    x = dist.keys()
    y = dist.values()
    # Normalize raw counts to fractions of the 1000 trials.
    y_normal = [idx/1000.0 for idx in y]
    plt.bar(x, y_normal)
    plt.title("Null distribution using 1000 trials")
    plt.xlabel("Scores")
    plt.ylabel("Fraction of trials")
    plt.show()
#plot_histogram()
"""
calculate the mean and stdard deviation of the distribution of score over disorder sequence
"""
def cal_mean_stdv():
    """Return (mean, population std-dev) of the null-distribution scores
    cached in distribution.csv."""
    dist = read_dict("distribution.csv")
    samples = []
    for score, count in dist.items():
        samples.extend([score] * count)
    total = float(len(samples))
    mean = sum(samples) / total
    variance = sum((value - mean) ** 2 for value in samples) / total
    return mean, math.sqrt(variance)
#print cal_mean_stdv()
"""
Spelling Checking
"""
# Dictionary of valid words, fetched once at import time (network read).
word_list = provided.read_words(provided.WORD_LIST_URL)
def check_spelling(checked_word, dist, word_list):
    """Return the set of words whose edit distance to checked_word is <= dist.

    Uses the identity edit_distance(x, y) = |x| + |y| - score(x, y) under a
    scoring matrix with diag_score=2, off_diag_score=1, dash_score=0.
    """
    letters = set("abcdefghijklmnopqrstuvwxyz")
    matrix = project4.build_scoring_matrix(letters, 2, 1, 0)
    matches = set()
    for candidate in word_list:
        align = project4.compute_alignment_matrix(checked_word, candidate, matrix, True)
        score = project4.compute_global_alignment(checked_word, candidate, matrix, align)[0]
        edit_distance = len(checked_word) + len(candidate) - score
        if edit_distance <= dist:
            matches.add(candidate)
    return matches
def fast_check_spelling(checked_word, dist, word_list):
    """Return the distinct words within edit distance `dist` of checked_word,
    using the recursive bounded-distance check instead of full alignment."""
    return set(word
               for word in set(word_list)
               if check_valid(checked_word, word, dist))
def check_valid(checked_word, word, dist):
    """Return True iff the edit distance (insert/delete/substitute, unit cost)
    between checked_word and word is at most dist.

    Recursive budget check: matching heads cost nothing; otherwise try a
    substitution, an insertion, or a deletion, each spending one unit.
    """
    if dist < 0:
        return False
    if checked_word and word:
        if checked_word[0] == word[0]:
            return check_valid(checked_word[1:], word[1:], dist)
        return (check_valid(checked_word, word[1:], dist - 1)
                or check_valid(checked_word[1:], word, dist - 1)
                or check_valid(checked_word[1:], word[1:], dist - 1))
    # One word is exhausted: every remaining character of the other must be
    # inserted/deleted. (Collapses the original's four redundant branches.)
    return dist >= len(checked_word) + len(word)
#print fast_check_spelling("humble", 1, word_list)
#print fast_check_spelling("firefly", 2, word_list)
|
wchen1994/Alignments-of-Sequences
|
alignSeq/app4.py
|
Python
|
mit
| 5,765
|
[
"Firefly"
] |
d365766e0a43cccc675fb79807a065460ec9a89abe7be7306c8e482934e1ea5e
|
from server.lib.bottle import Bottle, debug,request,route, run, redirect
import json
import random
# use the Jinja templating system
from view_helper import JINJA_ENV
import urllib
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
def invoke_verify(solution, lan, tests=""):
    """Run `solution` against `tests` on the remote grading host for the
    language identified by `lan`; returns the grader's raw response."""
    endpoint = "http://162.222.183.53/" + str(lan)
    return verify(solution, tests, endpoint)
def verify(solution, tests, url):
    """Bundle the solution and tests as JSON and POST them to `url`."""
    payload = json.dumps({"tests": tests, "solution": solution})
    return verify_service(payload, url)
def verify_service(requestJSON, url):
    """POST the JSON request to the grading service and return the raw body."""
    # Py2 stdlib form-encoding (urllib.parse.urlencode on py3).
    params = urllib.urlencode({'jsonrequest': requestJSON})
    # Generous timeout (seconds): remote compile/run of a solution is slow.
    deadline = 100
    # App Engine's urlfetch replaces urllib2 in this environment.
    result = verify_service_result = urlfetch.fetch(url=url,
                          payload=params,
                          method=urlfetch.POST,
                          deadline=deadline,
                          headers={'Content-Type': 'application/x-www-form-urlencoded'})
    return result.content
def random_name_generator():
    """Return a random slug of the form 'adjective-noun-XXX' where XXX is
    three hex digits, e.g. 'falling-late-d27'.

    Originally from https://gist.github.com/1266756, with some changes.
    """
    adjs = [ "autumn", "hidden", "bitter", "misty", "silent", "empty", "dry", "dark",
      "summer", "icy", "delicate", "quiet", "white", "cool", "spring", "winter",
      "patient", "twilight", "dawn", "crimson", "wispy", "weathered", "blue",
      "billowing", "broken", "cold", "damp", "falling", "frosty", "green",
      "long", "late", "lingering", "bold", "little", "morning", "muddy", "old",
      "red", "rough", "still", "small", "sparkling", "throbbing", "shy",
      "wandering", "withered", "wild", "black", "young", "holy", "solitary",
      "fragrant", "aged", "snowy", "proud", "floral", "restless", "divine",
      "polished", "ancient", "purple", "lively", "nameless"
    ]
    nouns = [ "waterfall", "river", "breeze", "moon", "rain", "wind", "sea", "morning",
      "snow", "lake", "sunset", "pine", "shadow", "leaf", "dawn", "glitter",
      "forest", "hill", "cloud", "meadow", "sun", "glade", "bird", "brook",
      "butterfly", "bush", "dew", "dust", "field", "fire", "flower", "firefly",
      "feather", "grass", "haze", "mountain", "night", "pond", "darkness",
      "snowflake", "silence", "sound", "sky", "shape", "surf", "thunder",
      "violet", "water", "wildflower", "wave", "water", "resonance", "sun",
      "wood", "dream", "cherry", "tree", "fog", "frost", "voice", "paper",
      "frog", "smoke", "star"
    ]
    # Renamed from `hex`, which shadowed the builtin of the same name.
    hex_digits = "0123456789abcdef"
    return (random.choice(adjs) + "-" + random.choice(nouns) + "-" +
            random.choice(hex_digits) + random.choice(hex_digits) +
            random.choice(hex_digits))
|
lamkeewei/battleships
|
server/controllers/Utility.py
|
Python
|
apache-2.0
| 2,726
|
[
"Firefly"
] |
4fa4cd98f24bb8bb97caf6ae8e2ac9d7333ec6d4757842bf8b3cd0b221019f35
|
from collections import namedtuple
from bottle_utils.i18n import lazy_gettext as _
__all__ = ('LBAND', 'KUBAND', 'L_PRESETS', 'KU_PRESETS', 'PRESETS')
# A tuner preset: human-readable label, 1-based UI index, and the dict of
# tuner settings applied when the preset is selected.
Preset = namedtuple('Preset', ('label', 'index', 'values'))
# Band identifiers, used as the keys of PRESETS.
LBAND = 'l'
KUBAND = 'ku'
L_PRESETS = [
    # Translators, name of the L-band tuner preset covering AF-EU-ME
    Preset(_('Africa-Europe-Middle East (25E)'), 1, {
        'frequency': '1545.94',
        'uncertainty': '4000',
        'symbolrate': '4200',
        'sample_rate': '1',
        'rf_filter': '20',
        'descrambler': True,
        # Translators, used as coverage area of a transponder
        'coverage': _('Africa, Europe, Middle East'),
    }),
    # Translators, name of the L-band tuner preset covering the Americas
    Preset(_('North and South America (98W)'), 2, {
        'frequency': '1539.8725',
        'uncertainty': '4000',
        'symbolrate': '4200',
        'sample_rate': '1',
        'rf_filter': '20',
        'descrambler': True,
        # Translators, used as coverage area of a transponder
        'coverage': _('North and South America'),
    }),
    # Translators, name of the L-band tuner preset covering Asia-Pacific
    Preset(_('Asia-Pacific (144E)'), 3, {
        'frequency': '1545.9525',
        'uncertainty': '4000',
        'symbolrate': '4200',
        'sample_rate': '1',
        'rf_filter': '20',
        'descrambler': True,
        # Translators, used as coverage area of a transponder
        'coverage': _('Asia, Oceania'),
    }),
]
# Ku-band (satellite TV style) presets; labels are satellite names and are
# intentionally not translated.
KU_PRESETS = [
    Preset('Galaxy 19 (97.0W)', 1, {
        'frequency': '11929',
        'symbolrate': '22000',
        'polarization': 'v',
        'delivery': 'DVB-S',
        'modulation': 'QPSK',
        # Translators, used as coverage area of a transponder
        'coverage': _('North America'),
    }),
    Preset('Hotbird 13 (13.0E)', 2, {
        'frequency': '11471',
        'symbolrate': '27500',
        'polarization': 'v',
        'delivery': 'DVB-S',
        'modulation': 'QPSK',
        # Translators, used as coverage area of a transponder
        'coverage': _('Europe, North Africa'),
    }),
    Preset('Intelsat 20 (68.5E)', 3, {
        'frequency': '12522',
        'symbolrate': '27500',
        'polarization': 'v',
        'delivery': 'DVB-S',
        'modulation': 'QPSK',
        # Translators, used as coverage area of a transponder
        'coverage': _('North and West Europe, Subsaharan Africa'),
    }),
    Preset('AsiaSat 5 C-band (100.5E)', 4, {
        'frequency': '3960',
        'symbolrate': '30000',
        'polarization': 'h',
        'delivery': 'DVB-S',
        'modulation': 'QPSK',
        # Translators, used as coverage area of a transponder
        'coverage': _('Middle East, Asia, Australia'),
    }),
    Preset('Eutelsat (113.0W)', 5, {
        'frequency': '12089',
        'symbolrate': '11719',
        'polarization': 'h',
        'delivery': 'DVB-S',
        'modulation': 'QPSK',
        # Translators, used as coverage area of a transponder
        'coverage': _('North, Middle, and South America'),
    }),
    Preset('ABS-2 (74.9E)', 6, {
        'frequency': '11734',
        'symbolrate': '44000',
        'polarization': 'h',
        'delivery': 'DVB-S',
        'modulation': 'QPSK',
        # Translators, used as coverage area of a transponder
        'coverage': _('India'),
    }),
]
# Lookup of preset list by band identifier.
PRESETS = {
    LBAND: L_PRESETS,
    KUBAND: KU_PRESETS,
}
|
Outernet-Project/librarian
|
librarian/data/tuner_presets.py
|
Python
|
gpl-3.0
| 3,433
|
[
"Galaxy"
] |
d5bb2e87e2daa1c001a8f649d1fe2be6c13e28b3963dc302cdad194e517563ea
|
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import sys
import setuptools
from distutils.command.clean import clean as _clean
from distutils.command.build import build as _build
from setuptools.command.sdist import sdist as _sdist
from setuptools.command.build_ext import build_ext as _build_ext
try:
import multiprocessing
assert multiprocessing
except ImportError:
pass
def strip_comments(l):
    """Drop any '#' comment and surrounding whitespace from a requirements line."""
    head, _sep, _comment = l.partition('#')
    return head.strip()
def reqs(filename):
    """Return the non-empty, comment-stripped lines of requirements/<filename>.

    Always returns a list (the original relied on py2 `filter` returning a
    list, which breaks under py3 where setuptools consumes the result).
    """
    path = os.path.join(os.getcwd(), 'requirements', filename)
    with open(path) as fp:
        return [line for line in (strip_comments(raw) for raw in fp) if line]
setup_ext = {}
if os.path.isfile('gulpfile.js'):
    # If gulpfile.js exists, build/minify the front-end assets during packaging.
    def gulp_build(done=[]):
        # NOTE: the mutable default `done` is deliberate — it memoizes the
        # call so the npm/bower/gulp pipeline runs at most once per process.
        if not done:
            if os.system('npm install '
                         '--disturl=https://npm.taobao.org/dist '
                         '--registry=https://registry.npm.taobao.org'):
                sys.exit(1)
            if os.system('bower install'):
                sys.exit(1)
            if os.system('gulp build'):
                sys.exit(1)
            done.append(1)
    def gulp_clean(done=[]):
        # Same run-once memoization trick as gulp_build.
        if not done:
            if os.system('npm install '
                         '--disturl=https://npm.taobao.org/dist '
                         '--registry=https://registry.npm.taobao.org'):
                sys.exit(1)
            if os.system('gulp clean'):
                sys.exit(1)
            done.append(1)
    class build(_build):
        sub_commands = _build.sub_commands[:]
        # force to build ext
        for ix, (name, checkfunc) in enumerate(sub_commands):
            if name == 'build_ext':
                sub_commands[ix] = (name, lambda self: True)
    class build_ext(_build_ext):
        def run(self):
            # Build front-end assets before the extension build runs.
            gulp_build()
            _build_ext.run(self)
    class sdist(_sdist):
        def run(self):
            gulp_build()
            _sdist.run(self)
    class clean(_clean):
        def run(self):
            _clean.run(self)
            gulp_clean()
    setup_ext = {'cmdclass': {'sdist': sdist,
                              'clean': clean,
                              'build': build,
                              'build_ext': build_ext}}
# Package metadata; install requirements are read from requirements/install.txt.
setup_params = dict(
    name="qsapp-riitc",
    url="http://wiki.yimiqisan.com/",
    version='1.0',
    author="qisan",
    author_email="qisanstudio@gmail.com",
    packages=setuptools.find_packages('src'),
    package_dir={'': 'src'},
    include_package_data=True,
    zip_safe=False,
    install_requires=reqs('install.txt'))
# Merge in the gulp-aware cmdclass overrides when a gulpfile is present.
setup_params.update(setup_ext)
if __name__ == '__main__':
    setuptools.setup(**setup_params)
|
qisanstudio/qsapp-riitc
|
setup.py
|
Python
|
mit
| 2,804
|
[
"GULP"
] |
c538f5ef8bf59c66955da7543f58a1fbf50e904314a27eb1a4daaa907fb321be
|
import h5py
import sys
import os.path as op
from fos import *
import numpy as np
# Load an SWC morphology file; SWC columns are
# [id, type, x, y, z, radius, parent_id] — TODO confirm against the data file.
a=np.loadtxt(op.join(op.dirname(__file__), "..", "data", "rat-basal-forebrain.swc") )
pos = a[:,2:5].astype( np.float32 )
# Radii scaled up 4x for visibility.
radius = a[:,5].astype( np.float32 ) * 4
# extract parent connectivity and create full connectivity
# (SWC parent ids are 1-based; the soma row 0 has no parent and is skipped).
parents = a[1:,6] - 1
parents = parents.astype(np.uint32).T
connectivity = np.vstack( (parents, np.arange(1, len(parents)+1) ) ).T.astype(np.uint32)
#colors = np.random.random( ( (len(connectivity)/2, 4)) )
#colors[:,3] = 1.0
# One RGBA color per edge with 500 animation frames along the last axis;
# channel 3 (alpha) is forced opaque across all frames.
colors = np.random.rand( len(connectivity), 4, 500 ).astype( np.float32 )
colors[:,3] = 1.0
w = Window( dynamic = True )
scene = Scene( scenename = "Main" )
act = DynamicSkeleton( name = "Neuron",
    vertices = pos,
    connectivity = connectivity,
    connectivity_colors=colors) #, radius = radius)
scene.add_actor( act )
w.add_scene( scene )
w.refocus_camera()
act.play()
|
fos/fos
|
examples/dynamic/neuron.py
|
Python
|
bsd-3-clause
| 950
|
[
"NEURON"
] |
d71b0de75cef532b02ac814dc133bf8ef4e73fabd7dff75101d40af7617fc7a4
|
## @package SparseTransformer
# Module caffe2.experiments.python.SparseTransformer
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import workspace
import scipy.sparse
class NetDefNode():
    """A node in an operator graph built from a Caffe2 NetDef.

    Tracks the wrapped operator (`op`, `optype`), upstream producers
    (`prev`) and downstream consumers (`ops`), each keyed by node name.
    """
    def __init__(self, name, optype, p=None, op=None):
        self.name = name
        self.optype = optype
        self.ops = {}     # downstream consumers, keyed by name
        self.prev = {}    # upstream producers, keyed by name
        self.insertInput(p)
        self.visited = False
        self.op = op

    def insertInput(self, p):
        """
        Insert input of this op, maintaining the output set of the
        previous op as well. `p` may be a node, a list of nodes, or None.
        """
        if isinstance(p, NetDefNode):
            p = [p]
        elif not isinstance(p, list):
            return
        for parent in p:
            self.prev[parent.name] = parent
            parent.ops[self.name] = self

    def deleteInput(self, p):
        """Unlink a single parent node; non-node arguments are ignored."""
        if isinstance(p, NetDefNode):
            del self.prev[p.name]
            del p.ops[self.name]
def maskNallocate(weight_name):
    """
    Combine mask and weights: encode the dense weight blob as CSR and feed
    the three parts (data, indptr, indices) back into the workspace.
    Returns the three new blob names (wcsr, iw, jw).
    """
    dense = workspace.FetchBlob(weight_name)
    sparse = scipy.sparse.csr_matrix(dense)
    payloads = (
        (weight_name + "wcsr", sparse.data),
        (weight_name + "iw", sparse.indptr),
        (weight_name + "jw", sparse.indices),
    )
    for blob_name, payload in payloads:
        workspace.FeedBlob(blob_name, payload)
    return weight_name + "wcsr", weight_name + "iw", weight_name + "jw"
def transFCRelu(cur, id2node, name2id, ops, model):
    """
    Rewrite a FC_Prune->(Relu)->...->FC_Prune chain into transposed sparse form:
    insert a Transpose before the chain, replace each FC_Prune with FC_Sparse
    over CSR weights (via maskNallocate), re-emit Relu ops, and close with a
    Transpose back to the chain's original output blob.

    BUGFIX: the original used py2-only `itervalues().next()` / `iteritems()`
    even though the module imports __future__ for py2/py3 compatibility;
    replaced with `next(iter(...))` / `.items()`, which behave identically.
    """
    # 1. add trans before the start of this chain
    # assuming that cur is a FC_Prune, and it has only one input
    pre = next(iter(cur.prev.values()))
    # Create a node /op and insert it.
    # TODO(wyiming): check whether it is correct here
    current_blob = model.Transpose(cur.op.input[0], cur.op.input[0] + "_trans")
    trans_op = model.net.Proto().op[-1]
    trans_node = NetDefNode(trans_op.output[0], "Transpose", pre, trans_op)
    trans_node.visited = True
    pre_new = trans_node
    # 2. use while loop to visit the chain
    while True:
        # breakup with the parent
        cur.deleteInput(pre)
        if not (cur.optype == "FC_Prune" or cur.optype == "Relu"):
            print("Reaching the end of the chain")
            break
        if len(cur.ops) > 1:
            print("A FC/Relu giving more than 1 useful outputs")
        if cur.optype == "FC_Prune":
            op = cur.op
            wcsr, iw, jw = maskNallocate(op.input[1])
            bias_name = op.input[3]
            # TODO(wyiming): create a new Op here
            current_blob = model.FC_Sparse(current_blob,
                                           cur.op.output[0] + "_Sparse",
                                           wcsr, iw, jw, bias_name)
            sps_op = model.net.Proto().op[-1]
            sps_node = NetDefNode(cur.op.output[0] + "_Sparse",
                                  "FC_Sparse",
                                  pre_new, sps_op)
            sps_node.visited = True
            pre_new = sps_node
        if cur.optype == "Relu":
            op = cur.op
            current_blob = model.Relu(current_blob, current_blob)
            rel_op = model.net.Proto().op[-1]
            rel_node = NetDefNode(str(current_blob), "Relu",
                                  pre_new, rel_op)
            rel_node.visited = True
            pre_new = rel_node
        cur.visited = True
        pre = cur
        flag = False
        for _, temp in cur.ops.items():
            if temp.optype == "Relu" or temp.optype == "FC_Prune":
                flag = True
                cur = temp
        if not flag:
            # assume that there is only 1 output that is not PrintOP
            cur = next(iter(cur.ops.values()))
            cur.deleteInput(pre)
            print("No FC/RElu children")
            print(cur.op.type)
            break
    # 3. add trans after this chain like 1.
    current_blob = model.Transpose(current_blob, pre.op.output[0])
    trans_op = model.net.Proto().op[-1]
    trans_node = NetDefNode(str(current_blob), "Transpose", pre_new, trans_op)
    trans_node.visited = True
    cur.insertInput(trans_node)
    print(cur.prev)
    print(trans_node.ops)
def Prune2Sparse(cur, id2node, name2id, ops, model):
    """Depth-first walk of the graph converting every unvisited FC_Prune
    chain to sparse form via transFCRelu.

    BUGFIX: `iteritems()` is py2-only; the module imports __future__ for
    py2/py3 compatibility, so use `.items()` (identical iteration order).
    """
    # Assume that FC and Relu takes in only 1 input;
    # If not raise warning
    if not cur.visited and cur.optype == "FC_Prune":
        transFCRelu(cur, id2node, name2id, ops, model)
    cur.visited = True
    for name, n in cur.ops.items():
        Prune2Sparse(n, id2node, name2id, ops, model)
def net2list(net_root):
    """
    Return the ops of a net in topological (BFS) order starting from the
    children of net_root.

    BUGFIX: py2-only `iteritems()` replaced with `.values()` (module imports
    __future__ for py3 compat); the O(n) `queue = queue[1:]` per pop is
    replaced with an index pointer, making the walk linear.
    """
    op_list = []
    bfs_queue = list(net_root.ops.values())
    head = 0
    while head < len(bfs_queue):
        node = bfs_queue[head]
        head += 1
        op_list.append(node.op)
        bfs_queue.extend(node.ops.values())
    return op_list
def netbuilder(model):
    """Build a NetDefNode graph from model.net's proto.

    Returns (root, name2id, id2node) where name2id maps output blob names to
    the id of the producing op and id2node maps op ids to graph nodes. Ops
    with no layer inputs hang off the synthetic root; Print ops are skipped.
    """
    print("Welcome to model checker")
    proto = model.net.Proto()
    name2id = {}
    id2node = {}
    root = NetDefNode("net_root", "root", None)
    for op_id, op in enumerate(proto.op):
        if op.type == "Print":
            continue  # Print ops are debug-only; keep them out of the graph
        if op.name:
            op_name = '%s/%s (op#%d)' % (op.name, op.type, op_id)
        else:
            op_name = '%s (op#%d)' % (op.type, op_id)
        node = NetDefNode(op_name, op.type, op=op)
        id2node[op_id] = node
        has_layer_input = False
        for input_name in op.input:
            if input_name not in name2id:
                # assume that un_occured name are non_layers
                # TODO: write a non-layer checker and log it
                continue
            node.insertInput(id2node[name2id[input_name]])
            has_layer_input = True
        if not has_layer_input:
            node.insertInput(root)
        for output_name in op.output:
            name2id[output_name] = op_id
    return root, name2id, id2node
|
xzturn/caffe2
|
caffe2/experiments/python/SparseTransformer.py
|
Python
|
apache-2.0
| 6,362
|
[
"VisIt"
] |
288a4bf9f147a79d349d25a3f0baec2dc4dc550b7c6147212914ac08287612a9
|
""" Tests generating files for qm torsion scan """
import unittest
from torsionfit.tests.utils import get_fn, has_openeye
import torsionfit.qmscan.torsion_scan as qmscan
from torsionfit.qmscan import utils
from openmoltools import openeye
import tempfile
import os
from fnmatch import fnmatch
import shutil
class TestQmscan(unittest.TestCase):
    """Tests for QM torsion-scan file generation and OpenEye atom-map utils."""
    @unittest.skipUnless(has_openeye, 'Cannot test without openeye')
    def test_generat_torsions(self):
        """ Tests finding torsion to drive """
        from openeye import oechem
        infile = get_fn('butane.pdb')
        ifs = oechem.oemolistream(infile)
        inp_mol = oechem.OEMol()
        oechem.OEReadMolecule(ifs, inp_mol)
        # NOTE(review): mkdtemp() returns a path string, so [1] takes its
        # second character — probably meant plain tempfile.mkdtemp() (or
        # mkstemp()[1]); confirm intent.
        outfile_path = tempfile.mkdtemp()[1]
        qmscan.generate_torsions(inp_mol=inp_mol, output_path=outfile_path, interval=30, tar=False)
        input_files = []
        pattern = '*.pdb'
        # Collect all generated PDBs under the output tree.
        for path, subdir, files in os.walk(outfile_path):
            for name in files:
                if fnmatch(name, pattern):
                    input_files.append(os.path.join(path, name))
        contents = open(input_files[0]).read()
        pdb = get_fn('butane_10_7_4_3_0.pdb')
        compare_contents = open(pdb).read()
        self.assertEqual(contents, compare_contents )
        shutil.rmtree(outfile_path)
    def test_generate_input(self):
        """Test generate psi4 input files"""
        root = get_fn('torsion_scan/10_7_4_3')
        qmscan.generate_scan_input(root, 'pdb', 'butane', ['MP2'], ['aug-cc-pvtz'], symmetry='C1')
        contents = open(get_fn('torsion_scan/10_7_4_3/0/butane_10_7_4_3_0.dat')).read()
        compare_content = open(get_fn('butane_10_7_4_3_0.dat')).read()
        self.assertEqual(contents, compare_content)
    @unittest.skipUnless(has_openeye, 'Cannot test without OpenEye')
    def test_tagged_smiles(self):
        """Test index-tagged smiles"""
        from openeye import oechem
        inf = get_fn('ethylmethylidyneamonium.mol2')
        ifs = oechem.oemolistream(inf)
        inp_mol = oechem.OEMol()
        oechem.OEReadMolecule(ifs, inp_mol)
        tagged_smiles = utils.create_mapped_smiles(inp_mol)
        # Tags should always be the same as mol2 molecule ordering
        self.assertEqual(tagged_smiles, '[H:5][C:1]#[N+:4][C:3]([H:9])([H:10])[C:2]([H:6])([H:7])[H:8]')
    @unittest.skipUnless(has_openeye, "Cannot test without OpneEye")
    def test_atom_map(self):
        """Test get atom map"""
        from openeye import oechem
        tagged_smiles = '[H:5][C:1]#[N+:4][C:3]([H:9])([H:10])[C:2]([H:6])([H:7])[H:8]'
        mol_1 = openeye.smiles_to_oemol('CC[N+]#C')
        inf = get_fn('ethylmethylidyneamonium.mol2')
        ifs = oechem.oemolistream(inf)
        mol_2 = oechem.OEMol()
        oechem.OEReadMolecule(ifs, mol_2)
        atom_map = utils.get_atom_map(tagged_smiles, mol_1)
        # Relabel atoms by map position in both molecules; canonical SMILES
        # then match iff the mapping is consistent.
        for i, mapping in enumerate(atom_map):
            atom_1 = mol_1.GetAtom(oechem.OEHasAtomIdx(atom_map[mapping]))
            atom_1.SetAtomicNum(i+1)
            atom_2 = mol_2.GetAtom(oechem.OEHasAtomIdx(mapping-1))
            atom_2.SetAtomicNum(i+1)
        self.assertEqual(oechem.OECreateCanSmiString(mol_1), oechem.OECreateCanSmiString(mol_2))
        # Test aromatic molecule
        tagged_smiles = '[H:10][c:4]1[c:3]([c:2]([c:1]([c:6]([c:5]1[H:11])[H:12])[C:7]([H:13])([H:14])[H:15])[H:8])[H:9]'
        mol_1 = openeye.smiles_to_oemol('Cc1ccccc1')
        inf = get_fn('toluene.mol2')
        ifs = oechem.oemolistream(inf)
        mol_2 = oechem.OEMol()
        oechem.OEReadMolecule(ifs, mol_2)
        atom_map = utils.get_atom_map(tagged_smiles, mol_1)
        for i, mapping in enumerate(atom_map):
            atom_1 = mol_1.GetAtom(oechem.OEHasAtomIdx(atom_map[mapping]))
            atom_1.SetAtomicNum(i+1)
            atom_2 = mol_2.GetAtom(oechem.OEHasAtomIdx(mapping-1))
            atom_2.SetAtomicNum(i+1)
        self.assertEqual(oechem.OECreateCanSmiString(mol_1), oechem.OECreateCanSmiString(mol_2))
    @unittest.skipUnless(has_openeye, "Cannot test without OpenEye")
    def test_atom_map_order(self):
        """Test atom map"""
        from openeye import oechem
        tagged_smiles = '[H:5][C:1]#[N+:4][C:3]([H:9])([H:10])[C:2]([H:6])([H:7])[H:8]'
        mol_from_tagged_smiles = openeye.smiles_to_oemol(tagged_smiles)
        atom_map = utils.get_atom_map(tagged_smiles, mol_from_tagged_smiles)
        # Compare atom map to tag
        for i in range(1, len(atom_map) +1):
            atom_1 = mol_from_tagged_smiles.GetAtom(oechem.OEHasAtomIdx(atom_map[i]))
            self.assertEqual(i, atom_1.GetMapIdx())
    @unittest.skipUnless(has_openeye, "Cannot test without OpneEye")
    def test_mapped_xyz(self):
        """Test writing out mapped xyz"""
        from openeye import oechem, oeomega
        tagged_smiles = '[H:10][c:4]1[c:3]([c:2]([c:1]([c:6]([c:5]1[H:11])[H:12])[C:7]([H:13])([H:14])[H:15])[H:8])[H:9]'
        mol_1 = openeye.smiles_to_oemol('Cc1ccccc1')
        inf = get_fn('toluene.mol2')
        ifs = oechem.oemolistream(inf)
        mol_2 = oechem.OEMol()
        oechem.OEReadMolecule(ifs, mol_2)
        atom_map = utils.get_atom_map(tagged_smiles, mol_1)
        for i, mapping in enumerate(atom_map):
            atom_1 = mol_1.GetAtom(oechem.OEHasAtomIdx(atom_map[mapping]))
            atom_1.SetAtomicNum(i+1)
            atom_2 = mol_2.GetAtom(oechem.OEHasAtomIdx(mapping-1))
            atom_2.SetAtomicNum(i+1)
        xyz_1 = utils.to_mapped_xyz(mol_1, atom_map)
        # molecule generated from mol2 should be in the right order.
        atom_map_mol2 = {1:0, 2:1, 3:2, 4:3, 5:4, 6:5, 7:6, 8:7, 9:8, 10:9, 11:10, 12:11, 13:12, 14:13, 15:14}
        xyz_2 = utils.to_mapped_xyz(mol_2, atom_map_mol2)
        # Compare element symbols line-by-line (coordinates may differ).
        for ele1, ele2 in zip(xyz_1.split('\n')[:-1], xyz_2.split('\n')[:-1]):
            self.assertEqual(ele1.split(' ')[2], ele2.split(' ')[2])
|
choderalab/Torsions
|
torsionfit/tests/test_qmscan.py
|
Python
|
gpl-2.0
| 5,916
|
[
"Psi4"
] |
8fafa06ce76f9018b9217b110728509a4e03a109073be3e98617971637e31cd9
|
#
# Copyright 2015 LinkedIn Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
import sys,os,re
import requests
import subprocess
from wherehows.common import Constant
from wherehows.common.schemas import SCMOwnerRecord
from wherehows.common.writers import FileWriter
from org.slf4j import LoggerFactory
class CodeSearchExtract:
  """
  Lists all repos for oracle & espresso databases. Since this feature is not
  available through the UI, we need to use http://go/codesearch to discover
  the multiproduct repos that use 'li-db' plugin.
  """
  # verbose = False
  limit_search_result = 500
  # limit_multiproduct = None
  # limit_plugin = None
  def __init__(self):
    # NOTE(review): `args` is a module-global injected by the jython job
    # runner before this class is instantiated — confirm upstream wiring.
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    self.base_url = args[Constant.BASE_URL_KEY]
    self.code_search_committer_writer = FileWriter(args[Constant.DATABASE_SCM_REPO_OUTPUT_KEY])
  def run(self):
    # Phase 1: page through code-search results for gradle files using the
    # 'li-db' plugin, collecting candidate database.properties paths.
    offset_min = 1
    offset_max = 100
    databases = []
    search_request = \
      {"request":
        {
          "other":{"CurrentResult":str(offset_min),"requestTimeout":"200000000"},
          "queryContext":{"numToScore":1000,"docDataSet":"results","rawQuery":"type:gradle plugin:*'li-db'"},
          "paginationContext":{"numToReturn":offset_max}
        }
      }
    while True:
      # NOTE(review): verify=False disables TLS certificate validation —
      # presumably an internal endpoint; flagged for security review.
      resp = requests.post(self.base_url + '/galene-codesearch?action=search',
                           json=search_request,
                           verify=False)
      if resp.status_code != 200:
        # This means something went wrong.
        d = resp.json()
        self.logger.info("Request Error! Stack trace {}".format(d['stackTrace']))
        # raise Exception('Request Error', 'POST /galene-codesearch?action=search %s' % (resp.status_code))
        break
      result = resp.json()['value']
      self.logger.debug("Pagination offset = {}".format(result['total']))
      for element in result['elements']:
        fpath = element['docData']['filepath']
        ri = fpath.rindex('/')
        prop_file = fpath[:ri] + '/database.properties'
        # e.g. identity-mt/database/Identity/database.properties
        # network/database/externmembermap/database.properties
        # cap-backend/database/campaigns-db/database.properties
        try:
          databases.append( {'filepath': prop_file, 'app_name': element['docData']['mp']} )
        except:
          # NOTE(review): bare except silently swallows any error (e.g. a
          # missing 'mp' key); consider narrowing to KeyError.
          self.logger.error("Exception happens with prop_file {}".format(prop_file))
      if result['total'] < 100:
        break
      offset_min += int(result['total'])
      offset_max += 100 # if result['total'] < 100 else result['total']
      search_request['request']['other']['CurrentResult'] = str(offset_min)
      search_request['request']['paginationContext']['numToReturn'] = offset_max
    self.logger.debug("Property file path {}".format(search_request))
    self.logger.debug(" length of databases is {}".format(len(databases)))
    # Phase 2: fetch each database.properties, resolve its repo URL and
    # committers, and write one SCMOwnerRecord per database.
    owner_count = 0
    committers_count = 0
    for db in databases:
      prop_file = db['filepath']
      file_request = \
        {"request":{
          "other":{"filepath":prop_file,
                   "TextTokenize":"True",
                   "CurrentResult":"1",
                   "requestTimeout":"2000000000"
                  },
          "queryContext":{"numToScore":10,"docDataSet":"result"},
          "paginationContext":{"numToReturn":1}
          }
        }
      resp = requests.post(self.base_url + '/galene-codesearch?action=search',
                           json=file_request,
                           verify=False)
      if resp.status_code != 200:
        # This means something went wrong.
        d = resp.json()
        self.logger.info("Request Error! Stack trace {}".format(d['stackTrace']))
        continue
      result = resp.json()['value']
      if result['total'] < 1:
        self.logger.info("Nothing found for {}".format(prop_file))
        continue
      if "repoUrl" in result['elements'][0]['docData']:
        db['scm_url'] = result['elements'][0]['docData']['repoUrl']
        db['scm_type'] = result['elements'][0]['docData']['repotype']
        db['committers'] = ''
        if db['scm_type'] == 'SVN':
          # Rewrite the markup-view URL of database.properties into the
          # sibling 'schema' directory on the canonical svn host.
          schema_in_repo = re.sub(r"http://(\w+)\.([\w\.\-/].*)database.properties\?view=markup",
                                  "http://svn." + r"\2" + "schema", db['scm_url'])
          db['committers'] = self.get_svn_committers(schema_in_repo)
          committers_count +=1
          self.logger.info("Committers for {} => {}".format(schema_in_repo,db['committers']))
      else:
        self.logger.info("Search request {}".format(prop_file))
      # Parse the key=value body of database.properties.
      code = result['elements'][0]['docData']['code']
      try:
        code_dict = dict(line.split("=", 1) for line in code.strip().splitlines())
        db['database_name'] = code_dict['database.name']
        db['database_type'] = code_dict['database.type']
        owner_record = SCMOwnerRecord(
          db['scm_url'],
          db['database_name'],
          db['database_type'],
          db['app_name'],
          db['filepath'],
          db['committers'],
          db['scm_type']
        )
        owner_count += 1
        self.code_search_committer_writer.append(owner_record)
      except Exception as e:
        self.logger.error(e)
        self.logger.error("Exception happens with code {}".format(code))
    self.code_search_committer_writer.close()
    self.logger.info('Finish Fetching committers, total {} committers entries'.format(committers_count))
    self.logger.info('Finish Fetching SVN owners, total {} records'.format(owner_count))
  def get_svn_committers(self, svn_repo_path):
    """Collect recent committers from the cmd
      svn log %s | grep '^\(A=\|r[0-9]* \)' | head -10
    e.g.
      r1617887 | htang | 2016-09-21 14:27:40 -0700 (Wed, 21 Sep 2016) | 12 lines
      A=shanda,pravi
      r1600397 | llu | 2016-08-08 17:14:22 -0700 (Mon, 08 Aug 2016) | 3 lines
      A=rramakri,htang
    """
    #svn_cmd = """svn log %s | grep '^\(A=\|r[0-9]* \)' | head -10"""
    committers = []
    # Some repos live under '<path>ta' instead of '<path>'; try both.
    possible_svn_paths = [svn_repo_path, svn_repo_path + "ta"]
    for svn_repo_path in possible_svn_paths:
      p = subprocess.Popen('svn log ' + svn_repo_path + " |grep '^\(A=\|r[0-9]* \)' |head -10",
                           shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      svn_log_output, svn_log_err = p.communicate()
      if svn_log_err[:12] == 'svn: E160013':
        continue # try the next possible path
      for line in svn_log_output.split('\n'):
        if re.match(r"r[0-9]+", line):
          # Revision header line: committer is the second pipe-delimited field.
          committer = line.split('|')[1].strip()
          if committer not in committers:
            committers.append(committer)
        elif line[:2] == 'A=':
          # Approver list line: comma-separated user ids.
          for apvr in line[2:].split(','):
            if apvr not in committers:
              committers.append(apvr)
      if len(committers) > 0:
        self.logger.debug(" {}, ' => ', {}".format(svn_repo_path,committers))
        break
    return ','.join(committers)
# NOTE(review): CodeSearchExtract.__init__ indexes `args` with Constant.* keys
# (dict-like access), yet sys.argv[1] is a plain string — presumably the
# jython launcher passes a property map as the first argument; confirm.
if __name__ == "__main__":
  args = sys.argv[1]
  e = CodeSearchExtract()
  e.run()
|
thomas-young-2013/wherehowsX
|
metadata-etl/src/main/resources/jython/CodeSearchExtract.py
|
Python
|
apache-2.0
| 8,557
|
[
"ESPResSo"
] |
e848ed7506bfa69911b3fac582260fa177a646a7477260443389a241eb069cf6
|
''' Significant lifting from https://jmetzen.github.io/2015-11-27/vae.html '''
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import rnn
import random
import matplotlib.pyplot as plt
import re, string
from sklearn.feature_extraction.text import CountVectorizer
from collections import defaultdict
import pickle as pkl
import itertools
import ctc_loss
import os
# Vocabulary size: 50000 ids minus 2 reserved slots.
n=50000-2
def map_lambda():
    # defaultdict factory for the word->id map: OOV words get id n+1 (<UNK>).
    return n+1
def rev_map_lambda():
    # defaultdict factory for the id->word map: unknown ids render as "<UNK>".
    return "<UNK>"
def load_text(n,num_samples=None):
    """Load the pickled (word, definition) training data and return the
    cached one-hot/id arrays as (X, y, mask, rev_map).

    NOTE(review): relies on pre-built cache files (mapaoh.pkl, rev_mapaoh.pkl,
    Xaoh.npy, yaoh.npy, maskaoh.npy); the code that builds them is commented
    out below — confirm the caches match the current vocabulary size `n`.
    """
    # fname = 'Oxford_English_Dictionary.txt'
    # txt = []
    # with open(fname,'rb') as f:
    # txt = f.readlines()
    # txt = [x.decode('utf-8').strip() for x in txt]
    # txt = [re.sub(r'[^a-zA-Z ]+', '', x) for x in txt if len(x) > 1]
    # List of words
    # word_list = [x.split(' ', 1)[0].strip() for x in txt]
    # # List of definitions
    # def_list = [x.split(' ', 1)[1].strip()for x in txt]
    with open('./training_data/training_data.pkl','rb') as raw:
        word_list,dl=pkl.load(raw)
    def_list=[]
    # def_list=[' '.join(defi) for defi in def_list]
    i=0
    # words={}
    # Drop words whose definition list is empty; join the rest into strings.
    while i<len( dl):
        defi=dl[i]
        if len(defi)>0:
            def_list+=[' '.join(defi)]
            i+=1
        else:
            dl.pop(i)
            word_list.pop(i)
    # for w,d in zip(word_list,def_list):
    # if w not in words:
    # words[w]=[]
    # words[w].append(d)
    # word_list=[]
    # def_list=[]
    # for word in words:
    # word_list.append(word)
    # # def_list.append(random.choice(words[word]))
    # def_list.append(words[word][0])
    maxlen=0
    minlen=100
    for defi in def_list:
        minlen=min(minlen,len(defi.split()))
        maxlen=max(maxlen,len(defi.split()))
    print(minlen)
    print(maxlen)
    # Definitions are capped at a fixed 30 tokens regardless of observed max.
    maxlen=30
    # # Initialize the "CountVectorizer" object, which is scikit-learn's
    # # bag of words tool.
    # vectorizer = CountVectorizer(analyzer = "word", \
    # tokenizer = None, \
    # preprocessor = None, \
    # stop_words = None, \
    # max_features = None, \
    # token_pattern='\\b\\w+\\b') # Keep single character words
    # _map,rev_map=get_one_hot_map(word_list,def_list,n)
    # pkl.dump(_map,open('mapaoh.pkl','wb'))
    # pkl.dump(rev_map,open('rev_mapaoh.pkl','wb'))
    _map=pkl.load(open('mapaoh.pkl','rb'))
    rev_map=pkl.load(open('rev_mapaoh.pkl','rb'))
    # exit()
    # NOTE(review): this overwrites a caller-supplied num_samples whenever one
    # IS given — the condition looks inverted (should probably be `is None`).
    if num_samples is not None:
        num_samples=len(word_list)
    # X = (36665, 56210)
    # X = map_one_hot(word_list[:num_samples],_map,1,n)
    # # y = (36665, 56210)
    # # print _map
    # y,mask = map_one_hot(def_list[:num_samples],_map,maxlen,n)
    # np.save('Xaoh',X)
    # np.save('yaoh',y)
    # np.save('maskaoh',mask)
    # Memory-mapped loads ('r') avoid reading the full arrays into RAM.
    X=np.load('Xaoh.npy','r')
    y=np.load('yaoh.npy','r')
    mask=np.load('maskaoh.npy','r')
    print (np.max(y))
    return X, y, mask,rev_map
def get_one_hot_map(to_def,corpus,n):
# Build a word<->index vocabulary over the defined words plus the definition
# corpus, keeping the n most frequent tokens. Returns (_map, rev_map) where
# _map: word -> index and rev_map: index -> word; both are defaultdicts whose
# fallbacks come from the module-level map_lambda / rev_map_lambda.
# Index 0 and n+1/n+2 are reserved for start/end/<UNK> tokens (see below).
# words={}
# for line in to_def:
# if line:
# words[line.split()[0]]=1
# counts=defaultdict(int)
# uniq=defaultdict(int)
# for line in corpus:
# for word in line.split():
# if word not in words:
# counts[word]+=1
# words=list(words.keys())
words=[]
counts=defaultdict(int)
uniq=defaultdict(int)
# Count token frequencies across both the headwords and the definition text.
# NOTE(review): `words` is always empty here, so `word not in words` is always
# True and every token is counted — confirm the dead filter is intentional.
for line in to_def+corpus:
for word in line.split():
if word not in words:
counts[word]+=1
_map=defaultdict(map_lambda)
rev_map=defaultdict(rev_map_lambda)
# words=words[:25000]
# uniq is a histogram of frequencies (how many words occur i times),
# printed below purely as a diagnostic.
for i in counts.values():
uniq[i]+=1
print (len(words))
# random.shuffle(words)
# Keep the n most frequent tokens, ordered by descending count.
words+=list(map(lambda z:z[0],reversed(sorted(counts.items(),key=lambda x:x[1]))))[:n-len(words)]
print (len(words))
i=0
# random.shuffle(words)
# for num_bits in range(binary_dim):
# for bit_config in itertools.combinations_with_replacement(range(binary_dim),num_bits+1):
# bitmap=np.zeros(binary_dim)
# bitmap[np.array(bit_config)]=1
# num=bitmap*(2** np.arange(binary_dim ))
# num=np.sum(num)
# num=int(num)
# word=words[i]
# _map[word]=num
# rev_map[num]=word
# i+=1
# if i>=len(words):
# break
# if i>=len(words):
# break
# i+=1
# Assign indices starting at 1 (0 is reserved for the start/'.' token).
for word in words:
i+=1
_map[word]=i
rev_map[i]=word
rev_map[n+1]='<UNK>'
# Reserved special indices depend on the module-level zero_end_tok flag.
if zero_end_tok:
rev_map[0]='.'
else:
rev_map[0]='Start'
rev_map[2]='End'
print (list(reversed(sorted(uniq.items()))))
print (len(list(uniq.items())))
print (len(rev_map.keys()))
print(len(_map.keys()))
print ('heyo')
# print rev_map
return _map,rev_map
def map_word_emb(corpus,_map):
### NOTE: ONLY WORKS ON TARGET WORD (DOES NOT HANDLE UNK PROPERLY)
# Map each word in `corpus` to its embedding/index via `_map`, lowercasing
# first. Tracks the fraction of words missing from _map (they still get the
# defaultdict fallback value). When the module-level get_rand_vec flag is
# set, also returns a second array of embeddings for randomly chosen
# *different* words (negative samples).
rtn=[]
rtn2=[]
num_failed=0
num_counted=0
for word in corpus:
w=word.lower()
num_counted+=1
if w not in _map:
num_failed+=1
mapped=_map[w]
rtn.append(mapped)
if get_rand_vec:
# Draw a random key distinct from the current word for the negative sample.
mapped_rand=random.choice(list(_map.keys()))
while mapped_rand==word:
mapped_rand=random.choice(list(_map.keys()))
mapped_rand=_map[mapped_rand]
rtn2.append(mapped_rand)
print 'fuck',num_failed/float(num_counted)
if get_rand_vec:
return np.array(rtn),np.array(rtn2)
return np.array(rtn)
def map_one_hot(corpus,_map,maxlen,n):
# Encode `corpus` through the vocabulary `_map`.
# maxlen==1: each element is a single token; returns either a one-hot matrix
#   [len(corpus), n+3] (form2 False) or a flat index vector (form2 True).
# maxlen>1: each element is a sentence; returns (rtn, mask) of shape
#   [len(corpus), maxlen+2] — positions shifted by one for a start slot,
#   with an end token (n+2, or 0 when zero_end_tok) appended.
# `form2` and `zero_end_tok` are module-level flags.
if maxlen==1:
if not form2:
total_not=0
rtn=np.zeros([len(corpus),n+3],dtype=np.float32)
for l,line in enumerate(corpus):
if len(line)==0:
# Empty entry: set the dedicated "empty" column.
rtn[l,-1]=1
else:
mapped=_map[line]
# 75001 is the <UNK> index for the 75k vocab; count misses.
if mapped==75001:
total_not+=1
rtn[l,mapped]=1
print (total_not,len(corpus))
return rtn
else:
total_not=0
rtn=np.zeros([len(corpus)],dtype=np.float32)
for l,line in enumerate(corpus):
if len(line)==0:
# NOTE(review): rtn is 1-D in this branch, so rtn[l,-1] would raise
# IndexError if an empty line ever occurs — confirm inputs are non-empty.
rtn[l,-1]=1
else:
mapped=_map[line]
if mapped==75001:
total_not+=1
rtn[l]=mapped
print (total_not,len(corpus))
return rtn
else:
if form2:
rtn=np.zeros([len(corpus),maxlen+2],dtype=np.float32)
else:
rtn=np.zeros([len(corpus),maxlen+2],dtype=np.int32)
print (rtn.shape)
mask=np.zeros([len(corpus),maxlen+2],dtype=np.float32)
print (mask.shape)
# Column 1 is always valid (start slot).
mask[:,1]=1.0
totes=0
nopes=0
wtf=0
for l,_line in enumerate(corpus):
x=0
line=_line.split()
# Fill tokens into columns 1..maxlen-1; wtf counts <UNK> (index n+1) hits.
for i in range(min(len(line),maxlen-1)):
# if line[i] not in _map:
# nopes+=1
mapped=_map[line[i]]
rtn[l,i+1]=mapped
if mapped==n+1:
wtf+=1
mask[l,i+1]=1.0
totes+=1
x=i+1
# Append the end token right after the last real token.
to_app=n+2
if zero_end_tok:
to_app=0
rtn[l,x+1]=to_app
mask[l,x+1]=1.0
print (nopes,totes,wtf)
return rtn,mask
def xavier_init(fan_in, fan_out, constant=1e-4):
    """Xavier (Glorot) uniform initialization of network weights.

    Samples a (fan_in, fan_out) float32 tensor uniformly from
    [-c*sqrt(6/(fan_in+fan_out)), +c*sqrt(6/(fan_in+fan_out))].
    See https://stackoverflow.com/questions/33640581 for the recipe.
    """
    bound = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform(
        (fan_in, fan_out), minval=-bound, maxval=bound, dtype=tf.float32
    )
class VariationalAutoencoder(object):
""" Variation Autoencoder (VAE) with an sklearn-like interface implemented using TensorFlow.
This implementation uses probabilistic encoders and decoders using Gaussian
distributions and realized by multi-layer perceptrons. The VAE can be learned
end-to-end.
See "Auto-Encoding Variational Bayes" by Kingma and Welling for more details.
"""
def __init__(self, network_architecture, transfer_fct=tf.nn.softplus,
learning_rate=0.001, batch_size=100,generative=False,ctrain=False,test=False,global_step=None):
self.network_architecture = network_architecture
self.transfer_fct = transfer_fct
self.learning_rate = learning_rate
print self.learning_rate
self.batch_size = batch_size
if global_step is None:
global_step=tf.Variable(0,trainiable=False)
self.global_step=global_step
# tf Graph input
self.n_words=network_architecture['n_input']
if not form2:
self.x = tf.placeholder(tf.float32, [None,self.n_words],name='x_in')
else:
self.x = tf.placeholder(tf.int32, [None],name='x_in')
self.intype=type(self.x)
if not form2:
self.caption_placeholder = tf.placeholder(tf.int32, [None,network_architecture["maxlen"]],name='caption_placeholder')
else:
self.caption_placeholder = tf.placeholder(tf.int32, [None, network_architecture["maxlen"]],name='caption_placeholder')
print self.caption_placeholder.shape
self.mask=tf.placeholder(tf.float32, [None, network_architecture["maxlen"]],name='mask')
self.timestep=tf.placeholder(tf.float32,[],name='timestep')
# Create autoencoder network
to_restore=None
with tf.device('/cpu:0'):
self.embw=tf.Variable(xavier_init(network_architecture['n_input'],network_architecture['n_z']),name='embw')
self.embb=tf.Variable(tf.zeros([network_architecture['n_z']]),name='embb')
if not generative:
self._create_network()
# Define loss function based variational upper-bound and
# corresponding optimizer
to_restore=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
self._create_loss_optimizer()
self.test=test
else:
self._build_gen()
# Initializing the tensor flow variables
init = tf.global_variables_initializer()
# Launch the session
self.sess = tf.InteractiveSession()
if embeddings_trainable:
self.saver = tf.train.Saver(var_list=to_restore,max_to_keep=100)
saved_path=tf.train.latest_checkpoint(model_path)
else:
self.saver= tf.train.Saver(var_list=self.untrainable_variables,max_to_keep=100)
mod_path=model_path
if use_ctc:
mod_path=mod_path[:-3]
saved_path=tf.train.latest_checkpoint(mod_path.replace('defdef','embtransfer'))
self.sess.run(init)
if ctrain:
self.saver.restore(self.sess, saved_path)
self.saver=tf.train.Saver(max_to_keep=100)
def _create_network(self):
# Initialize autoencode network weights and biases
network_weights = self._initialize_weights(**self.network_architecture)
start_token_tensor=tf.constant((np.zeros([self.batch_size,binary_dim])).astype(np.float32),dtype=tf.float32)
self.network_weights=network_weights
seqlen=tf.cast(tf.reduce_sum(self.mask,reduction_indices=-1),tf.int32)
self.embedded_input_KLD_loss=tf.constant(0.0)
self.input_embedding_KLD_loss=tf.constant(0.0)
self.deb=tf.constant(0)
self.input_KLD_loss=tf.constant(0.0)
def train_encoder():
embedded_input,embedded_input_KLD_loss=self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['input_meaning'],tf.reshape(self.caption_placeholder,[-1]),logit=True)
embedded_input=tf.reshape(embedded_input,[-1,self.network_architecture['maxlen'],self.network_architecture['n_lstm_input']])
if not vanilla:
embedded_input_KLD_loss=tf.reshape(embedded_input_KLD_loss,[-1,self.network_architecture['maxlen']])[:,1:]
encoder_input=embedded_input[:,1:,:]
cell=tf.contrib.rnn.BasicLSTMCell(self.network_architecture['n_lstm_input'])
if lstm_stack>1:
cell=tf.contrib.rnn.MultiRNNCell([cell]*lstm_stack)
if not use_bdlstm:
encoder_outs,encoder_states=rnn.dynamic_rnn(cell,encoder_input,sequence_length=seqlen-1,dtype=tf.float32,time_major=False)
else:
backward_cell=tf.contrib.rnn.BasicLSTMCell(self.network_architecture['n_lstm_input'])
if lstm_stack>1:
backward_cell=tf.contrib.rnn.MultiRNNCell([backward_cell]*lstm_stack)
encoder_outs,encoder_states=rnn.bidirectional_dynamic_rnn(cell,backward_cell,encoder_input,sequence_length=seqlen-1,dtype=tf.float32,time_major=False)
ix_range=tf.range(0,self.batch_size,1)
ixs=tf.expand_dims(ix_range,-1)
to_cat=tf.expand_dims(seqlen-2,-1)
gather_inds=tf.concat([ixs,to_cat],axis=-1)
print encoder_outs
outs=tf.gather_nd(encoder_outs,gather_inds)
outs=tf.nn.dropout(outs,.75)
self.deb=tf.gather_nd(self.caption_placeholder[:,1:],gather_inds)
print outs.shape
input_embedding,input_embedding_KLD_loss=self._get_middle_embedding([network_weights['middle_encoding'],network_weights['biases_middle_encoding']],network_weights['middle_encoding'],outs,logit=True)
return [input_embedding,tf.constant(0.0),embedded_input_KLD_loss,input_embedding_KLD_loss]
# input_embedding=tf.nn.l2_normalize(input_embedding,dim=-1)
self.other_loss=tf.constant(0,dtype=tf.float32)
KLD_penalty=tf.tanh(tf.cast(self.timestep,tf.float32)/1.0)
cos_penalty=tf.maximum(-0.1,tf.tanh(tf.cast(self.timestep,tf.float32)/(5.0)))
def train_decoder():
if form3:
_x,input_KLD_loss=self._get_input_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['variational_encoding'])
input_KLD_loss=tf.reduce_mean(input_KLD_loss)*KLD_penalty#*tf.constant(0.0,dtype=tf.float32)
# normed_embedding= tf.nn.l2_normalize(self.mid_var, dim=-1)
# normed_target=tf.nn.l2_normalize(self.word_var,dim=-1)
# cos_sim=(tf.reduce_sum(tf.multiply(normed_embedding,normed_target),axis=-1))
# # # self.exp_loss=tf.reduce_mean((-cos_sim))
# # # self.exp_loss=tf.reduce_sum(xentropy)/float(self.batch_size)
# self.other_loss += tf.reduce_mean(-(cos_sim))*cos_penalty
# other_loss+=tf.reduce_mean(tf.reduce_sum(tf.square(_x-input_embedding),axis=-1))*cos_penalty
return [_x,input_KLD_loss,tf.constant(0.0),tf.constant(0.0)]
input_embedding,self.input_KLD_loss,self.embedded_input_KLD_loss,self.input_embedding_KLD_loss=tf.cond(tf.equal(self.timestep%5,0),train_decoder,train_encoder)
# Use recognition network to determine mean and
# (log) variance of Gaussian distribution in latent
# space
# if not same_embedding:
# input_embedding,input_embedding_KLD_loss=self._get_input_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['input_meaning'])
# else:
# input_embedding,input_embedding_KLD_loss=self._get_input_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'])
if not embeddings_trainable:
input_embedding=tf.stop_gradient(input_embedding)
# embed2decoder=tf.Variable(xavier_init(self.network_architecture['n_z_m_2'],self.network_architecture['n_lstm_input']),name='decoder_embedding_weight')
# embed2decoder_bias=tf.Variable(tf.zeros(self.network_architecture['n_lstm_input']),name='decoder_embedding_bias')
state = self.lstm.zero_state(self.batch_size, dtype=tf.float32)
# input_embedding=tf.matmul(input_embedding,embed2decoder)+embed2decoder_bias
loss = 0
self.debug=0
probs=[]
with tf.variable_scope("RNN"):
for i in range(self.network_architecture['maxlen']):
if i > 0:
# current_embedding = tf.nn.embedding_lookup(self.word_embedding, caption_placeholder[:,i-1]) + self.embedding_bias
if form4:
current_embedding,KLD_loss=input_embedding,0
elif form2:
current_embedding,KLD_loss = self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'], self.caption_placeholder[:,i-1],logit=True)
else:
current_embedding,KLD_loss = self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'], self.caption_placeholder[:,i-1])
loss+=tf.reduce_sum(KLD_loss*self.mask[:,i])*KLD_penalty
else:
current_embedding = input_embedding
if i > 0:
tf.get_variable_scope().reuse_variables()
out, state = self.lstm(current_embedding, state)
if i > 0:
if not form2:
labels = tf.expand_dims(self.caption_placeholder[:, i], 1)
ix_range=tf.range(0, self.batch_size, 1)
ixs = tf.expand_dims(ix_range, 1)
concat = tf.concat([ixs, labels],1)
onehot = tf.sparse_to_dense(
concat, tf.stack([self.batch_size, self.n_words]), 1.0, 0.0)
else:
onehot=self.caption_placeholder[:,i]
logit = tf.matmul(out, network_weights['LSTM']['encoding_weight']) + network_weights['LSTM']['encoding_bias']
if not use_ctc:
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=onehot)
xentropy = xentropy * self.mask[:,i]
xentropy=tf.reduce_sum(xentropy)
self.debug+=xentropy
loss += xentropy
else:
probs.append(tf.expand_dims(tf.nn.sigmoid(logit),1))
self.debug=[self.input_KLD_loss,tf.reduce_mean(self.input_embedding_KLD_loss)/self.batch_size*KLD_penalty,self.other_loss,KLD_penalty]
if not use_ctc:
loss_ctc=0
# self.debug=other_loss
# self.debug=[input_KLD_loss,embedded_input_KLD_loss,input_embedding_KLD_loss]
else:
probs=tf.concat(probs,axis=1)
probs=ctc_loss.get_output_probabilities(probs,self.caption_placeholder[:,1:,:])
loss_ctc=ctc_loss.loss(probs,self.caption_placeholder[:,1:,:],self.network_architecture['maxlen']-2,self.batch_size,seqlen-1)
self.debug=loss_ctc
#
loss = (loss / tf.reduce_sum(self.mask[:,1:]))+tf.reduce_sum(self.input_embedding_KLD_loss)/self.batch_size*KLD_penalty+tf.reduce_sum(self.embedded_input_KLD_loss*self.mask[:,1:])/tf.reduce_sum(self.mask[:,1:])*KLD_penalty+loss_ctc+self.input_KLD_loss+self.other_loss
self.loss=loss
def _initialize_weights(self, n_lstm_input, maxlen,
n_input, n_z, n_z_m,n_z_m_2):
all_weights = dict()
if form3:
n_in=n_z
else:
n_in=n_input
if not same_embedding:
all_weights['input_meaning'] = {
'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='affine_weight',trainable=embeddings_trainable),
'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='affine_bias',trainable=embeddings_trainable)}
# if not vanilla:
all_weights['biases_variational_encoding'] = {
'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='out_meanb',trainable=embeddings_trainable),
'out_log_sigma': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='out_log_sigmab',trainable=embeddings_trainable)}
all_weights['variational_encoding'] = {
'out_mean': tf.Variable(xavier_init(n_in, n_z),name='out_mean',trainable=embeddings_trainable),
'out_log_sigma': tf.Variable(xavier_init(n_in, n_z),name='out_log_sigma',trainable=embeddings_trainable),
'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='in_affine_weight'),
'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='in_affine_bias')
}
# else:
# all_weights['biases_variational_encoding'] = {
# 'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='out_meanb',trainable=embeddings_trainable)}
# all_weights['variational_encoding'] = {
# 'out_mean': tf.Variable(xavier_init(n_in, n_z),name='out_mean',trainable=embeddings_trainable),
# 'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='in_affine_weight'),
# 'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='in_affine_bias')}
self.untrainable_variables=all_weights['input_meaning'].values()+all_weights['biases_variational_encoding'].values()+all_weights['variational_encoding'].values()
if mid_vae:
all_weights['biases_middle_encoding'] = {
'out_mean': tf.Variable(tf.zeros([n_z_m], dtype=tf.float32),name='mid_out_meanb'),
'out_log_sigma': tf.Variable(tf.zeros([n_z_m], dtype=tf.float32),name='mid_out_log_sigmab')}
all_weights['middle_encoding'] = {
'out_mean': tf.Variable(xavier_init(n_lstm_input, n_z_m),name='mid_out_mean'),
'out_log_sigma': tf.Variable(xavier_init(n_lstm_input, n_z_m),name='mid_out_log_sigma'),
'affine_weight': tf.Variable(xavier_init(n_z_m, n_lstm_input),name='mid_affine_weight'),
'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='mid_affine_bias')}
all_weights['embmap']={
'out_mean': tf.Variable(xavier_init(n_in, n_z),name='embmap_out_mean'),
'out_log_sigma': tf.Variable(xavier_init(n_in, n_z),name='embmap_out_log_sigma')
}
all_weights['embmap_biases']={
'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='embmap_out_meanb',trainable=embeddings_trainable),
'out_log_sigma': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='embmap_out_log_sigmab',trainable=embeddings_trainable)
}
else:
all_weights['biases_middle_encoding'] = {
'out_mean': tf.Variable(tf.zeros([n_z_m], dtype=tf.float32),name='mid_out_meanb')}
all_weights['middle_encoding'] = {
'out_mean': tf.Variable(xavier_init(n_lstm_input, n_z_m),name='mid_out_mean'),
'affine_weight': tf.Variable(xavier_init(n_z_m, n_lstm_input),name='mid_affine_weight'),
'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='mid_affine_bias')}
all_weights['embmap']={
'out_mean': tf.Variable(xavier_init(n_in, n_z),name='embmap_out_mean')
}
all_weights['embmap_biases']={
'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='embmap_out_meanb',trainable=embeddings_trainable)
}
self.lstm=tf.contrib.rnn.BasicLSTMCell(n_lstm_input)
if lstm_stack>1:
self.lstm=tf.contrib.rnn.MultiRNNCell([self.lstm]*lstm_stack)
all_weights['LSTM'] = {
'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='affine_weight2'),
'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='affine_bias2'),
'encoding_weight': tf.Variable(xavier_init(n_lstm_input,n_input),name='encoding_weight'),
'encoding_bias': tf.Variable(tf.zeros(n_input),name='encoding_bias'),
'lstm': self.lstm}
return all_weights
def _get_input_embedding(self, ve_weights, aff_weights):
if not form3:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],self.x)
else:
with tf.device('/cpu:0'):
x=tf.nn.embedding_lookup(self.embw,self.x)
x+=self.embb
z,vae_loss=self._vae_sample_mid(ve_weights[0],ve_weights[1],x)
self.word_var=z
embedding=tf.matmul(z,aff_weights['affine_weight'])+aff_weights['affine_bias']
return embedding,vae_loss
def _get_middle_embedding(self, ve_weights, lstm_weights, x,logit=False):
if logit:
z,vae_loss=self._vae_sample_mid(ve_weights[0],ve_weights[1],x)
else:
if not form2:
z,vae_loss=self._vae_sample_mid(ve_weights[0],ve_weights[1],x, True)
else:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],tf.one_hot(x,depth=self.network_architecture['n_input']))
all_the_f_one_h.append(tf.one_hot(x,depth=self.network_architecture['n_input']))
print z.shape
self.mid_var=z
embedding=tf.matmul(z,lstm_weights['affine_weight'])+lstm_weights['affine_bias']
return embedding,vae_loss
def _get_word_embedding(self, ve_weights, lstm_weights, x,logit=False):
if form3:
with tf.device('/cpu:0'):
x=tf.nn.embedding_lookup(self.embw,x)
x+=self.embb
if logit:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x)
else:
if not form2:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x, True)
else:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],tf.one_hot(x,depth=self.network_architecture['n_input']))
all_the_f_one_h.append(tf.one_hot(x,depth=self.network_architecture['n_input']))
embedding=tf.matmul(z,lstm_weights['affine_weight'])+lstm_weights['affine_bias']
return embedding,vae_loss
def _vae_sample(self, weights, biases, x, lookup=False):
#TODO: consider adding a linear transform layer+relu or softplus here first
if not lookup:
mu=tf.matmul(x,weights['out_mean'])+biases['out_mean']
if not vanilla:
logvar=tf.matmul(x,weights['out_log_sigma'])+biases['out_log_sigma']
else:
mu=tf.nn.embedding_lookup(weights['out_mean'],x)+biases['out_mean']
if not vanilla:
logvar=tf.nn.embedding_lookup(weights['out_log_sigma'],x)+biases['out_log_sigma']
if not vanilla:
epsilon=tf.random_normal(tf.shape(logvar),name='epsilon')
std=tf.exp(.5*logvar)
z=mu+tf.multiply(std,epsilon)
else:
z=mu
KLD=tf.constant(0.0)
if not vanilla:
KLD = -0.5 * tf.reduce_sum(1 + logvar - tf.pow(mu, 2) - tf.exp(logvar),axis=-1)
print logvar.shape,epsilon.shape,std.shape,z.shape,KLD.shape
return z,KLD
def _vae_sample_mid(self, weights, biases, x, lookup=False):
#TODO: consider adding a linear transform layer+relu or softplus here first
if not lookup:
mu=tf.matmul(x,weights['out_mean'])+biases['out_mean']
if mid_vae:
logvar=tf.matmul(x,weights['out_log_sigma'])+biases['out_log_sigma']
else:
mu=tf.nn.embedding_lookup(weights['out_mean'],x)+biases['out_mean']
if mid_vae:
logvar=tf.nn.embedding_lookup(weights['out_log_sigma'],x)+biases['out_log_sigma']
if mid_vae:
epsilon=tf.random_normal(tf.shape(logvar),name='epsilon')
std=tf.exp(.5*logvar)
z=mu+tf.multiply(std,epsilon)
else:
z=mu
KLD=tf.constant(0.0)
if mid_vae:
print 'stop fucking sampling',mid_vae
KLD = -0.5 * tf.reduce_sum(1 + logvar - tf.pow(mu, 2) - tf.exp(logvar),axis=-1)
print logvar.shape,epsilon.shape,std.shape,z.shape,KLD.shape
return z,KLD
def _create_loss_optimizer(self):
if clip_grad:
opt_func = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, tvars), .1)
self.optimizer = opt_func.apply_gradients(zip(grads, tvars))
else:
self.optimizer = \
tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
def _create_loss_test(self):
self.test_op = \
tf.test.compute_gradient_error(self.x,np.array([self.batch_size,self.n_words]),self.loss,[1],extra_feed_dict={})
def partial_fit(self, X,y,mask,testify=False,timestep=0):
"""Train model based on mini-batch of input data.
Return cost of mini-batch.
"""
if self.test and testify:
print tf.test.compute_gradient_error(self.x,np.array([self.batch_size,self.n_words]),self.loss,[self.batch_size],extra_feed_dict={self.caption_placeholder: y, self.mask: mask})
exit()
else:
opt, cost,shit = self.sess.run((self.optimizer, self.loss,self.debug),
feed_dict={self.x: X, self.caption_placeholder: y, self.mask: mask,self.timestep:timestep})
# print shit
# print deb
# exit()
return cost,shit
def _build_gen(self):
#same setup as `_create_network` function
network_weights = self._initialize_weights(**self.network_architecture)
if form2:
start_token_tensor=tf.constant((np.zeros([self.batch_size,binary_dim])).astype(np.float32),dtype=tf.float32)
else:
start_token_tensor=tf.constant((np.zeros([self.batch_size])).astype(np.int32),dtype=tf.int32)
self.network_weights=network_weights
if not same_embedding:
input_embedding,_=self._get_input_embedding([network_weights['embmap'],network_weights['embmap_biases']],network_weights['embmap'])
else:
input_embedding,_=self._get_input_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'])
print input_embedding.shape
# image_embedding = tf.matmul(img, self.img_embedding) + self.img_embedding_bias
state = self.lstm.zero_state(self.batch_size,dtype=tf.float32)
#declare list to hold the words of our generated captions
all_words = []
with tf.variable_scope("RNN"):
# in the first iteration we have no previous word, so we directly pass in the image embedding
# and set the `previous_word` to the embedding of the start token ([0]) for the future iterations
output, state = self.lstm(input_embedding, state)
print state,output.shape
if form4:
previous_word,_=input_embedding,None
elif form2:
previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], start_token_tensor,logit=True)
else:
previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], start_token_tensor)
print previous_word.shape
# previous_word = tf.nn.embedding_lookup(self.word_embedding, [0]) + self.embedding_bias
for i in range(self.network_architecture['maxlen']):
tf.get_variable_scope().reuse_variables()
print i
out, state = self.lstm(previous_word, state)
# get a one-hot word encoding from the output of the LSTM
logit=tf.matmul(out, network_weights['LSTM']['encoding_weight']) + network_weights['LSTM']['encoding_bias']
if not form2:
best_word = tf.argmax(logit, 1)
else:
best_word = tf.argmax(logit, 1)
# with tf.device("/cpu:0"):
# # get the embedding of the best_word to use as input to the next iteration of our LSTM
# previous_word = tf.nn.embedding_lookup(self.word_embedding, best_word)
# previous_word += self.embedding_bias
print logit.shape
if form4:
previous_word,_=input_embedding,None
elif form2:
previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], best_word,logit=True)
else:
previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], best_word)
print previous_word.shape
all_words.append(best_word)
self.generated_words=all_words
def generate(self, _map, x):
""" Generate data by sampling from latent space.
If z_mu is not None, data for this point in latent space is
generated. Otherwise, z_mu is drawn from prior in latent
space.
# """
# if z_mu is None:
# z_mu = np.random.normal(size=self.network_architecture["n_z"])
# # Note: This maps to mean of distribution, we could alternatively
# # sample from Gaussian distribution
# return self.sess.run(self.x_reconstr_mean,
# feed_dict={self.z: z_mu})
# saver = tf.train.Saver()
# saver.restore(self.sess, tf.train.latest_checkpoint(model_path))
generated_word_index,f_it= self.sess.run([self.generated_words,all_the_f_one_h], feed_dict={self.x:x})
print f_it
print generated_word_index
if form2:
generated_word_index=np.array(bin_to_int(generated_word_index))
generated_word_index=np.rollaxis(generated_word_index,1)
else:
generated_word_index=np.array(generated_word_index)
return generated_word_index
# generated_sentence = ixtoword(_map,generated_word_index)
# return generated_sentence
def ixtoword(_map,ixs):
    """Translate a batch of index sequences back into words via `_map`."""
    sentences = []
    for seq in ixs:
        sentences.append([_map[ix] for ix in seq])
    return sentences
def bin_to_int(a):
    """Convert each bit array in `a` (least-significant bit first along the
    last axis) into unsigned integers."""
    results = []
    for bits in a:
        weights = 2 ** np.arange(bits.shape[-1])
        results.append((bits * weights).sum(axis=-1).astype(np.uint32))
    return results
def train(network_architecture, learning_rate=0.001,
batch_size=100, training_epochs=10, display_step=2,gen=False,ctrain=False,test=False):
# Construct a VariationalAutoencoder and run the training loop over the
# module-level X / y / mask arrays. When gen=True the model is returned
# immediately (generation graph, no training). Periodically checkpoints the
# model (should_save) and dumps the running costs to loss_output_path.
# Relies on globals: n_samples, all_samps, should_decay, should_save,
# model_path, loss_output_path, X, y, mask.
global_step=tf.Variable(0,trainable=False)
total_batch = int(n_samples / batch_size)
# Optional exponential learning-rate decay, one decay step per epoch.
if should_decay and not gen:
learning_rate = tf.train.exponential_decay(learning_rate, global_step,
total_batch, 0.95, staircase=True)
vae = VariationalAutoencoder(network_architecture,
learning_rate=learning_rate,
batch_size=batch_size,generative=gen,ctrain=ctrain,test=test,global_step=global_step)
# Training cycle
# if test:
# maxlen=network_architecture['maxlen']
# return tf.test.compute_gradient_error([vae.x,vae.caption_placeholder,vae.mask],[np.array([batch_size,n_input]),np.array([batch_size,maxlen,n_input]),np.array([batch_size,maxlen])],vae.loss,[])
if gen:
return vae
costs=[]
indlist=np.arange(all_samps).astype(int)
# indlist=np.arange(10*batch_size).astype(int)
for epoch in range(training_epochs):
avg_cost = 0.
# Loop over all batches
np.random.shuffle(indlist)
testify=False
avg_loss=0
# for i in range(1):
for i in range(total_batch):
# break
ts=i
# i=0
# Batches are sampled with replacement rather than partitioned.
inds=np.random.choice(indlist,batch_size)
# print indlist[i*batch_size:(i+1)*batch_size]
# batch_xs = X[indlist[i*batch_size:(i+1)*batch_size]]
batch_xs = X[inds]
# Fit training using batch data
# if epoch==2 and i ==0:
# testify=True
# cost,loss = vae.partial_fit(batch_xs,y[indlist[i*batch_size:(i+1)*batch_size]].astype(np.uint32),mask[indlist[i*batch_size:(i+1)*batch_size]],timestep=epoch*total_batch+ts,testify=testify)
cost,loss = vae.partial_fit(batch_xs,y[inds].astype(np.uint32),mask[inds],timestep=(epoch)+1e-3,testify=testify)
# Compute average loss (running mean over batches).
avg_cost = avg_cost * i /(i+1) +cost/(i+1)
# avg_loss=avg_loss*i/(i+1)+loss/(i+1)
if i% display_step==0:
print avg_cost,loss,cost
if epoch == 0 and ts==0:
costs.append(avg_cost)
costs.append(avg_cost)
# Display logs per epoch step; checkpoint every display_step*10 epochs.
if epoch % (display_step*10) == 0 or epoch==1:
if should_save:
print 'saving'
vae.saver.save(vae.sess, os.path.join(model_path,'model'))
pkl.dump(costs,open(loss_output_path,'wb'))
print("Epoch:", '%04d' % (epoch+1),
"cost=", avg_cost)
return vae
if __name__ == "__main__":
# CLI entry point. Positional argv flags configure the module-level
# switches used throughout: [1] vanilla, [2] mid_vae, [3] clip, [4] batch
# size, [5] transfer, [6] binary_dim, [7] lstm dim, [8] model path,
# [9] maxlen, [10-12] latent sizes, [13] epochs, [14] ctc_loss,
# [15] lstm_stack, [16] forward/bdlstm.
import sys
form2=True
vanilla=True
if sys.argv[1]!='vanilla':
vanilla=False
mid_vae=False
form3= True
form4=False
# NOTE(review): this unconditionally re-enables vanilla, overriding the
# sys.argv[1] flag parsed just above — confirm this override is intended.
vanilla=True
if sys.argv[2]=='mid_vae':
mid_vae=True
print 'mid_vae'
same_embedding=False
clip_grad=True
if sys.argv[3]!='clip':
clip_grad=False
should_save=True
should_train=True
# should_train=not should_train
should_continue=False
# should_continue=True
should_decay=True
zero_end_tok=True
training_epochs=int(sys.argv[13])
batch_size=int(sys.argv[4])
onehot=False
embeddings_trainable=False
if sys.argv[5]!='transfer':
print 'true embs'
embeddings_trainable=True
transfertype2=True
binary_dim=int(sys.argv[6])
all_the_f_one_h=[]
# Load the preprocessed dataset; vocabulary size leaves room for the
# reserved special tokens (2 or 3 depending on zero_end_tok).
if not zero_end_tok:
X, y, mask, _map = load_text(50000-3)
else:
X, y, mask, _map = load_text(50000-2)
n_input =50000
n_samples = 30000
lstm_dim=int(sys.argv[7])
model_path = sys.argv[8]
vartype=''
transfertype=''
# +2 slots for the start and end tokens.
maxlen=int(sys.argv[9])+2
n_z=int(sys.argv[10])
n_z_m=int(sys.argv[11])
n_z_m_2=int(sys.argv[12])
if not vanilla:
vartype='var'
if not embeddings_trainable:
transfertype='transfer'
cliptype=''
if clip_grad:
cliptype='clip'
use_ctc=False
losstype=''
if sys.argv[14]=='ctc_loss':
use_ctc=True
losstype='ctc'
lstm_stack=int(sys.argv[15])
use_bdlstm=False
bdlstmtype=''
if sys.argv[16]!='forward':
use_bdlstm=True
bdlstmtype='bdlstm'
# Loss-curve output filename encodes the full hyperparameter configuration.
loss_output_path= 'losses/%s%ss_%sb_%sl_%sh_%sd_%sz_%szm_%s%s%sdefdef%s4.pkl'%(bdlstmtype,str(lstm_stack),str(batch_size),str(maxlen-2),str(lstm_dim),str(n_input),str(n_z),str(n_z_m),str(losstype),str(cliptype),str(vartype),str(transfertype))
all_samps=len(X)
n_samples=all_samps
# X, y = X[:n_samples, :], y[:n_samples, :]
network_architecture = \
dict(maxlen=maxlen, # 2nd layer decoder neurons
n_input=n_input, # One hot encoding input
n_lstm_input=lstm_dim, # LSTM cell size
n_z=n_z, # dimensionality of latent space
n_z_m=n_z_m,
n_z_m_2=n_z_m_2
)
# batch_size=1
if should_train:
# vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=False,ctrain=should_continue)
# print train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=False,ctrain=should_continue,test=True)
vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=False,ctrain=should_continue,learning_rate=.005)
else:
# Generation mode: build the inference graph and restore a checkpoint.
vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=True,ctrain=True)
# # vae_2d._build_gen()
ind_list=np.arange(len(X)).astype(int)
# np.random.shuffle(ind_list)
x_sample = X[ind_list[:batch_size]]
print x_sample
y_sample = y[ind_list[:batch_size]]
print y_sample
# Decode a sample batch and print predictions next to the ground truth.
y_hat = vae_2d.generate(_map,x_sample)
y_hat=y_hat[:10]
# print y_hat
y_hat_words=ixtoword(_map,y_hat)
print y_hat_words
if form2:
y_words=ixtoword(_map,np.array(bin_to_int(y_sample[:10])))
else:
y_words=ixtoword(_map,y_sample)
print(y_hat)
print(y_hat_words)
print(y_words)
print(ixtoword(_map,bin_to_int(np.expand_dims(x_sample[:10],axis=0))))
# # plt.figure(figsize=(8, 6))
# plt.scatter(z_mu[:, 0], z_mu[:, 1], c=np.argmax(y_sample, 1))
# plt.colorbar()
# plt.grid()
# plt.show()
|
dricciardelli/vae2vec
|
def_def_alt_oh.py
|
Python
|
mit
| 35,911
|
[
"Gaussian"
] |
3c61807f5c1d905574f98931484df0fa2eb3c7ed14e28aba713a0c7ce9e4dbca
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Brian Cottingham
# spiffyech@gmail.com
#This file is part of Spiffybot.
#
#Spiffybot is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Spiffybot is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with Spiffybot. If not, see <http://www.gnu.org/licenses/>.
import os
import random
import re
import readline
from sqlite3 import dbapi2 as sqlite
import string
import sys
import time
import traceback
import commands
from createDB import createDB
import irclib
import ircTools
from misc import misc
#from tell import tell
from modules import *
# Abort early with a helpful message when the local server_details.py
# configuration module is missing.
try:
    import server_details
except: # NOTE(review): bare except also hides syntax errors inside server_details.py
    print "You need to create a server_details.py before you can run the bot."
    print "You can copy server_details.py.example to get started."
    exit(1)
# IRC connection information
network = 'short.csc.ncsu.edu' # IRC server hostname
port = 1337 # IRC server port
channels = ['##bottest',] # channels joined at startup (see joinChannels)
nick = 'spiffybot' # current bot nick; mutated at runtime on nick collision
realName = 'spiffybot' # IRC "real name" field
# Enable debug mode if the appropriate command line parameter was passed
if len(sys.argv) > 1 and sys.argv[1] == "--debug":
    DEBUG = True
else:
    DEBUG = False
# Open the database, creating the schema on first run
dbName = "logs.db"
if not os.path.exists(dbName):
    createDB(dbName)
dbConn = sqlite.connect(dbName) # module-level connection shared by all handlers
cursor = dbConn.cursor()
def main():
    '''Connect to IRC, register event handlers, fork helper processes, and
    run the irclib event loop forever.'''
    # Create an IRC object
    irc = irclib.IRC()
    if DEBUG:
        irclib.DEBUG = True # dump all irclib events to stdout
    # Create a server object, connect and join the channels
    global server
    server = irc.server()
    server.connect(network, port, nick, ircname=realName, username=server_details.username, password=server_details.password, ssl=server_details.ssl)
    joinChannels()
    # Add handler functions for various IRC events
    irc.add_global_handler("pubmsg", handleMessage)
    # irc.add_global_handler("ctcp", handleMessage) # Commented out until I fix bug that prevents this from logging properly
    irc.add_global_handler("privmsg", handleMessage)
    irc.add_global_handler("join", handleJoin)
    irc.add_global_handler("part", handlePart)
    irc.add_global_handler("quit", handleQuit)
    irc.add_global_handler("kick", handleKick)
    irc.add_global_handler("topic", handleTopic)
    irc.add_global_handler("nicknameinuse", changeNick)
    # Fork off our child process for operator input
    # NOTE(review): after os.fork() the *parent* (pid != 0) runs termInput /
    # watchLoop forever, so it is the grandchild process that reaches the
    # irc.process_forever() loop below -- confirm this inversion is intended.
    pid = os.fork()
    if pid:
        termInput(server)
    pid = os.fork()
    if pid:
        watchLoop(server)
    # Start listening for connections
    while 1:
        if not DEBUG: # Debug CLI arg disables crash message, enables regular Python exception printing, shows irclib raw data
            try:
                irc.process_forever()
            except KeyboardInterrupt:
                break
            except:
                printException()
                server.privmsg("spiffytech", "I crashed!")
        else:
            irc.process_forever()
class ircEvent:
    '''Lightweight wrapper around an irclib event, capturing the connection,
    event metadata, and any parsed command arguments.'''
    def __init__(self, connection, event, args):
        self.connection = connection
        self.eventType = event.eventtype()
        self.nick = nick # snapshot of the module-level bot nick
        self.user = event.sourceuser() # User who instigated the event (join, part, pubmsg, etc.)
        self.sender = event.source().split("!")[0]
        self.args = args
        target = event.target()
        # A private message is addressed to the bot itself, so replies
        # must go back to the sender rather than to the "channel".
        self.channel = self.sender if target == self.nick else target
    def reply(self, message):
        '''Send message back to the originating channel (or PM sender).'''
        self.connection.privmsg(self.channel, message)
    def setNick(self, newNick):
        '''Change the bot's nick and update the module-level record.'''
        global nick
        self.connection.nick(newNick)
        nick = newNick
def printException(maxTBlevel=5):
    '''Print the current exception's name, args and traceback to stdout.

    Must be called from inside an exception handler (relies on
    sys.exc_info()).  maxTBlevel limits how many traceback frames are shown.
    This code is copy/pasted; it doesn't work completely.'''
    cla, exc, trbk = sys.exc_info()
    excName = cla.__name__
    try:
        # not every exception instance exposes __dict__["args"]
        excArgs = exc.__dict__["args"]
    except KeyError:
        excArgs = "<no args>"
    excTb = traceback.format_tb(trbk, maxTBlevel)
    print excName
    print excArgs
    print "\n".join(excTb)
def changeNick(connection=None, event=None, newNick=None):
    '''Set or generate a new nick for the bot.

    Called two ways:
      * changeNick(newNick=...) programmatically, to set an explicit nick.
      * As the "nicknameinuse" irclib event handler, in which case a
        fallback nick is generated automatically.
    '''
    global nick
    if connection == None and event == None: # Called within newBot.py, not from an event handler
        # Bug fix: the original dereferenced the None `connection` here,
        # which could never work; use the module-level server connection.
        server.nick(newNick)
        nick = newNick
    else:
        if len(connection.nickname) < 15:
            # Room left in the nick field: append an underscore to the taken nick.
            newNick = connection.nickname + "_"
        else:
            # Nick already at max length: generate a short random nick.
            # Bug fixes: string.numbers does not exist (-> string.digits),
            # len("".letters) was an AttributeError (-> random.choice), and
            # random.seed was given the time.time function object itself.
            chars = string.letters + string.digits
            random.seed(time.time())
            newNick = ""
            for _i in range(0, random.randint(2, len(string.digits) - 1)):
                newNick += random.choice(chars)
        connection.nick(newNick)
        joinChannels(connection)
        nick = newNick
########## Functions to handle channel user connection events ##########
# All of these functions just call recordEvent, but are here in case I ever want to do more than that when handling events
def handleJoin(connection, event):
    '''Deliver any queued "tell" messages to the user who just joined, then log the join.'''
    # NOTE(review): `from tell import tell` is commented out at the top of this
    # file, so `tell` presumably arrives via `from modules import *` -- verify,
    # otherwise this raises NameError on every join.
    event = ircEvent(connection, event, args=None)
    tell.deliverMessages(event)
    recordEvent(event)
def handlePart(connection, event):
    '''Log a user leaving a channel.'''
    recordEvent(ircEvent(connection, event, args=None))
def handleQuit(connection, event):
    '''Log a user disconnecting from the server.'''
    recordEvent(ircEvent(connection, event, args=None))
def handleKick(connection, event):
    '''Log a user being kicked from a channel.'''
    recordEvent(ircEvent(connection, event, args=None))
def recordEvent(event):
    '''Log all channel connection events (join/part/quit/kick) to the
    connevents table.  There's no real reason for it presently; just doing it
    for kicks and giggles.'''
    global dbConn
    global cursor
    user = event.user.decode("utf-8")
    alteredTime = str(time.time()) # timestamps are stored as strings in the DB
    cursor.execute("insert into connevents values (?, ?, ?, ?)", (user, event.eventType, event.channel, alteredTime))
    dbConn.commit()
########################################################################
def handleTopic(connection, event):
    '''Log topic changes to the topic_history table.'''
    # TODO: Log topic when first entering a channel
    global dbConn
    global cursor
    alterer = event.sourceuser().decode("utf-8") # Who changed the topic
    topic = event.arguments()[0].decode("utf-8") # New topic
    alteredTime = str(time.time()) # timestamps are stored as strings in the DB
    cursor.execute("insert into topic_history values (?, ?, ?, ?)", (topic, alteredTime, alterer, event.target()))
    dbConn.commit()
def handleMessage(connection, event):
    '''Someone sent a message to a channel (or a PM): log it, deliver any
    queued "tell" messages, and dispatch matching command handlers.'''
    # Parse the raw IRC data contents
    sender = event.sourceuser().decode("utf-8") # Who sent the message
    message = event.arguments()[0].decode("utf-8") # The channel's new message
    # BEFORE ANYTHING ELSE, record the message in our logs
    global dbConn
    global cursor
    cursor.execute("insert into messages values (?, ?, ?, ?)", (sender, event.target(), message, unicode(time.time())))
    dbConn.commit()
    # First, see if this triggers a message delivery for whoever just spoke
    tell.deliverMessages(ircEvent(connection, event, args=None))
    # # Next, check for echoes
    # ircTools.echo(ircEvent(connection, event, None))
    # See if the message corresponds to any (not-a-command) patterns we care about
    for command in commands.anyMessage:
        if DEBUG:
            print command[0]
        r = re.search(command[0], message, re.IGNORECASE)
        if r != None:
            try:
                # patterns without an "args" named group simply get args=None
                args = r.group("args").strip()
            except:
                args = None
            # SECURITY NOTE(review): eval() on a string built from commands.*
            # executes arbitrary code -- safe only if the commands tables are
            # fully trusted.
            execString = command[1] + "(ircEvent(connection, event, args))" # Using a string instead of storing the function facilitates the planned automatic module loading feature.
            eval(execString)
    # Next, see if the message is something we care about (i.e., a command)
    if re.match(nick + "[:\-, ]", message) or event.eventtype() == "privmsg": # If it's a command for us:
        # ^^ startswith: "spiffybot: calc" || privmsg part ensures this code is triggered if the message is a PM
        if not event.eventtype() == "privmsg":
            message = message.partition(" ")[2] # No nick at the front of a private message
        # Run the command
        foundMatch = False
        for command in commands.cmds:
            print command[0]
            r = re.search(command[0], message, re.IGNORECASE)
            if r != None:
                foundMatch = True
                args = r.group("args").strip()
                execString = command[1] + "(ircEvent(connection, event, args))" # Using a string instead of storing the function facilitates the planned automatic module loading feature.
                eval(execString)
        if foundMatch == False:
            connection.privmsg(event.target(), "Maybe") # Respond with this if we don't have anything else to respond with
def cmdJoin(event):
    '''IRC command handler: join the channel named in event.args.'''
    event.connection.join(event.args)
def cmdNick(event):
    '''IRC command handler: change the bot's nick to event.args.'''
    event.setNick(event.args)
def cmdPart(event):
    '''IRC command handler: leave the channel the command came from.'''
    event.connection.part(event.channel)
def updateCommands():
    '''Reload the main commands file'''
    # Currently broken
    # NOTE(review): reload() rebinds names in the commands module only;
    # handlers already imported elsewhere keep their old references.
    reload(commands)
def joinChannels(connection=None, event=None):
    '''Join every channel in the module-level `channels` list.

    Kept as a separate function so it can be re-run after a nick collision
    (see changeNick).  Bug fix: the original ignored the `connection`
    parameter and always used the global `server`; now the passed-in
    connection is used when one is given, with `server` as the fallback.
    '''
    conn = connection if connection is not None else server
    for channel in channels:
        conn.join(channel)
def termInput(conn):
    '''Child process that reads input from the user and sends it to the channels'''
    # NOTE(review): "channel" can never be in locals() at this point, so this
    # guard is a no-op and channel always starts as channels[0].
    if not "channel" in locals():
        channel = channels[0]
    while 1:
        msg = raw_input("Talk to channel: ") # Get a message from the user
        if msg.startswith("="): # Set the channel we want to talk to
            # Examples: "=#ncsulug", "=#ncsulug Send this message!"
            msg = msg.split()
            channel = msg[0].split("=")[1]
            if len(msg) > 1:
                conn.privmsg(channel, " ".join(msg[1:]))
            continue
        conn.privmsg(channel, msg)
def watchLoop(connection):
    # Background loop (run in a forked process from main) intended to poll for
    # deliverable "tell" messages; the delivery call is commented out, so at
    # present this just sleeps forever.
    while 1:
        time.sleep(3)
        # tell.deliverMessages(ircEvent(connection, event=None, args=None))
# Script entry point: run the bot until Ctrl-C.
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print "Exiting..."
|
spiffytech/spiffybot
|
spiffybot.py
|
Python
|
gpl-3.0
| 10,756
|
[
"Brian"
] |
1ce9b6dca9dedfbf840d6aaa83850463b5e2fa06c8dba4d840f8bd329a1ffdb0
|
# Copyright 2016 Brian Innes
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import traceback
from .coordinates import Coordinate
class ConstrainDrawingRectangle:
    """Clips move/draw commands to an axis-aligned rectangular drawing area.

    Sits in front of another drawing target (``sendingTarget``) and forwards
    commands to it, truncating line segments at the rectangle boundary so the
    target only ever receives coordinates inside the allowed area.
    """

    def __init__(self, x1, y1, x2, y2, sendingTarget):
        """Build a clipping rectangle from two opposite corners.

        :param x1: x of one corner of the drawing area
        :param y1: y of one corner of the drawing area
        :param x2: x of the opposite corner
        :param y2: y of the opposite corner
        :param sendingTarget: downstream object with a sendCommand(coord) method
        """
        # Normalise corners: constraint1 = bottom-left, constraint2 = top-right.
        self.constraint1 = Coordinate.fromCoords(min(x1, x2), min(y1, y2),
                                                 False)
        self.constraint2 = Coordinate.fromCoords(max(x1, x2), max(y1, y2),
                                                 False)
        self.sendingTarget = sendingTarget
        self.outstandingMove = False         # a moveTo() not yet forwarded
        self.moveDrawingCoordinates = None   # pending moveTo() target
        self.outOfBounds = True              # is the pen currently outside the area?
        self.outOfBoundsDrawingCoord = None  # last coordinate seen while outside
        self.currentDrawingPosition = None   # last coordinate processed while inside
        self.width = self.constraint2.x - self.constraint1.x
        self.height = self.constraint2.y - self.constraint1.y

    def __str__(self):
        return "({}, {})".format(self.constraint1, self.constraint2)

    def isOutsideDrawingArea(self, coord):
        """Return True if coord lies outside the drawing rectangle.

        A small epsilon (1e-6) keeps points on the boundary counted as inside.

        :param coord: object with .x and .y attributes
        :rtype: bool
        """
        if coord.x - self.constraint1.x < -0.000001 \
                or coord.x - self.constraint2.x > 0.000001 \
                or coord.y - self.constraint1.y < -0.000001 \
                or coord.y - self.constraint2.y > 0.000001:
            return True
        else:
            return False

    def crossBoundary(self, coord, leaving):
        """Return the point where the segment to/from ``coord`` crosses the boundary.

        :param coord: endpoint of the segment (inside or outside the area)
        :param leaving: True when the pen moves from inside to outside
            (segment from currentDrawingPosition to coord); False when it
            re-enters (segment from outOfBoundsDrawingCoord to coord).
        :return: Coordinate on the rectangle edge, or None if no crossing found
        """
        ret = None
        if leaving:
            if abs(coord.x - self.currentDrawingPosition.x) < 0.000001:
                # vertical line: crossing is at the top or bottom edge
                if coord.y - self.constraint1.y < -0.000001:
                    ret = Coordinate.fromCoords(coord.x, self.constraint1.y, coord.penup)
                elif coord.y - self.constraint2.y > 0.000001:
                    ret = Coordinate.fromCoords(coord.x, self.constraint2.y, coord.penup)
            else:
                # general case: intersect the segment's line (y = m*x + b)
                # with each candidate edge, keeping the first intersection
                # that actually lies on the rectangle boundary
                m = (self.currentDrawingPosition.y - coord.y) / (self.currentDrawingPosition.x - coord.x)
                b = self.currentDrawingPosition.y - m * self.currentDrawingPosition.x
                if coord.x < self.constraint1.x:
                    ret = Coordinate.fromCoords(self.constraint1.x, m * self.constraint1.x + b, coord.penup)
                    if self.isOutsideDrawingArea(ret):
                        ret = None
                if ret is None and coord.x > self.constraint2.x:
                    ret = Coordinate.fromCoords(self.constraint2.x, m * self.constraint2.x + b, coord.penup)
                    if self.isOutsideDrawingArea(ret):
                        ret = None
                if ret is None and coord.y < self.constraint1.y:
                    ret = Coordinate.fromCoords((self.constraint1.y - b) / m, self.constraint1.y, coord.penup)
                    if self.isOutsideDrawingArea(ret):
                        ret = None
                if ret is None and coord.y > self.constraint2.y:
                    ret = Coordinate.fromCoords((self.constraint2.y - b) / m, self.constraint2.y, coord.penup)
                    if self.isOutsideDrawingArea(ret):
                        ret = None
        else:
            if abs(coord.x - self.outOfBoundsDrawingCoord.x) < 0.000001:
                # vertical line: crossing is at the top or bottom edge
                if self.outOfBoundsDrawingCoord.y - self.constraint1.y < -0.000001:
                    ret = Coordinate.fromCoords(coord.x, self.constraint1.y, coord.penup)
                elif self.outOfBoundsDrawingCoord.y - self.constraint2.y > 0.000001:
                    ret = Coordinate.fromCoords(coord.x, self.constraint2.y, coord.penup)
            else:
                m = (coord.y - self.outOfBoundsDrawingCoord.y) / (coord.x - self.outOfBoundsDrawingCoord.x)
                b = coord.y - m * coord.x
                if self.outOfBoundsDrawingCoord.x < self.constraint1.x:
                    ret = Coordinate.fromCoords(self.constraint1.x, m * self.constraint1.x + b, coord.penup)
                    if self.isOutsideDrawingArea(ret):
                        ret = None
                if ret is None and self.outOfBoundsDrawingCoord.x > self.constraint2.x:
                    ret = Coordinate.fromCoords(self.constraint2.x, m * self.constraint2.x + b, coord.penup)
                    if self.isOutsideDrawingArea(ret):
                        ret = None
                if ret is None and self.outOfBoundsDrawingCoord.y < self.constraint1.y:
                    ret = Coordinate.fromCoords((self.constraint1.y - b) / m, self.constraint1.y, coord.penup)
                    if self.isOutsideDrawingArea(ret):
                        ret = None
                if ret is None and self.outOfBoundsDrawingCoord.y > self.constraint2.y:
                    ret = Coordinate.fromCoords((self.constraint2.y - b) / m, self.constraint2.y, coord.penup)
                    if self.isOutsideDrawingArea(ret):
                        ret = None
        if ret is None:
            # Bug fix: the original called .format() on print()'s return value
            # (None), raising AttributeError instead of reporting the problem.
            print("ConstrainDrawingRectangle: Oops - somethings went wrong {} - {} or {} : {}".format(
                coord, self.currentDrawingPosition, self.outOfBoundsDrawingCoord, leaving))
            print("ConstrainDrawingRectangle: constraints = {}".format(self))
        return ret

    def sendCommand(self, coord):
        """Forward a move/draw command, clipped to the drawing area.

        Ensures commands sent downstream stay entirely within the defined
        drawing area; segments that cross the boundary are truncated at the
        crossing point.  Sends the resulting command(s) to sendingTarget.

        :type coord: Coordinate
        :param coord: Coordinate to draw or move to, in drawing coordinates
        :return: None
        """
        try:
            if self.outOfBounds:
                # last command left current draw position outside drawing area
                if self.isOutsideDrawingArea(coord):
                    # still outside: remember the position, send nothing
                    self.outOfBoundsDrawingCoord = coord
                else:
                    # re-entering the drawing area
                    if coord.penup:
                        # a move: simply move to the correct position
                        self.sendingTarget.sendCommand(coord)
                    else:
                        # a draw: move (pen up) to the boundary crossing,
                        # then draw from there to the requested point
                        crossBoundryPoint = self.crossBoundary(coord, False)
                        crossBoundryPoint.penup = True
                        self.sendingTarget.sendCommand(crossBoundryPoint)
                        self.sendingTarget.sendCommand(coord)
                    self.outOfBounds = False
            else:
                # drawing position before this command was inside the drawing area
                if self.isOutsideDrawingArea(coord):
                    # leaving the area: if drawing, truncate the line at the
                    # boundary; either way record that we are now outside
                    if not coord.penup:
                        crossBoundryPoint = self.crossBoundary(coord, True)
                        self.sendingTarget.sendCommand(crossBoundryPoint)
                    self.outOfBoundsDrawingCoord = coord
                    self.outOfBounds = True
                else:
                    # all inside drawing area
                    self.sendingTarget.sendCommand(coord)
            self.currentDrawingPosition = coord
        except Exception:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            print("ConstrainDrawingRectangle exception : %s" % exc_type)
            traceback.print_tb(exc_traceback, limit=2, file=sys.stdout)

    def moveTo(self, x, y):
        """Record a pen-up move; it is forwarded lazily by the next drawTo()."""
        self.moveDrawingCoordinates = Coordinate.fromCoords(x, y, True)
        self.outstandingMove = True

    def drawTo(self, x, y):
        """Draw a (clipped) line to (x, y), flushing any pending move first."""
        if self.outstandingMove:
            self.sendCommand(self.moveDrawingCoordinates)
            self.currentDrawingPosition = self.moveDrawingCoordinates
            self.outstandingMove = False
        coords = Coordinate.fromCoords(x, y, False)
        self.sendCommand(coords)
        self.currentDrawingPosition = coords
|
brianinnes/pycupi
|
python/vPiP/constrainDrawingRectangle.py
|
Python
|
apache-2.0
| 8,796
|
[
"Brian"
] |
5e089765242bfbb1f9c2e8bfae829b6f561032b3b110a52b71d32cbc6567aa4f
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
JahnTeller distortion analysis.
"""
import os
import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.analysis.local_env import (
LocalStructOrderParams,
get_neighbors_of_site_with_index,
)
from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.core.periodic_table import Specie, get_el_sp
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
import warnings
from typing import Dict, Tuple, Union, Optional, Any
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
class JahnTellerAnalyzer:
"""
Will attempt to classify if structure *may* be Jahn-Teller active.
Class currently uses datafile of hard-coded common Jahn-Teller
active ions.
If structure is annotated with magnetic moments, will estimate
if structure may be high-spin or low-spin.
Class aims for more false-positives than false-negatives.
"""
    def __init__(self):
        """Initialize the hard-coded d-electron spin-configuration tables.

        ``self.spin_configs`` maps coordination motif ("oct" or "tet") ->
        number of d electrons (0-10) -> spin state ("high"/"low") ->
        orbital occupancies, plus a "default" spin state used when the
        actual spin state is unknown.
        """
        self.spin_configs = {
            "oct": {  # key is number of d electrons
                0: {"high": {"e_g": 0, "t_2g": 0}, "default": "high"},
                1: {"high": {"e_g": 0, "t_2g": 1}, "default": "high"},  # weak J-T
                2: {"high": {"e_g": 0, "t_2g": 2}, "default": "high"},  # weak
                3: {"high": {"e_g": 0, "t_2g": 3}, "default": "high"},  # no J-T
                4: {
                    "high": {"e_g": 1, "t_2g": 3},
                    "low": {"e_g": 0, "t_2g": 4},
                    "default": "high",
                },  # strong high, weak low
                5: {
                    "high": {"e_g": 2, "t_2g": 3},
                    "low": {"e_g": 0, "t_2g": 5},
                    "default": "low",
                },  # no high, weak low
                6: {
                    "high": {"e_g": 2, "t_2g": 4},
                    "low": {"e_g": 0, "t_2g": 6},
                    "default": "high",
                },  # weak high, no low
                7: {
                    "high": {"e_g": 2, "t_2g": 5},
                    "low": {"e_g": 1, "t_2g": 6},
                    "default": "low",
                },  # weak high, strong low
                8: {"high": {"e_g": 2, "t_2g": 6}, "default": "high"},  # no
                9: {"high": {"e_g": 3, "t_2g": 6}, "default": "high"},  # strong
                10: {"high": {"e_g": 4, "t_2g": 6}, "default": "high"},
            },
            "tet": {  # no low spin observed experimentally in tetrahedral, all weak J-T
                0: {"high": {"e": 0, "t_2": 0}, "default": "high"},
                1: {"high": {"e": 1, "t_2": 0}, "default": "high"},
                2: {"high": {"e": 2, "t_2": 0}, "default": "high"},
                3: {"high": {"e": 2, "t_2": 1}, "default": "high"},
                4: {"high": {"e": 2, "t_2": 2}, "default": "high"},
                5: {"high": {"e": 2, "t_2": 3}, "default": "high"},
                6: {"high": {"e": 3, "t_2": 3}, "default": "high"},
                7: {"high": {"e": 4, "t_2": 3}, "default": "high"},
                8: {"high": {"e": 4, "t_2": 4}, "default": "high"},
                9: {"high": {"e": 4, "t_2": 5}, "default": "high"},
                10: {"high": {"e": 4, "t_2": 6}, "default": "high"},
            },
        }
    def get_analysis_and_structure(
        self,
        structure: Structure,
        calculate_valences: bool = True,
        guesstimate_spin: bool = False,
        op_threshold: float = 0.1,
    ) -> Tuple[Dict, Structure]:
        """Obtain an analysis of a given structure and if it may be Jahn-Teller
        active or not. This is a heuristic, and may give false positives and
        false negatives (false positives are preferred).

        Args:
            structure: input structure
            calculate_valences: whether to attempt to calculate valences;
                the structure must have oxidation states to perform the
                analysis (Default value = True)
            guesstimate_spin: whether to guesstimate spin state from magnetic
                moments or not, use with caution (Default value = False)
            op_threshold: threshold for order parameter above which to consider
                a site to match an octahedral or tetrahedral motif; since
                Jahn-Teller structures can often be quite distorted, this
                threshold is smaller than one might expect (Default value = 0.1)

        Returns:
            Tuple of (analysis dict, decorated primitive structure).  The
            analysis has key 'active' (bool) and, when active, 'strength'
            ('strong' or 'weak') plus per-site details under 'sites'.
        """
        structure = structure.get_primitive_structure()
        if calculate_valences:
            bva = BVAnalyzer()
            structure = bva.get_oxi_state_decorated_structure(structure)
        # no point testing multiple equivalent sites, doesn't make any
        # difference to the analysis but would bloat the returned site lists
        symmetrized_structure = SpacegroupAnalyzer(
            structure
        ).get_symmetrized_structure()
        # to detect structural motifs of a given site
        op = LocalStructOrderParams(["oct", "tet"])
        # per-site analyses, split by whether the site looks Jahn-Teller active
        jt_sites = []
        non_jt_sites = []
        for indices in symmetrized_structure.equivalent_indices:
            idx = indices[0]  # one representative per set of equivalent sites
            site = symmetrized_structure[idx]
            # only interested in sites with oxidation states
            if (
                isinstance(site.specie, Specie)
                and site.specie.element.is_transition_metal
            ):
                # get motif around site: compare oct vs tet order parameters,
                # requiring whichever wins to exceed op_threshold
                order_params = op.get_order_parameters(symmetrized_structure, idx)
                if order_params[0] > order_params[1] and order_params[0] > op_threshold:
                    motif = "oct"
                    motif_order_parameter = order_params[0]
                elif order_params[1] > op_threshold:
                    motif = "tet"
                    motif_order_parameter = order_params[1]
                else:
                    motif = "unknown"
                    motif_order_parameter = None
                if motif == "oct" or motif == "tet":
                    # guess spin of metal ion
                    if guesstimate_spin and "magmom" in site.properties:
                        # estimate if high spin or low spin
                        magmom = site.properties["magmom"]
                        spin_state = self._estimate_spin_state(
                            site.specie, motif, magmom
                        )
                    else:
                        spin_state = "unknown"
                    magnitude = self.get_magnitude_of_effect_from_species(
                        site.specie, spin_state, motif
                    )
                    # NOTE: sites with a known motif but magnitude "none" are
                    # not recorded in non_jt_sites either
                    if magnitude != "none":
                        ligands = get_neighbors_of_site_with_index(
                            structure, idx, approach="min_dist", delta=0.15
                        )
                        ligand_bond_lengths = [
                            ligand.distance(structure[idx]) for ligand in ligands
                        ]
                        ligands_species = list(
                            set([str(ligand.specie) for ligand in ligands])
                        )
                        ligand_bond_length_spread = max(ligand_bond_lengths) - min(
                            ligand_bond_lengths
                        )
                        def trim(f):
                            """
                            Avoid storing to unreasonable precision, hurts readability.
                            """
                            return float("{:.4f}".format(f))
                        # to be Jahn-Teller active, all ligands have to be the same
                        if len(ligands_species) == 1:
                            jt_sites.append(
                                {
                                    "strength": magnitude,
                                    "motif": motif,
                                    "motif_order_parameter": trim(
                                        motif_order_parameter
                                    ),
                                    "spin_state": spin_state,
                                    "species": str(site.specie),
                                    "ligand": ligands_species[0],
                                    "ligand_bond_lengths": [
                                        trim(length) for length in ligand_bond_lengths
                                    ],
                                    "ligand_bond_length_spread": trim(
                                        ligand_bond_length_spread
                                    ),
                                    "site_indices": indices,
                                }
                            )
                        # store reasons for not being J-T active
                        else:
                            non_jt_sites.append(
                                {
                                    "site_indices": indices,
                                    "strength": "none",
                                    "reason": "Not Jahn-Teller active for this "
                                    "electronic configuration.",
                                }
                            )
                else:
                    non_jt_sites.append(
                        {
                            "site_indices": indices,
                            "strength": "none",
                            "reason": "motif is {}".format(motif),
                        }
                    )
        # perform aggregation of all sites
        if jt_sites:
            analysis = {"active": True}  # type: Dict[str, Any]
            # if any site could exhibit 'strong' Jahn-Teller effect
            # then mark whole structure as strong
            strong_magnitudes = [site["strength"] == "strong" for site in jt_sites]
            if any(strong_magnitudes):
                analysis["strength"] = "strong"
            else:
                analysis["strength"] = "weak"
            analysis["sites"] = jt_sites
            return analysis, structure
        else:
            return {"active": False, "sites": non_jt_sites}, structure
def get_analysis(
self,
structure: Structure,
calculate_valences: bool = True,
guesstimate_spin: bool = False,
op_threshold: float = 0.1,
) -> Dict:
"""
Convenience method, uses get_analysis_and_structure method.
Obtain an analysis of a given structure and if it may be Jahn-Teller
active or not. This is a heuristic, and may give false positives and
false negatives (false positives are preferred).
Args:
structure: input structure
calculate_valences: whether to attempt to calculate valences or not, structure
should have oxidation states to perform analysis (Default value = True)
guesstimate_spin: whether to guesstimate spin state from magnetic moments
or not, use with caution (Default value = False)
op_threshold: threshold for order parameter above which to consider site
to match an octahedral or tetrahedral motif, since Jahn-Teller structures
can often be
quite distorted, this threshold is smaller than one might expect
Returns:
analysis of structure, with key 'strength' which may be 'none', 'strong',
'weak', or 'unknown' (Default value = 0.1)
"""
return self.get_analysis_and_structure(
structure,
calculate_valences=calculate_valences,
guesstimate_spin=guesstimate_spin,
op_threshold=op_threshold,
)[0]
def is_jahn_teller_active(
self,
structure: Structure,
calculate_valences: bool = True,
guesstimate_spin: bool = False,
op_threshold: float = 0.1,
) -> bool:
"""
Convenience method, uses get_analysis_and_structure method.
Check if a given structure and if it may be Jahn-Teller
active or not. This is a heuristic, and may give false positives and
false negatives (false positives are preferred).
Args:
structure: input structure
calculate_valences: whether to attempt to calculate valences or not, structure
should have oxidation states to perform analysis (Default value = True)
guesstimate_spin: whether to guesstimate spin state from magnetic moments
or not, use with caution (Default value = False)
op_threshold: threshold for order parameter above which to consider site
to match an octahedral or tetrahedral motif, since Jahn-Teller structures
can often be
quite distorted, this threshold is smaller than one might expect
Returns:
boolean, True if might be Jahn-Teller active, False if not
"""
active = False
try:
analysis = self.get_analysis(
structure,
calculate_valences=calculate_valences,
guesstimate_spin=guesstimate_spin,
op_threshold=op_threshold,
)
active = analysis["active"]
except Exception as e:
warnings.warn(
"Error analyzing {}: {}".format(
structure.composition.reduced_formula, e
)
)
return active
def tag_structure(
self,
structure: Structure,
calculate_valences: bool = True,
guesstimate_spin: bool = False,
op_threshold: float = 0.1,
) -> Structure:
"""
Convenience method, uses get_analysis_and_structure method.
Add a "possible_jt_active" site property on Structure.
Args:
structure: input structure
calculate_valences: whether to attempt to calculate valences or not, structure
should have oxidation states to perform analysis (Default value = True)
guesstimate_spin: whether to guesstimate spin state from magnetic moments
or not, use with caution (Default value = False)
op_threshold: threshold for order parameter above which to consider site
to match an octahedral or tetrahedral motif, since Jahn-Teller structures
can often be
quite distorted, this threshold is smaller than one might expect
Returns:
Decorated Structure, will be in primitive setting.
"""
try:
analysis, structure = self.get_analysis_and_structure(
structure,
calculate_valences=calculate_valences,
guesstimate_spin=guesstimate_spin,
op_threshold=op_threshold,
)
jt_sites = [False] * len(structure)
if analysis["active"]:
for site in analysis["sites"]:
for index in site["site_indices"]:
jt_sites[index] = True
structure.add_site_property("possible_jt_active", jt_sites)
return structure
except Exception as e:
warnings.warn(
"Error analyzing {}: {}".format(
structure.composition.reduced_formula, e
)
)
return structure
@staticmethod
def _get_number_of_d_electrons(species: Specie) -> float:
"""
Get number of d electrons of a species.
Args:
species: Specie object
Returns: Number of d electrons.
"""
# TODO: replace with more generic Hund's rule algorithm?
# taken from get_crystal_field_spin
elec = species.full_electronic_structure
if len(elec) < 4 or elec[-1][1] != "s" or elec[-2][1] != "d":
raise AttributeError(
"Invalid element {} for crystal field calculation.".format(
species.symbol
)
)
nelectrons = int(elec[-1][2] + elec[-2][2] - species.oxi_state)
if nelectrons < 0 or nelectrons > 10:
raise AttributeError(
"Invalid oxidation state {} for element {}".format(
species.oxi_state, species.symbol
)
)
return nelectrons
def get_magnitude_of_effect_from_species(
self, species: Union[str, Specie], spin_state: str, motif: str
) -> str:
"""
Get magnitude of Jahn-Teller effect from provided species, spin state and motif.
Args:
species: e.g. Fe2+
spin_state: "high" or "low"
motif: "oct" or "tet"
Returns: "none", "weak" or "strong
"""
magnitude = "none"
sp = get_el_sp(species)
# has to be Specie; we need to know the oxidation state
if isinstance(sp, Specie) and sp.element.is_transition_metal:
d_electrons = self._get_number_of_d_electrons(sp)
if motif in self.spin_configs:
if spin_state not in self.spin_configs[motif][d_electrons]:
spin_state = self.spin_configs[motif][d_electrons]["default"]
spin_config = self.spin_configs[motif][d_electrons][spin_state]
magnitude = JahnTellerAnalyzer.get_magnitude_of_effect_from_spin_config(
motif, spin_config
)
else:
warnings.warn("No data for this species.")
return magnitude
@staticmethod
def get_magnitude_of_effect_from_spin_config(
motif: str, spin_config: Dict[str, float]
) -> str:
"""
Roughly, the magnitude of Jahn-Teller distortion will be:
* in octahedral environments, strong if e_g orbitals
unevenly occupied but weak if t_2g orbitals unevenly
occupied
* in tetrahedral environments always weaker
Args:
motif: "oct" or "tet"
spin_config: dict of 'e' (e_g) and 't' (t2_g)
with number of electrons in each state
Returns: "none", "weak" or "strong"
"""
magnitude = "none"
if motif == "oct":
e_g = spin_config["e_g"]
t_2g = spin_config["t_2g"]
if (e_g % 2 != 0) or (t_2g % 3 != 0):
magnitude = "weak"
if e_g % 2 == 1:
magnitude = "strong"
elif motif == "tet":
e = spin_config["e"]
t_2 = spin_config["t_2"]
if (e % 3 != 0) or (t_2 % 2 != 0):
magnitude = "weak"
return magnitude
@staticmethod
def _estimate_spin_state(
species: Union[str, Specie], motif: str, known_magmom: float
) -> str:
"""Simple heuristic to estimate spin state. If magnetic moment
is sufficiently close to that predicted for a given spin state,
we assign it that state. If we only have data for one spin
state then that's the one we use (e.g. we assume all tetrahedral
complexes are high-spin, since this is typically the case).
Args:
species: str or Species
motif: "oct" or "tet"
known_magmom: magnetic moment in Bohr magnetons
Returns: "undefined" (if only one spin state possible), "low",
"high" or "unknown"
"""
mu_so_high = JahnTellerAnalyzer.mu_so(species, motif=motif, spin_state="high")
mu_so_low = JahnTellerAnalyzer.mu_so(species, motif=motif, spin_state="low")
if mu_so_high == mu_so_low:
return "undefined" # undefined or only one spin state possible
elif mu_so_high is None:
return "low"
elif mu_so_low is None:
return "high"
else:
diff = mu_so_high - mu_so_low
# WARNING! this heuristic has not been robustly tested or benchmarked
# using 'diff*0.25' as arbitrary measure, if known magmom is
# too far away from expected value, we don't try to classify it
if (
known_magmom > mu_so_high
or abs(mu_so_high - known_magmom) < diff * 0.25
):
return "high"
elif (
known_magmom < mu_so_low or abs(mu_so_low - known_magmom) < diff * 0.25
):
return "low"
else:
return "unknown"
@staticmethod
def mu_so(
species: Union[str, Specie], motif: str, spin_state: str
) -> Optional[float]:
"""Calculates the spin-only magnetic moment for a
given species. Only supports transition metals.
Args:
species: Species
motif: "oct" or "tet"
spin_state: "high" or "low"
Returns:
Spin-only magnetic moment in Bohr magnetons or None if
species crystal field not defined
"""
try:
sp = get_el_sp(species)
n = sp.get_crystal_field_spin(coordination=motif, spin_config=spin_state)
# calculation spin-only magnetic moment for this number of unpaired spins
return np.sqrt(n * (n + 2))
except AttributeError:
return None
|
gVallverdu/pymatgen
|
pymatgen/analysis/magnetism/jahnteller.py
|
Python
|
mit
| 21,643
|
[
"CRYSTAL",
"pymatgen"
] |
8747772dedd2617779f14ce52fadf9698c63c4add3095d491f7021f7f3fc2298
|
"""
Student Views
"""
import datetime
import json
import logging
import uuid
import warnings
from collections import defaultdict, namedtuple
from urlparse import parse_qs, urlsplit, urlunsplit
import django
import analytics
import edx_oauth2_provider
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import authenticate, load_backend, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import AnonymousUser, User
from django.contrib.auth.views import password_reset_confirm
from django.core import mail
from django.template.context_processors import csrf
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
from django.core.urlresolvers import NoReverseMatch, reverse, reverse_lazy
from django.core.validators import ValidationError, validate_email
from django.db import IntegrityError, transaction
from django.db.models.signals import post_save
from django.dispatch import Signal, receiver
from django.http import Http404, HttpResponse, HttpResponseBadRequest, HttpResponseForbidden
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from django.utils.encoding import force_bytes, force_text
from django.utils.http import base36_to_int, is_safe_url, urlencode, urlsafe_base64_encode
from django.utils.translation import ugettext as _
from django.utils.translation import get_language, ungettext
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
from django.views.decorators.http import require_GET, require_POST
from django.views.generic import TemplateView
from ipware.ip import get_ip
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import CourseLocator
from provider.oauth2.models import Client
from pytz import UTC
from ratelimitbackend.exceptions import RateLimitException
from requests import HTTPError
from social_core.backends import oauth as social_oauth
from social_core.exceptions import AuthAlreadyAssociated, AuthException
from social_django import utils as social_utils
import dogstats_wrapper as dog_stats_api
import openedx.core.djangoapps.external_auth.views
import third_party_auth
from third_party_auth.saml import SAP_SUCCESSFACTORS_SAML_KEY
import track.views
from bulk_email.models import BulkEmailFlag, Optout # pylint: disable=import-error
from certificates.api import get_certificate_url, has_html_certificates_enabled # pylint: disable=import-error
from certificates.models import ( # pylint: disable=import-error
CertificateStatuses,
certificate_status_for_student
)
from course_modes.models import CourseMode
from courseware.access import has_access
from courseware.courses import get_courses, sort_by_announcement, sort_by_start_date # pylint: disable=import-error
from django_comment_common.models import assign_role
from edxmako.shortcuts import render_to_response, render_to_string
from eventtracking import tracker
from lms.djangoapps.commerce.utils import EcommerceService # pylint: disable=import-error
from lms.djangoapps.grades.course_grade_factory import CourseGradeFactory
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification # pylint: disable=import-error
# Note that this lives in LMS, so this dependency should be refactored.
from notification_prefs.views import enable_notifications
from openedx.core.djangoapps import monitoring_utils
from openedx.core.djangoapps.catalog.utils import get_programs_with_type
from openedx.core.djangoapps.certificates.api import certificates_viewable_for_course
from openedx.core.djangoapps.credit.email_utils import get_credit_provider_display_names, make_providers_strings
from openedx.core.djangoapps.embargo import api as embargo_api
from openedx.core.djangoapps.external_auth.login_and_register import login as external_auth_login
from openedx.core.djangoapps.external_auth.login_and_register import register as external_auth_register
from openedx.core.djangoapps.external_auth.models import ExternalAuthMap
from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
from openedx.core.djangoapps.programs.utils import ProgramProgressMeter
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.theming import helpers as theming_helpers
from openedx.core.djangoapps.user_api.preferences import api as preferences_api
from openedx.core.djangolib.markup import HTML
from openedx.features.course_experience import course_home_url_name
from openedx.features.enterprise_support.api import get_dashboard_consent_notification
from shoppingcart.api import order_history
from shoppingcart.models import CourseRegistrationCode, DonationConfiguration
from student.cookies import delete_logged_in_cookies, set_logged_in_cookies, set_user_info_cookie
from student.forms import AccountCreationForm, PasswordResetFormNoActive, get_registration_extension_form
from student.helpers import (
DISABLE_UNENROLL_CERT_STATES,
auth_pipeline_urls,
check_verify_status_by_course,
destroy_oauth_tokens,
get_next_url_for_login_page
)
from student.models import (
ALLOWEDTOENROLL_TO_ENROLLED,
CourseAccessRole,
CourseEnrollment,
CourseEnrollmentAllowed,
CourseEnrollmentAttribute,
DashboardConfiguration,
LinkedInAddToProfileConfiguration,
LoginFailures,
ManualEnrollmentAudit,
PasswordHistory,
PendingEmailChange,
Registration,
RegistrationCookieConfiguration,
UserAttribute,
UserProfile,
UserSignupSource,
UserStanding,
anonymous_id_for_user,
create_comments_service_user,
unique_id_for_user
)
from student.signals import REFUND_ORDER
from student.tasks import send_activation_email
from third_party_auth import pipeline, provider
from util.bad_request_rate_limiter import BadRequestRateLimiter
from util.db import outer_atomic
from util.json_request import JsonResponse
from util.milestones_helpers import get_pre_requisite_courses_not_completed
from util.password_policy_validators import validate_password_length, validate_password_strength
from xmodule.modulestore.django import modulestore
# Module-level loggers: "edx.student" for general student events, and a
# separate "audit" logger for the security/compliance audit trail.
log = logging.getLogger("edx.student")
AUDIT_LOG = logging.getLogger("audit")

# Lightweight record describing the reverification state of one course enrollment.
ReverifyInfo = namedtuple('ReverifyInfo', 'course_id course_name course_number date status display')  # pylint: disable=invalid-name

# Tracking-event name emitted when a user-settings change is initiated.
SETTING_CHANGE_INITIATED = 'edx.user.settings.change_initiated'
# Used as the name of the user attribute for tracking affiliate registrations
REGISTRATION_AFFILIATE_ID = 'registration_affiliate_id'
# Maps inbound UTM query parameters to the UserAttribute names they are stored under.
REGISTRATION_UTM_PARAMETERS = {
    'utm_source': 'registration_utm_source',
    'utm_medium': 'registration_utm_medium',
    'utm_campaign': 'registration_utm_campaign',
    'utm_term': 'registration_utm_term',
    'utm_content': 'registration_utm_content',
}
REGISTRATION_UTM_CREATED_AT = 'registration_utm_created_at'
# used to announce a registration
REGISTER_USER = Signal(providing_args=["user", "registration"])

# TODO: Remove Django 1.11 upgrade shim
# SHIM: Compensate for behavior change of default authentication backend in 1.10
if django.VERSION[0] == 1 and django.VERSION[1] < 10:
    NEW_USER_AUTH_BACKEND = 'django.contrib.auth.backends.ModelBackend'
else:
    # We want to allow inactive users to log in only when their account is first created
    NEW_USER_AUTH_BACKEND = 'django.contrib.auth.backends.AllowAllUsersModelBackend'

# Disable this warning because it doesn't make sense to completely refactor tests to appease Pylint
# pylint: disable=logging-format-interpolation
def csrf_token(context):
    """Return an HTML snippet embedding the CSRF token as a hidden form input."""
    tok = context.get('csrf_token', '')
    # Django supplies the sentinel string 'NOTPROVIDED' when no token exists.
    if tok == 'NOTPROVIDED':
        return ''
    return (
        u'<div style="display:none"><input type="hidden"'
        u' name="csrfmiddlewaretoken" value="%s" /></div>' % (tok)
    )
# NOTE: This view is not linked to directly--it is called from
# branding/views.py:index(), which is cached for anonymous users.
# This means that it should always return the same thing for anon
# users. (in particular, no switching based on query params allowed)
def index(request, extra_context=None, user=AnonymousUser()):
    """
    Render the edX main page.

    extra_context is used to allow immediate display of certain modal windows, eg signup,
    as used by external_auth.

    Arguments:
        request: the incoming request; ``request.site`` is used to look up programs.
        extra_context (dict): optional values merged into the template context
            just before rendering.
        user (User): the user whose course list should be shown.  The shared
            AnonymousUser default is never mutated here, so reuse is safe.

    Returns:
        HttpResponse: the rendered 'index.html' page.
    """
    # Fix: removed the unused local ``programs_list = []`` — the template value
    # is always taken from get_programs_with_type() below.
    if extra_context is None:
        extra_context = {}

    courses = get_courses(user)

    if configuration_helpers.get_value(
            "ENABLE_COURSE_SORTING_BY_START_DATE",
            settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"],
    ):
        courses = sort_by_start_date(courses)
    else:
        courses = sort_by_announcement(courses)

    context = {'courses': courses}

    context['homepage_overlay_html'] = configuration_helpers.get_value('homepage_overlay_html')

    # This appears to be an unused context parameter, at least for the master templates...
    context['show_partners'] = configuration_helpers.get_value('show_partners', True)

    # TO DISPLAY A YOUTUBE WELCOME VIDEO
    # 1) Change False to True
    context['show_homepage_promo_video'] = configuration_helpers.get_value('show_homepage_promo_video', False)

    # Maximum number of courses to display on the homepage.
    context['homepage_course_max'] = configuration_helpers.get_value(
        'HOMEPAGE_COURSE_MAX', settings.HOMEPAGE_COURSE_MAX
    )

    # 2) Add your video's YouTube ID (11 chars, eg "123456789xX"), or specify via site configuration
    # Note: This value should be moved into a configuration setting and plumbed-through to the
    # context via the site configuration workflow, versus living here
    youtube_video_id = configuration_helpers.get_value('homepage_promo_video_youtube_id', "your-youtube-id")
    context['homepage_promo_video_youtube_id'] = youtube_video_id

    # allow for theme override of the courses list
    context['courses_list'] = theming_helpers.get_template_path('courses_list.html')

    # Insert additional context for use in the template
    context.update(extra_context)

    # Add marketable programs to the context.
    context['programs_list'] = get_programs_with_type(request.site, include_hidden=False)

    return render_to_response('index.html', context)
def process_survey_link(survey_link, user):
    """
    Personalize a survey URL for ``user``.

    Any ``{UNIQUE_ID}`` placeholder in ``survey_link`` is substituted with an
    anonymized id for the user (currently sha1(user.username)); links without
    the placeholder are returned unchanged.
    """
    anonymized_id = unique_id_for_user(user)
    return survey_link.format(UNIQUE_ID=anonymized_id)
def cert_info(user, course_overview, course_mode):
    """
    Get the certificate info needed to render the dashboard section for the given
    student and course.

    Arguments:
        user (User): A user.
        course_overview (CourseOverview): A course.
        course_mode (str): The enrollment mode (honor, verified, audit, etc.)

    Returns:
        dict: A dictionary with keys:
            'status': one of 'generating', 'downloadable', 'notpassing', 'processing', 'restricted', 'unavailable', or
                'certificate_earned_but_not_available'
            'download_url': url, only present if show_download_url is True
            'show_survey_button': bool
            'survey_url': url, only if show_survey_button is True
            'grade': if status is not 'processing'
            'can_unenroll': if status allows for unenrollment
    """
    # Look up the raw certificate status first, then delegate all of the
    # interpretation logic to the testable helper.
    cert_status = certificate_status_for_student(user, course_overview.id)
    return _cert_info(user, course_overview, cert_status, course_mode)
def reverification_info(statuses):
    """
    Return reverification-related information for *all* of the user's
    enrollments, grouped by reverification status.

    Args:
        statuses (list): reverification statuses of interest,
            e.g. ["must_reverify", "denied"]

    Returns:
        defaultdict(list): one key per requested status; each value is the
        (possibly empty) list of matching records, sorted by their ``date``
        attribute (the reverification end date).
    """
    info_by_status = defaultdict(list)
    for status in statuses:
        # Touching the key guarantees it exists even when there is nothing to
        # report for this status; sort whatever entries are present by date.
        entries = info_by_status[status]
        if entries:
            entries.sort(key=lambda entry: entry.date)
    return info_by_status
def get_course_enrollments(user, org_whitelist, org_blacklist):
    """
    Given a user, return a filtered set of his or her course enrollments.

    Arguments:
        user (User): the user in question.
        org_whitelist (list[str]): If not None, ONLY courses of these orgs will be returned.
        org_blacklist (list[str]): Courses of these orgs will be excluded.

    Returns:
        generator[CourseEnrollment]: a sequence of enrollments to be displayed
        on the user's dashboard.
    """
    for enrollment in CourseEnrollment.enrollments_for_user_with_overviews_preload(user):
        course_overview = enrollment.course_overview

        # If the course is missing or broken, log an error and skip it.
        if not course_overview:
            log.error(
                "User %s enrolled in broken or non-existent course %s",
                user.username,
                enrollment.course_id
            )
            continue

        org = course_overview.location.org

        # Drop anything outside the whitelist (when one is configured).
        if org_whitelist and org not in org_whitelist:
            continue

        # Drop anything inside the blacklist.
        if org_blacklist and org in org_blacklist:
            continue

        yield enrollment
def get_org_black_and_whitelist_for_site(user):
    """
    Return the org blacklist and whitelist for the current site.

    The ``user`` parameter is accepted for interface compatibility but is not
    referenced in this implementation.

    Returns:
        (org_whitelist, org_blacklist): A tuple of lists of orgs that serve as
            either a blacklist or a whitelist of orgs for the current site. The
            whitelist takes precedence, and the blacklist is used if the
            whitelist is None.
    """
    # Each site outside of edx.org has a list of orgs associated with its
    # configuration; those form the whitelist.
    org_whitelist = configuration_helpers.get_current_site_orgs()

    if org_whitelist:
        # A configured whitelist takes precedence — no blacklist needed.
        org_blacklist = None
    else:
        # No whitelist (the edx.org case): blacklist every org that has been
        # configured for any other site instead.
        org_blacklist = configuration_helpers.get_all_orgs()

    return (org_whitelist, org_blacklist)
def _cert_info(user, course_overview, cert_status, course_mode):  # pylint: disable=unused-argument
    """
    Implements the logic for cert_info -- split out for testing.

    Arguments:
        user (User): A user.
        course_overview (CourseOverview): A course.
        cert_status (dict): dictionary from certificate_status_for_student;
            may be None when no certificate record exists.
        course_mode (str): The enrollment mode (honor, verified, audit, etc.);
            currently unused here but kept for interface compatibility.

    Returns:
        dict: certificate display info for the dashboard (see cert_info).
    """
    # Fix: replaced the non-idiomatic ``if not status == default_status``
    # comparison below with ``status != default_status`` (same semantics).

    # simplify the status for the template using this lookup table
    template_state = {
        CertificateStatuses.generating: 'generating',
        CertificateStatuses.downloadable: 'downloadable',
        CertificateStatuses.notpassing: 'notpassing',
        CertificateStatuses.restricted: 'restricted',
        CertificateStatuses.auditing: 'auditing',
        CertificateStatuses.audit_passing: 'auditing',
        CertificateStatuses.audit_notpassing: 'auditing',
        CertificateStatuses.unverified: 'unverified',
    }

    certificate_earned_but_not_available_status = 'certificate_earned_but_not_available'
    default_status = 'processing'

    default_info = {
        'status': default_status,
        'show_survey_button': False,
        'can_unenroll': True,
    }

    if cert_status is None:
        return default_info

    status = template_state.get(cert_status['status'], default_status)
    is_hidden_status = status in ('unavailable', 'processing', 'generating', 'notpassing', 'auditing')

    # Passed certificates are held back until the certificate_available_date
    # when the course does not allow early viewing.
    if (
        not certificates_viewable_for_course(course_overview) and
        (status in CertificateStatuses.PASSED_STATUSES) and
        course_overview.certificate_available_date
    ):
        status = certificate_earned_but_not_available_status

    if (
        course_overview.certificates_display_behavior == 'early_no_info' and
        is_hidden_status
    ):
        return default_info

    status_dict = {
        'status': status,
        'mode': cert_status.get('mode', None),
        'linked_in_url': None,
        'can_unenroll': status not in DISABLE_UNENROLL_CERT_STATES,
    }

    if status != default_status and course_overview.end_of_course_survey_url is not None:
        status_dict.update({
            'show_survey_button': True,
            'survey_url': process_survey_link(course_overview.end_of_course_survey_url, user)})
    else:
        status_dict['show_survey_button'] = False

    if status == 'downloadable':
        # showing the certificate web view button if certificate is downloadable state and feature flags are enabled.
        if has_html_certificates_enabled(course_overview):
            if course_overview.has_any_active_web_certificate:
                status_dict.update({
                    'show_cert_web_view': True,
                    'cert_web_view_url': get_certificate_url(course_id=course_overview.id, uuid=cert_status['uuid'])
                })
            else:
                # don't show download certificate button if we don't have an active certificate for course
                status_dict['status'] = 'unavailable'
        elif 'download_url' not in cert_status:
            log.warning(
                u"User %s has a downloadable cert for %s, but no download url",
                user.username,
                course_overview.id
            )
            return default_info
        else:
            status_dict['download_url'] = cert_status['download_url']

            # If enabled, show the LinkedIn "add to profile" button
            # Clicking this button sends the user to LinkedIn where they
            # can add the certificate information to their profile.
            linkedin_config = LinkedInAddToProfileConfiguration.current()

            # posting certificates to LinkedIn is not currently
            # supported in White Labels
            if linkedin_config.enabled and not theming_helpers.is_request_in_themed_site():
                status_dict['linked_in_url'] = linkedin_config.add_to_profile_url(
                    course_overview.id,
                    course_overview.display_name,
                    cert_status.get('mode'),
                    cert_status['download_url']
                )

    if status in {'generating', 'downloadable', 'notpassing', 'restricted', 'auditing', 'unverified'}:
        cert_grade_percent = -1
        persisted_grade_percent = -1
        persisted_grade = CourseGradeFactory().read(user, course=course_overview, create_if_needed=False)
        if persisted_grade is not None:
            persisted_grade_percent = persisted_grade.percent

        if 'grade' in cert_status:
            cert_grade_percent = float(cert_status['grade'])

        if cert_grade_percent == -1 and persisted_grade_percent == -1:
            # Note: as of 11/20/2012, we know there are students in this state-- cs169.1x,
            # who need to be regraded (we weren't tracking 'notpassing' at first).
            # We can add a log.warning here once we think it shouldn't happen.
            return default_info

        # Show the better of the certificate grade and the persisted grade.
        status_dict['grade'] = unicode(max(cert_grade_percent, persisted_grade_percent))

    return status_dict
@ensure_csrf_cookie
def signin_user(request):
    """Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`.

    Renders the standalone login page, or hands off to external auth / a
    redirect when the user is already authenticated.
    """
    external_auth_response = external_auth_login(request)
    if external_auth_response is not None:
        return external_auth_response

    # Determine the URL to redirect to following login:
    redirect_to = get_next_url_for_login_page(request)
    if request.user.is_authenticated():
        return redirect(redirect_to)

    third_party_auth_error = None
    for msg in messages.get_messages(request):
        # Fix: extra_tags defaults to '' in Django's messages framework, in
        # which case split() returns [] and split()[0] raised IndexError.
        if msg.extra_tags and msg.extra_tags.split()[0] == "social-auth":
            # msg may or may not be translated. Try translating [again] in case we are able to:
            third_party_auth_error = _(unicode(msg))  # pylint: disable=translation-of-non-string
            break

    context = {
        'login_redirect_url': redirect_to,  # This gets added to the query string of the "Sign In" button in the header
        # Bool injected into JS to submit form if we're inside a running third-
        # party auth pipeline; distinct from the actual instance of the running
        # pipeline, if any.
        'pipeline_running': 'true' if pipeline.running(request) else 'false',
        'pipeline_url': auth_pipeline_urls(pipeline.AUTH_ENTRY_LOGIN, redirect_url=redirect_to),
        'platform_name': configuration_helpers.get_value(
            'platform_name',
            settings.PLATFORM_NAME
        ),
        'third_party_auth_error': third_party_auth_error
    }

    return render_to_response('login.html', context)
@ensure_csrf_cookie
def register_user(request, extra_context=None):
    """Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`.

    Renders the standalone registration page, deferring to external auth,
    Shibboleth, or third-party-auth prefill where applicable.
    """
    # Work out where the user should land after logging in.
    redirect_to = get_next_url_for_login_page(request)
    if request.user.is_authenticated():
        return redirect(redirect_to)

    external_response = external_auth_register(request)
    if external_response is not None:
        return external_response

    context = {
        'login_redirect_url': redirect_to,  # This gets added to the query string of the "Sign In" button in the header
        'email': '',
        'name': '',
        'running_pipeline': None,
        'pipeline_urls': auth_pipeline_urls(pipeline.AUTH_ENTRY_REGISTER, redirect_url=redirect_to),
        'platform_name': configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME),
        'selected_provider': '',
        'username': '',
    }

    if extra_context is not None:
        context.update(extra_context)

    # Shibboleth-authenticated users get a dedicated registration template.
    if context.get("extauth_domain", '').startswith(
            openedx.core.djangoapps.external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
        return render_to_response('register-shib.html', context)

    # If third-party auth is enabled, prepopulate the form with data from the
    # selected provider.
    if third_party_auth.is_enabled() and pipeline.running(request):
        running_pipeline = pipeline.get(request)
        current_provider = provider.Registry.get_from_pipeline(running_pipeline)
        if current_provider is not None:
            overrides = current_provider.get_register_form_data(running_pipeline.get('kwargs'))
            overrides['running_pipeline'] = running_pipeline
            overrides['selected_provider'] = current_provider.name
            context.update(overrides)

    return render_to_response('register.html', context)
def complete_course_mode_info(course_id, enrollment, modes=None):
    """
    Compute extra information from the given course modes and the user's
    current enrollment.

    Returns a dict with:
        - 'show_upsell': whether to show the course upsell information
        - 'days_for_upsell': number of days until they can't upsell anymore
          (None when the verified mode has no expiration date)
        - 'verified_sku' / 'verified_bulk_sku': only present when upselling
    """
    if modes is None:
        modes = CourseMode.modes_for_course_dict(course_id)

    mode_info = {'show_upsell': False, 'days_for_upsell': None}

    # Upsell only applies when a verified mode exists and the learner's
    # current mode is one we upsell from.
    if CourseMode.VERIFIED not in modes or enrollment.mode not in CourseMode.UPSELL_TO_VERIFIED_MODES:
        return mode_info

    verified_mode = modes['verified']
    mode_info['show_upsell'] = True
    mode_info['verified_sku'] = verified_mode.sku
    mode_info['verified_bulk_sku'] = verified_mode.bulk_sku

    # if there is an expiration date, find out how long from now it is
    if verified_mode.expiration_datetime:
        today = datetime.datetime.now(UTC).date()
        mode_info['days_for_upsell'] = (verified_mode.expiration_datetime.date() - today).days

    return mode_info
def is_course_blocked(request, redeemed_registration_codes, course_key):
    """
    Return True if the user's registration for this course is blocked.

    A registration is considered blocked when any redeemed registration code
    was generated from an invoice that is no longer valid (bulk-purchase
    scenario).  Side effects on block: the user is opted out of course emails
    (Optout get_or_create) and the change is logged and tracked.

    Arguments:
        request: the request; ``request.user`` is the student being checked.
        redeemed_registration_codes: iterable of registration-code records
            redeemed by this user for this course — presumably a
            CourseRegistrationCode queryset (see callers); verify.
        course_key (CourseKey): the course being checked.
    """
    blocked = False
    for redeemed_registration in redeemed_registration_codes:
        # registration codes may be generated via Bulk Purchase Scenario
        # we have to check only for the invoice generated registration codes
        # that their invoice is valid or not
        if redeemed_registration.invoice_item:
            if not redeemed_registration.invoice_item.invoice.is_valid:
                blocked = True
                # disabling email notifications for unpaid registration courses
                Optout.objects.get_or_create(user=request.user, course_id=course_key)
                log.info(
                    u"User %s (%s) opted out of receiving emails from course %s",
                    request.user.username,
                    request.user.email,
                    course_key,
                )
                track.views.server_track(
                    request,
                    "change-email1-settings",
                    {"receive_emails": "no", "course": course_key.to_deprecated_string()},
                    page='dashboard',
                )
                # One invalid invoice is enough to block; stop scanning.
                break

    return blocked
def generate_activation_email_context(user, registration):
    """
    Build the template context used to render account-activation emails.

    Arguments:
        user (User): Currently logged-in user
        registration (Registration): Registration object for the currently
            logged-in user; supplies the activation key.
    """
    # Site-configuration values override the corresponding Django settings.
    get_setting = configuration_helpers.get_value
    return {
        'name': user.profile.name,
        'key': registration.activation_key,
        'lms_url': get_setting('LMS_ROOT_URL', settings.LMS_ROOT_URL),
        'platform_name': get_setting('PLATFORM_NAME', settings.PLATFORM_NAME),
        'support_url': get_setting('SUPPORT_SITE_LINK', settings.SUPPORT_SITE_LINK),
        'support_email': get_setting('CONTACT_EMAIL', settings.CONTACT_EMAIL),
    }
def compose_and_send_activation_email(user, profile, user_registration=None):
    """
    Render the activation email for ``user`` and queue it through the celery
    task.

    Arguments:
        user: current logged-in user
        profile: profile object of the current logged-in user
        user_registration: registration of the current logged-in user;
            looked up from the database when not supplied.
    """
    if user_registration is None:
        user_registration = Registration.objects.get(user=user)

    context = generate_activation_email_context(user, user_registration)

    # Email subject *must not* contain newlines
    subject = ''.join(
        render_to_string('emails/activation_email_subject.txt', context).splitlines()
    )
    message_for_activation = render_to_string('emails/activation_email.txt', context)

    # Site configuration may override the default sender, and a dedicated
    # activation-email sender overrides that in turn.
    from_address = configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
    from_address = configuration_helpers.get_value('ACTIVATION_EMAIL_FROM_ADDRESS', from_address)

    dest_addr = user.email
    if settings.FEATURES.get('REROUTE_ACTIVATION_EMAIL'):
        # Reroute to a configured address (e.g. in sandboxes), prefixing the
        # body with the original recipient's details.
        dest_addr = settings.FEATURES['REROUTE_ACTIVATION_EMAIL']
        message_for_activation = ("Activation for %s (%s): %s\n" % (user, user.email, profile.name) +
                                  '-' * 80 + '\n\n' + message_for_activation)

    send_activation_email.delay(subject, message_for_activation, from_address, dest_addr)
@login_required
@ensure_csrf_cookie
def dashboard(request):
    """
    Provides the LMS dashboard view

    TODO: This is lms specific and does not belong in common code.

    Arguments:
        request: The request object.

    Returns:
        The dashboard response.
    """
    user = request.user
    # A user with no profile (e.g. created through an unusual path) cannot
    # render the dashboard; send them to account settings instead.
    if not UserProfile.objects.filter(user=user).exists():
        return redirect(reverse('account_settings'))

    platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME)
    enable_verified_certificates = configuration_helpers.get_value(
        'ENABLE_VERIFIED_CERTIFICATES',
        settings.FEATURES.get('ENABLE_VERIFIED_CERTIFICATES')
    )
    display_course_modes_on_dashboard = configuration_helpers.get_value(
        'DISPLAY_COURSE_MODES_ON_DASHBOARD',
        settings.FEATURES.get('DISPLAY_COURSE_MODES_ON_DASHBOARD', True)
    )
    activation_email_support_link = configuration_helpers.get_value(
        'ACTIVATION_EMAIL_SUPPORT_LINK', settings.ACTIVATION_EMAIL_SUPPORT_LINK
    ) or settings.SUPPORT_SITE_LINK

    # get the org whitelist or the org blacklist for the current site
    site_org_whitelist, site_org_blacklist = get_org_black_and_whitelist_for_site(user)
    course_enrollments = list(get_course_enrollments(user, site_org_whitelist, site_org_blacklist))

    # Record how many courses there are so that we can get a better
    # understanding of usage patterns on prod.
    monitoring_utils.accumulate('num_courses', len(course_enrollments))

    # sort the enrollment pairs by the enrollment date
    course_enrollments.sort(key=lambda x: x.created, reverse=True)

    # Retrieve the course modes for each course
    enrolled_course_ids = [enrollment.course_id for enrollment in course_enrollments]
    __, unexpired_course_modes = CourseMode.all_and_unexpired_modes_for_courses(enrolled_course_ids)
    course_modes_by_course = {
        course_id: {
            mode.slug: mode
            for mode in modes
        }
        for course_id, modes in unexpired_course_modes.iteritems()
    }

    # Check to see if the student has recently enrolled in a course.
    # If so, display a notification message confirming the enrollment.
    enrollment_message = _create_recent_enrollment_message(
        course_enrollments, course_modes_by_course
    )
    course_optouts = Optout.objects.filter(user=user).values_list('course_id', flat=True)

    sidebar_account_activation_message = ''
    banner_account_activation_message = ''
    display_account_activation_message_on_sidebar = configuration_helpers.get_value(
        'DISPLAY_ACCOUNT_ACTIVATION_MESSAGE_ON_SIDEBAR',
        settings.FEATURES.get('DISPLAY_ACCOUNT_ACTIVATION_MESSAGE_ON_SIDEBAR', False)
    )

    # Display activation message in sidebar if DISPLAY_ACCOUNT_ACTIVATION_MESSAGE_ON_SIDEBAR
    # flag is active. Otherwise display existing message at the top.
    if display_account_activation_message_on_sidebar and not user.is_active:
        sidebar_account_activation_message = render_to_string(
            'registration/account_activation_sidebar_notice.html',
            {
                'email': user.email,
                'platform_name': platform_name,
                'activation_email_support_link': activation_email_support_link
            }
        )
    elif not user.is_active:
        banner_account_activation_message = render_to_string(
            'registration/activate_account_notice.html',
            {'email': user.email}
        )

    enterprise_message = get_dashboard_consent_notification(request, user, course_enrollments)

    # Disable lookup of Enterprise consent_required_course due to ENT-727
    # Will re-enable after fixing WL-1315
    consent_required_courses = set()
    enterprise_customer_name = None

    # Account activation message
    account_activation_messages = [
        message for message in messages.get_messages(request) if 'account-activation' in message.tags
    ]

    # Global staff can see what courses encountered an error on their dashboard
    staff_access = False
    errored_courses = {}
    if has_access(user, 'staff', 'global'):
        # Show any courses that encountered an error on load
        staff_access = True
        errored_courses = modulestore().get_errored_courses()

    show_courseware_links_for = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if has_access(request.user, 'load', enrollment.course_overview)
    )

    # Find programs associated with course runs being displayed. This information
    # is passed in the template context to allow rendering of program-related
    # information on the dashboard.
    meter = ProgramProgressMeter(request.site, user, enrollments=course_enrollments)
    inverted_programs = meter.invert_programs()

    # Construct a dictionary of course mode information
    # used to render the course list.  We re-use the course modes dict
    # we loaded earlier to avoid hitting the database.
    course_mode_info = {
        enrollment.course_id: complete_course_mode_info(
            enrollment.course_id, enrollment,
            modes=course_modes_by_course[enrollment.course_id]
        )
        for enrollment in course_enrollments
    }

    # Determine the per-course verification status
    # This is a dictionary in which the keys are course locators
    # and the values are one of:
    #
    # VERIFY_STATUS_NEED_TO_VERIFY
    # VERIFY_STATUS_SUBMITTED
    # VERIFY_STATUS_APPROVED
    # VERIFY_STATUS_MISSED_DEADLINE
    #
    # Each of which correspond to a particular message to display
    # next to the course on the dashboard.
    #
    # If a course is not included in this dictionary,
    # there is no verification messaging to display.
    verify_status_by_course = check_verify_status_by_course(user, course_enrollments)
    cert_statuses = {
        enrollment.course_id: cert_info(request.user, enrollment.course_overview, enrollment.mode)
        for enrollment in course_enrollments
    }

    # only show email settings for Mongo course and when bulk email is turned on
    show_email_settings_for = frozenset(
        enrollment.course_id for enrollment in course_enrollments if (
            BulkEmailFlag.feature_enabled(enrollment.course_id)
        )
    )

    # Verification Attempts
    # Used to generate the "you must reverify for course x" banner
    verification_status, verification_error_codes = SoftwareSecurePhotoVerification.user_status(user)
    verification_errors = get_verification_error_reasons_for_display(verification_error_codes)

    # Gets data for midcourse reverifications, if any are necessary or have failed
    statuses = ["approved", "denied", "pending", "must_reverify"]
    reverifications = reverification_info(statuses)

    # Courses for which registration is blocked by an invalid invoice.
    block_courses = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if is_course_blocked(
            request,
            CourseRegistrationCode.objects.filter(
                course_id=enrollment.course_id,
                registrationcoderedemption__redeemed_by=request.user
            ),
            enrollment.course_id
        )
    )

    enrolled_courses_either_paid = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if enrollment.is_paid_course()
    )

    # If there are *any* denied reverifications that have not been toggled off,
    # we'll display the banner
    denied_banner = any(item.display for item in reverifications["denied"])

    # Populate the Order History for the side-bar.
    order_history_list = order_history(user, course_org_filter=site_org_whitelist, org_filter_out_set=site_org_blacklist)

    # get list of courses having pre-requisites yet to be completed
    courses_having_prerequisites = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if enrollment.course_overview.pre_requisite_courses
    )
    courses_requirements_not_met = get_pre_requisite_courses_not_completed(user, courses_having_prerequisites)

    # Explain redirects into the dashboard (course not live / closed).
    if 'notlive' in request.GET:
        redirect_message = _("The course you are looking for does not start until {date}.").format(
            date=request.GET['notlive']
        )
    elif 'course_closed' in request.GET:
        redirect_message = _("The course you are looking for is closed for enrollment as of {date}.").format(
            date=request.GET['course_closed']
        )
    else:
        redirect_message = ''

    valid_verification_statuses = ['approved', 'must_reverify', 'pending', 'expired']
    display_sidebar_on_dashboard = len(order_history_list) or verification_status in valid_verification_statuses

    context = {
        'enterprise_message': enterprise_message,
        'consent_required_courses': consent_required_courses,
        'enterprise_customer_name': enterprise_customer_name,
        'enrollment_message': enrollment_message,
        'redirect_message': redirect_message,
        'account_activation_messages': account_activation_messages,
        'course_enrollments': course_enrollments,
        'course_optouts': course_optouts,
        'banner_account_activation_message': banner_account_activation_message,
        'sidebar_account_activation_message': sidebar_account_activation_message,
        'staff_access': staff_access,
        'errored_courses': errored_courses,
        'show_courseware_links_for': show_courseware_links_for,
        'all_course_modes': course_mode_info,
        'cert_statuses': cert_statuses,
        'credit_statuses': _credit_statuses(user, course_enrollments),
        'show_email_settings_for': show_email_settings_for,
        'reverifications': reverifications,
        'verification_status': verification_status,
        'verification_status_by_course': verify_status_by_course,
        'verification_errors': verification_errors,
        'block_courses': block_courses,
        'denied_banner': denied_banner,
        'billing_email': settings.PAYMENT_SUPPORT_EMAIL,
        'user': user,
        'logout_url': reverse('logout'),
        'platform_name': platform_name,
        'enrolled_courses_either_paid': enrolled_courses_either_paid,
        'provider_states': [],
        'order_history_list': order_history_list,
        'courses_requirements_not_met': courses_requirements_not_met,
        'nav_hidden': True,
        'inverted_programs': inverted_programs,
        'show_program_listing': ProgramsApiConfig.is_enabled(),
        'show_dashboard_tabs': True,
        'disable_courseware_js': True,
        'display_course_modes_on_dashboard': enable_verified_certificates and display_course_modes_on_dashboard,
        'display_sidebar_on_dashboard': display_sidebar_on_dashboard,
    }

    # When e-commerce is enabled, route payments through its checkout page.
    ecommerce_service = EcommerceService()
    if ecommerce_service.is_enabled(request.user):
        context.update({
            'use_ecommerce_payment_flow': True,
            'ecommerce_payment_page': ecommerce_service.payment_page_url(),
        })

    response = render_to_response('dashboard.html', context)
    set_user_info_cookie(response, request)
    return response
@login_required
def course_run_refund_status(request, course_id):
    """
    Get Refundable status for a course.

    Arguments:
        request: The request object.
        course_id (str): The unique identifier for the course.

    Returns:
        Json response with `course_refundable_status` (bool, or '' on error).
    """
    try:
        course_key = CourseKey.from_string(course_id)
        course_enrollment = CourseEnrollment.get_enrollment(request.user, course_key)
    except InvalidKeyError:
        logging.exception("The course key used to get refund status caused InvalidKeyError during look up.")
        return JsonResponse({'course_refundable_status': ''}, status=406)

    if course_enrollment is None:
        # get_enrollment returns None when the user has no enrollment in the
        # course; previously this fell through to an AttributeError (HTTP 500).
        logging.warning(
            "Refund status requested for course %s but user %s has no enrollment.",
            course_id, request.user.id
        )
        return JsonResponse({'course_refundable_status': ''}, status=406)

    refundable_status = course_enrollment.refundable()
    logging.info("Course refund status for course {0} is {1}".format(course_id, refundable_status))
    return JsonResponse({'course_refundable_status': refundable_status}, status=200)
def get_verification_error_reasons_for_display(verification_error_codes):
    """
    Translate photo-verification error codes into localized display strings.

    Arguments:
        verification_error_codes (list[str]): Error codes from the
            verification service.

    Returns:
        list: Localized messages for recognized codes, preserving input
        order. Unrecognized codes are silently dropped.
    """
    verification_error_map = {
        'photos_mismatched': _('Photos are mismatched'),
        'id_image_missing_name': _('Name missing from ID photo'),
        'id_image_missing': _('ID photo not provided'),
        'id_invalid': _('ID is invalid'),
        'user_image_not_clear': _('Learner photo is blurry'),
        'name_mismatch': _('Name on ID does not match name on account'),
        'user_image_missing': _('Learner photo not provided'),
        'id_image_not_clear': _('ID photo is blurry'),
    }
    # Comprehension instead of a manual append loop; skip unknown codes.
    return [
        verification_error_map[code]
        for code in verification_error_codes
        if code in verification_error_map
    ]
def _create_recent_enrollment_message(course_enrollments, course_modes):  # pylint: disable=invalid-name
    """
    Build the HTML message shown for the learner's recent course enrollments.

    Args:
        course_enrollments (list[CourseEnrollment]): a list of course enrollments.
        course_modes (dict): Mapping of course ID's to course mode dictionaries.

    Returns:
        The rendered HTML message, or None when there are no recent enrollments.
    """
    recent_enrollments = _get_recently_enrolled_courses(course_enrollments)
    if not recent_enrollments:
        return None

    num_recent = len(recent_enrollments)
    # Exactly two course names read better joined with a localized 'and'.
    separator = _(' and ') if num_recent == 2 else ', '
    joined_course_names = separator.join(
        enrollment.course_overview.display_name for enrollment in recent_enrollments
    )
    donations_allowed = any(
        _allow_donation(course_modes, enrollment.course_overview.id, enrollment)
        for enrollment in recent_enrollments
    )
    template_context = {
        'course_names': joined_course_names,
        'enrollments_count': num_recent,
        'allow_donations': donations_allowed,
        'platform_name': configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME),
        'course_id': recent_enrollments[0].course_overview.id if num_recent == 1 else None,
    }
    return render_to_string('enrollment/course_enrollment_message.html', template_context)
def _get_recently_enrolled_courses(course_enrollments):
    """
    Filter a list of enrollments down to recent, active ones.

    Args:
        course_enrollments (list[CourseEnrollment]): A list of course enrollments.

    Returns:
        list[CourseEnrollment]: Active enrollments created inside the
        configured "recent" window.
    """
    window_seconds = DashboardConfiguration.current().recent_enrollment_time_delta
    cutoff = datetime.datetime.now(UTC) - datetime.timedelta(seconds=window_seconds)
    recent = []
    for enrollment in course_enrollments:
        # An enrollment with no created date is deliberately excluded from
        # the recent list (the comparison would fail anyway).
        if enrollment.is_active and enrollment.created > cutoff:
            recent.append(enrollment)
    return recent
def _allow_donation(course_modes, course_id, enrollment):
    """Determines if the dashboard will request donations for the given course.

    Check if donations are configured for the platform, and if the current course is accepting donations.

    Args:
        course_modes (dict): Mapping of course ID's to course mode dictionaries.
        course_id (str): The unique identifier for the course.
        enrollment(CourseEnrollment): The enrollment object in which the user is enrolled

    Returns:
        True if the course is allowing donations; False when the course is
        unknown, donations are disabled, or the enrolled mode is not free.
    """
    if course_id not in course_modes:
        flat_unexpired_modes = {
            unicode(course_id): [mode for mode in modes]
            for course_id, modes in course_modes.iteritems()
        }
        flat_all_modes = {
            unicode(course_id): [mode.slug for mode in modes]
            for course_id, modes in CourseMode.all_modes_for_courses([course_id]).iteritems()
        }
        log.error(
            u'Can not find `%s` in course modes.`%s`. All modes: `%s`',
            course_id,
            flat_unexpired_modes,
            flat_all_modes
        )
        # BUGFIX: without this early return, the `course_modes[course_id]`
        # lookup below raised KeyError for an unknown course; treat an
        # unknown course as "no donations".
        return False
    donations_enabled = configuration_helpers.get_value(
        'ENABLE_DONATIONS',
        DonationConfiguration.current().enabled
    )
    return (
        donations_enabled and
        enrollment.mode in course_modes[course_id] and
        course_modes[course_id][enrollment.mode].min_price == 0
    )
def _update_email_opt_in(request, org):
    """Record the learner's email opt-in preference, when one was submitted."""
    opt_in_value = request.POST.get('email_opt_in')
    if opt_in_value is None:
        return
    # The form posts the preference as the string 'true'/'false'.
    preferences_api.update_email_opt_in(request.user, org, opt_in_value == 'true')
def _credit_statuses(user, course_enrollments):
    """
    Retrieve the status for credit courses.

    A credit course is a course for which a user can purchased
    college credit. The current flow is:

    1. User becomes eligible for credit (submits verifications, passes the course, etc.)
    2. User purchases credit from a particular credit provider.
    3. User requests credit from the provider, usually creating an account on the provider's site.
    4. The credit provider notifies us whether the user's request for credit has been accepted or rejected.

    The dashboard is responsible for communicating the user's state in this flow.

    Arguments:
        user (User): The currently logged-in user.
        course_enrollments (list[CourseEnrollment]): List of enrollments for the
            user.

    Returns: dict

    The returned dictionary has keys that are `CourseKey`s and values that
    are dictionaries with:

        * eligible (bool): True if the user is eligible for credit in this course.
        * deadline (datetime): The deadline for purchasing and requesting credit for this course.
        * purchased (bool): Whether the user has purchased credit for this course.
        * provider_name (string): The display name of the credit provider.
        * provider_status_url (string): A URL the user can visit to check on their credit request status.
        * request_status (string): Either "pending", "approved", or "rejected"
        * error (bool): If true, an unexpected error occurred when retrieving the credit status,
            so the user should contact the support team.

    Example:
    >>> _credit_statuses(user, course_enrollments)
    {
        CourseKey.from_string("edX/DemoX/Demo_Course"): {
            "course_key": "edX/DemoX/Demo_Course",
            "eligible": True,
            "deadline": 2015-11-23 00:00:00 UTC,
            "purchased": True,
            "provider_name": "Hogwarts",
            "provider_status_url": "http://example.com/status",
            "request_status": "pending",
            "error": False
        }
    }
    """
    from openedx.core.djangoapps.credit import api as credit_api
    # Feature flag off
    if not settings.FEATURES.get("ENABLE_CREDIT_ELIGIBILITY"):
        return {}
    # Map course key -> status of the user's credit request in that course.
    request_status_by_course = {
        request["course_key"]: request["status"]
        for request in credit_api.get_credit_requests_for_user(user.username)
    }
    # Enrollments where the user has already purchased credit (mode "credit").
    credit_enrollments = {
        enrollment.course_id: enrollment
        for enrollment in course_enrollments
        if enrollment.mode == "credit"
    }
    # When a user purchases credit in a course, the user's enrollment
    # mode is set to "credit" and an enrollment attribute is set
    # with the ID of the credit provider. We retrieve *all* such attributes
    # here to minimize the number of database queries.
    purchased_credit_providers = {
        attribute.enrollment.course_id: attribute.value
        for attribute in CourseEnrollmentAttribute.objects.filter(
            namespace="credit",
            name="provider_id",
            enrollment__in=credit_enrollments.values()
        ).select_related("enrollment")
    }
    # Provider metadata (display name, status URL) keyed by provider ID.
    provider_info_by_id = {
        provider["id"]: provider
        for provider in credit_api.get_credit_providers()
    }
    statuses = {}
    # Build one status dict per course in which the user is credit-eligible.
    for eligibility in credit_api.get_eligibilities_for_user(user.username):
        course_key = CourseKey.from_string(unicode(eligibility["course_key"]))
        providers_names = get_credit_provider_display_names(course_key)
        status = {
            "course_key": unicode(course_key),
            "eligible": True,
            "deadline": eligibility["deadline"],
            "purchased": course_key in credit_enrollments,
            "provider_name": make_providers_strings(providers_names),
            "provider_status_url": None,
            "provider_id": None,
            "request_status": request_status_by_course.get(course_key),
            "error": False,
        }
        # If the user has purchased credit, then include information about the credit
        # provider from which the user purchased credit.
        # We retrieve the provider's ID from the an "enrollment attribute" set on the user's
        # enrollment when the user's order for credit is fulfilled by the E-Commerce service.
        if status["purchased"]:
            provider_id = purchased_credit_providers.get(course_key)
            if provider_id is None:
                status["error"] = True
                log.error(
                    u"Could not find credit provider associated with credit enrollment "
                    u"for user %s in course %s. The user will not be able to see his or her "
                    u"credit request status on the student dashboard. This attribute should "
                    u"have been set when the user purchased credit in the course.",
                    user.id, course_key
                )
            else:
                provider_info = provider_info_by_id.get(provider_id, {})
                status["provider_name"] = provider_info.get("display_name")
                status["provider_status_url"] = provider_info.get("status_url")
                status["provider_id"] = provider_id
        statuses[course_key] = status
    return statuses
@transaction.non_atomic_requests
@require_POST
@outer_atomic(read_committed=True)
def change_enrollment(request, check_access=True):
    """
    Modify the enrollment status for the logged-in user.

    TODO: This is lms specific and does not belong in common code.

    The request parameter must be a POST request (other methods return 405)
    that specifies course_id and enrollment_action parameters. If course_id or
    enrollment_action is not specified, if course_id is not valid, if
    enrollment_action is something other than "enroll" or "unenroll", if
    enrollment_action is "enroll" and enrollment is closed for the course, or
    if enrollment_action is "unenroll" and the user is not enrolled in the
    course, a 400 error will be returned. If the user is not logged in, 403
    will be returned; it is important that only this case return 403 so the
    front end can redirect the user to a registration or login page when this
    happens. This function should only be called from an AJAX request, so
    the error messages in the responses should never actually be user-visible.

    Args:
        request (`Request`): The Django request object

    Keyword Args:
        check_access (boolean): If True, we check that an accessible course actually
            exists for the given course_key before we enroll the student.
            The default is set to False to avoid breaking legacy code or
            code with non-standard flows (ex. beta tester invitations), but
            for any standard enrollment flow you probably want this to be True.

    Returns:
        Response
    """
    # Get the user
    user = request.user
    # Ensure the user is authenticated
    if not user.is_authenticated():
        return HttpResponseForbidden()
    # Ensure we received a course_id
    action = request.POST.get("enrollment_action")
    if 'course_id' not in request.POST:
        return HttpResponseBadRequest(_("Course id not specified"))
    try:
        course_id = CourseKey.from_string(request.POST.get("course_id"))
    except InvalidKeyError:
        log.warning(
            u"User %s tried to %s with invalid course id: %s",
            user.username,
            action,
            request.POST.get("course_id"),
        )
        return HttpResponseBadRequest(_("Invalid course id"))
    # Allow us to monitor performance of this transaction on a per-course basis since we often roll-out features
    # on a per-course basis.
    monitoring_utils.set_custom_metric('course_id', unicode(course_id))
    if action == "enroll":
        # Make sure the course exists
        # We don't do this check on unenroll, or a bad course id can't be unenrolled from
        if not modulestore().has_course(course_id):
            log.warning(
                u"User %s tried to enroll in non-existent course %s",
                user.username,
                course_id
            )
            return HttpResponseBadRequest(_("Course id is invalid"))
        # Record the user's email opt-in preference
        if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'):
            _update_email_opt_in(request, course_id.org)
        # Enrollment modes currently offered for this course.
        available_modes = CourseMode.modes_for_course_dict(course_id)
        # Check whether the user is blocked from enrolling in this course
        # This can occur if the user's IP is on a global blacklist
        # or if the user is enrolling in a country in which the course
        # is not available.
        redirect_url = embargo_api.redirect_if_blocked(
            course_id, user=user, ip_address=get_ip(request),
            url=request.path
        )
        if redirect_url:
            return HttpResponse(redirect_url)
        # Check that auto enrollment is allowed for this course
        # (= the course is NOT behind a paywall)
        if CourseMode.can_auto_enroll(course_id):
            # Enroll the user using the default mode (audit)
            # We're assuming that users of the course enrollment table
            # will NOT try to look up the course enrollment model
            # by its slug. If they do, it's possible (based on the state of the database)
            # for no such model to exist, even though we've set the enrollment type
            # to "audit".
            try:
                enroll_mode = CourseMode.auto_enroll_mode(course_id, available_modes)
                if enroll_mode:
                    CourseEnrollment.enroll(user, course_id, check_access=check_access, mode=enroll_mode)
            except Exception:  # pylint: disable=broad-except
                return HttpResponseBadRequest(_("Could not enroll"))
        # If we have more than one course mode or professional ed is enabled,
        # then send the user to the choose your track page.
        # (In the case of no-id-professional/professional ed, this will redirect to a page that
        # funnels users directly into the verification / payment flow)
        if CourseMode.has_verified_mode(available_modes) or CourseMode.has_professional_mode(available_modes):
            return HttpResponse(
                reverse("course_modes_choose", kwargs={'course_id': unicode(course_id)})
            )
        # Otherwise, there is only one mode available (the default)
        return HttpResponse()
    elif action == "unenroll":
        enrollment = CourseEnrollment.get_enrollment(user, course_id)
        if not enrollment:
            return HttpResponseBadRequest(_("You are not enrolled in this course"))
        # Certain certificate states block self-service unenrollment (checked below).
        certificate_info = cert_info(user, enrollment.course_overview, enrollment.mode)
        if certificate_info.get('status') in DISABLE_UNENROLL_CERT_STATES:
            return HttpResponseBadRequest(_("Your certificate prevents you from unenrolling from this course"))
        CourseEnrollment.unenroll(user, course_id)
        # Notify REFUND_ORDER listeners with the (now inactive) enrollment.
        REFUND_ORDER.send(sender=None, course_enrollment=enrollment)
        return HttpResponse()
    else:
        return HttpResponseBadRequest(_("Enrollment action is invalid"))
def _generate_not_activated_message(user):
    """
    Build the sign-in error message shown to a learner whose account
    has not yet been activated.

    Arguments:
        user (User): User object for the learner attempting to sign in.

    Returns:
        Localized HTML message containing the activation email address
        and a link to the configured support site.
    """
    platform_name = configuration_helpers.get_value(
        'PLATFORM_NAME',
        settings.PLATFORM_NAME
    )
    support_url = configuration_helpers.get_value(
        'SUPPORT_SITE_LINK',
        settings.SUPPORT_SITE_LINK
    )
    message_template = _('In order to sign in, you need to activate your account.<br /><br />'
                         'We just sent an activation link to <strong>{email}</strong>. If '
                         'you do not receive an email, check your spam folders or '
                         '<a href="{support_url}">contact {platform} Support</a>.')
    return message_template.format(
        email=user.email,
        support_url=support_url,
        platform=platform_name
    )
# Need different levels of logging
@ensure_csrf_cookie
def login_user(request, error=""):  # pylint: disable=too-many-statements,unused-argument
    """AJAX request to log in the user.

    Handles both first-party (email/password) auth and a completed
    third-party auth pipeline; returns a JsonResponse, except for the
    third-party "unlinked account" case which returns a plain 403.
    """
    backend_name = None
    email = None
    password = None
    redirect_url = None
    response = None
    running_pipeline = None
    third_party_auth_requested = third_party_auth.is_enabled() and pipeline.running(request)
    third_party_auth_successful = False
    # Submitting an email or password explicitly overrides a running
    # third-party auth pipeline.
    trumped_by_first_party_auth = bool(request.POST.get('email')) or bool(request.POST.get('password'))
    user = None
    platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME)
    if third_party_auth_requested and not trumped_by_first_party_auth:
        # The user has already authenticated via third-party auth and has not
        # asked to do first party auth by supplying a username or password. We
        # now want to put them through the same logging and cookie calculation
        # logic as with first-party auth.
        running_pipeline = pipeline.get(request)
        username = running_pipeline['kwargs'].get('username')
        backend_name = running_pipeline['backend']
        third_party_uid = running_pipeline['kwargs']['uid']
        requested_provider = provider.Registry.get_from_pipeline(running_pipeline)
        try:
            user = pipeline.get_authenticated_user(requested_provider, username, third_party_uid)
            third_party_auth_successful = True
        except User.DoesNotExist:
            AUDIT_LOG.info(
                u"Login failed - user with username {username} has no social auth "
                "with backend_name {backend_name}".format(
                    username=username, backend_name=backend_name)
            )
            message = _(
                "You've successfully logged into your {provider_name} account, "
                "but this account isn't linked with an {platform_name} account yet."
            ).format(
                platform_name=platform_name,
                provider_name=requested_provider.name,
            )
            message += "<br/><br/>"
            message += _(
                "Use your {platform_name} username and password to log into {platform_name} below, "
                "and then link your {platform_name} account with {provider_name} from your dashboard."
            ).format(
                platform_name=platform_name,
                provider_name=requested_provider.name,
            )
            message += "<br/><br/>"
            message += _(
                "If you don't have an {platform_name} account yet, "
                "click <strong>Register</strong> at the top of the page."
            ).format(
                platform_name=platform_name
            )
            return HttpResponse(message, content_type="text/plain", status=403)
    else:
        # First-party auth: look the user up by the submitted email address.
        if 'email' not in request.POST or 'password' not in request.POST:
            return JsonResponse({
                "success": False,
                # TODO: User error message
                "value": _('There was an error receiving your login information. Please email us.'),
            })  # TODO: this should be status code 400
        email = request.POST['email']
        password = request.POST['password']
        try:
            user = User.objects.get(email=email)
        except User.DoesNotExist:
            # Leave user as None; authentication below is then guaranteed
            # to fail via the empty-username path.
            if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
                AUDIT_LOG.warning(u"Login failed - Unknown user email")
            else:
                AUDIT_LOG.warning(u"Login failed - Unknown user email: {0}".format(email))
    # check if the user has a linked shibboleth account, if so, redirect the user to shib-login
    # This behavior is pretty much like what gmail does for shibboleth. Try entering some @stanford.edu
    # address into the Gmail login.
    if settings.FEATURES.get('AUTH_USE_SHIB') and user:
        try:
            eamap = ExternalAuthMap.objects.get(user=user)
            if eamap.external_domain.startswith(openedx.core.djangoapps.external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
                return JsonResponse({
                    "success": False,
                    "redirect": reverse('shib-login'),
                })  # TODO: this should be status code 301 # pylint: disable=fixme
        except ExternalAuthMap.DoesNotExist:
            # This is actually the common case, logging in user without external linked login
            AUDIT_LOG.info(u"User %s w/o external auth attempting login", user)
    # see if account has been locked out due to excessive login failures
    user_found_by_email_lookup = user
    if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
        if LoginFailures.is_user_locked_out(user_found_by_email_lookup):
            lockout_message = _('This account has been temporarily locked due '
                                'to excessive login failures. Try again later.')
            return JsonResponse({
                "success": False,
                "value": lockout_message,
            })  # TODO: this should be status code 429 # pylint: disable=fixme
    # see if the user must reset his/her password due to any policy settings
    if user_found_by_email_lookup and PasswordHistory.should_user_reset_password_now(user_found_by_email_lookup):
        return JsonResponse({
            "success": False,
            "value": _('Your password has expired due to password policy on this account. You must '
                       'reset your password before you can log in again. Please click the '
                       '"Forgot Password" link on this page to reset your password before logging in again.'),
        })  # TODO: this should be status code 403 # pylint: disable=fixme
    # if the user doesn't exist, we want to set the username to an invalid
    # username so that authentication is guaranteed to fail and we can take
    # advantage of the ratelimited backend
    username = user.username if user else ""
    # Password authentication (skipped when third-party auth already succeeded).
    if not third_party_auth_successful:
        try:
            user = authenticate(username=username, password=password, request=request)
        # this occurs when there are too many attempts from the same IP address
        except RateLimitException:
            return JsonResponse({
                "success": False,
                "value": _('Too many failed login attempts. Try again later.'),
            })  # TODO: this should be status code 429 # pylint: disable=fixme
    if user is None:
        # tick the failed login counters if the user exists in the database
        if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
            LoginFailures.increment_lockout_counter(user_found_by_email_lookup)
        # if we didn't find this username earlier, the account for this email
        # doesn't exist, and doesn't have a corresponding password
        if username != "":
            if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
                loggable_id = user_found_by_email_lookup.id if user_found_by_email_lookup else "<unknown>"
                AUDIT_LOG.warning(u"Login failed - password for user.id: {0} is invalid".format(loggable_id))
            else:
                AUDIT_LOG.warning(u"Login failed - password for {0} is invalid".format(email))
        return JsonResponse({
            "success": False,
            "value": _('Email or password is incorrect.'),
        })  # TODO: this should be status code 400 # pylint: disable=fixme
    # successful login, clear failed login attempts counters, if applicable
    if LoginFailures.is_feature_enabled():
        LoginFailures.clear_lockout_counter(user)
    # Track the user's sign in
    if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
        tracking_context = tracker.get_tracker().resolve_context()
        analytics.identify(
            user.id,
            {
                'email': email,
                'username': username
            },
            {
                # Disable MailChimp because we don't want to update the user's email
                # and username in MailChimp on every page load. We only need to capture
                # this data on registration/activation.
                'MailChimp': False
            }
        )
        analytics.track(
            user.id,
            "edx.bi.user.account.authenticated",
            {
                'category': "conversion",
                'label': request.POST.get('course_id'),
                'provider': None
            },
            context={
                'ip': tracking_context.get('ip'),
                'Google Analytics': {
                    'clientId': tracking_context.get('client_id')
                }
            }
        )
    # Active user authenticated: establish the session and set login cookies.
    if user is not None and user.is_active:
        try:
            # We do not log here, because we have a handler registered
            # to perform logging on successful logins.
            login(request, user)
            if request.POST.get('remember') == 'true':
                request.session.set_expiry(604800)
                log.debug("Setting user session to never expire")
            else:
                request.session.set_expiry(0)
        except Exception as exc:  # pylint: disable=broad-except
            AUDIT_LOG.critical("Login failed - Could not create session. Is memcached running?")
            log.critical("Login failed - Could not create session. Is memcached running?")
            log.exception(exc)
            raise
        redirect_url = None  # The AJAX method calling should know the default destination upon success
        if third_party_auth_successful:
            redirect_url = pipeline.get_complete_url(backend_name)
        response = JsonResponse({
            "success": True,
            "redirect_url": redirect_url,
        })
        # Ensure that the external marketing site can
        # detect that the user is logged in.
        return set_logged_in_cookies(request, response, user)
    # Authenticated but inactive account: resend the activation email.
    if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
        AUDIT_LOG.warning(u"Login failed - Account not active for user.id: {0}, resending activation".format(user.id))
    else:
        AUDIT_LOG.warning(u"Login failed - Account not active for user {0}, resending activation".format(username))
    reactivation_email_for_user(user)
    return JsonResponse({
        "success": False,
        "value": _generate_not_activated_message(user),
    })  # TODO: this should be status code 400 # pylint: disable=fixme
@csrf_exempt
@require_POST
@social_utils.psa("social:complete")
def login_oauth_token(request, backend):
    """
    Authenticate the client using an OAuth access token by using the token to
    retrieve information from a third party and matching that information to an
    existing user.

    Responses: 204 on success, 401 for an invalid token, 400 for a missing
    token, 404 for a non-OAuth backend.
    """
    warnings.warn("Please use AccessTokenExchangeView instead.", DeprecationWarning)
    backend = request.backend
    # Only OAuth1/OAuth2 backends can authenticate via a bearer token.
    # Idiom: single isinstance call with a tuple of classes.
    if not isinstance(backend, (social_oauth.BaseOAuth1, social_oauth.BaseOAuth2)):
        raise Http404
    if "access_token" not in request.POST:
        return JsonResponse({"error": "invalid_request"}, status=400)
    # Tell third party auth pipeline that this is an API call
    request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_LOGIN_API
    access_token = request.POST["access_token"]
    user = None
    try:
        user = backend.do_auth(access_token)
    except (HTTPError, AuthException):
        pass
    # do_auth can return a non-User object if it fails
    if user and isinstance(user, User):
        login(request, user)
        return JsonResponse(status=204)
    # Ensure user does not re-enter the pipeline
    request.social_strategy.clean_partial_pipeline(access_token)
    return JsonResponse({"error": "invalid_token"}, status=401)
@require_GET
@login_required
@ensure_csrf_cookie
def manage_user_standing(request):
    """
    Renders the view used to manage user standing. Also displays a table
    of user accounts that have been disabled and who disabled them.

    Staff-only; raises Http404 for non-staff users.
    """
    if not request.user.is_staff:
        raise Http404
    # select_related avoids one extra query per row: the original code
    # re-fetched each user's standing via ``user.standing`` (N+1 queries)
    # even though each UserStanding row already carries ``changed_by``.
    all_disabled_accounts = UserStanding.objects.filter(
        account_status=UserStanding.ACCOUNT_DISABLED
    ).select_related('user', 'changed_by')
    rows = [
        [standing.user.username, standing.changed_by]
        for standing in all_disabled_accounts
    ]
    context = {'headers': ['username', 'account_changed_by'], 'rows': rows}
    return render_to_response("manage_user_standing.html", context)
@require_POST
@login_required
@ensure_csrf_cookie
def disable_account_ajax(request):
    """
    Ajax call to change user standing. Endpoint of the form
    in manage_user_standing.html

    Staff-only; every error path returns a 400 JsonResponse with a
    localized 'message'.
    """
    if not request.user.is_staff:
        raise Http404

    context = {}

    username = request.POST.get('username')
    if username is None or username.strip() == '':
        context['message'] = _('Please enter a username')
        return JsonResponse(context, status=400)

    account_action = request.POST.get('account_action')
    if account_action is None:
        context['message'] = _('Please choose an option')
        return JsonResponse(context, status=400)

    username = username.strip()
    try:
        target_user = User.objects.get(username=username)
    except User.DoesNotExist:
        context['message'] = _("User with username {} does not exist").format(username)
        return JsonResponse(context, status=400)

    # Flattened: the original nested the happy path in try/else.
    user_account, _created = UserStanding.objects.get_or_create(
        user=target_user, defaults={'changed_by': request.user},
    )

    if account_action == 'disable':
        user_account.account_status = UserStanding.ACCOUNT_DISABLED
        context['message'] = _("Successfully disabled {}'s account").format(username)
        log.info(u"%s disabled %s's account", request.user, username)
    elif account_action == 'reenable':
        user_account.account_status = UserStanding.ACCOUNT_ENABLED
        context['message'] = _("Successfully reenabled {}'s account").format(username)
        log.info(u"%s reenabled %s's account", request.user, username)
    else:
        context['message'] = _("Unexpected account status")
        return JsonResponse(context, status=400)

    user_account.changed_by = request.user
    user_account.standing_last_changed_at = datetime.datetime.now(UTC)
    user_account.save()
    return JsonResponse(context)
@login_required
@ensure_csrf_cookie
def change_setting(request):
    """JSON call to change a profile setting: Right now, location"""
    # TODO (vshnayder): location is no longer used
    profile = UserProfile.objects.get(user=request.user)  # request.user.profile_cache
    new_location = request.POST.get('location')
    if new_location is not None:
        profile.location = new_location
        profile.save()
    return JsonResponse({
        "success": True,
        "location": profile.location,
    })
class AccountValidationError(Exception):
    """Validation failure tied to a specific account form field.

    Carries the offending field's name so callers can surface the
    message next to the right input.
    """
    def __init__(self, message, field):
        super(AccountValidationError, self).__init__(message)
        # Name of the account field that failed validation.
        self.field = field
@receiver(post_save, sender=User)
def user_signup_handler(sender, **kwargs):  # pylint: disable=unused-argument
    """
    Post-save handler that records the site a user signed up on.

    Only acts on newly created users, and only when the configuration
    declares a SITE_NAME (i.e. a white-labeled site).
    """
    if not kwargs.get('created'):
        return
    site = configuration_helpers.get_value('SITE_NAME')
    if not site:
        return
    UserSignupSource(user=kwargs['instance'], site=site).save()
    log.info(u'user {} originated from a white labeled "Microsite"'.format(kwargs['instance'].id))
def _do_create_account(form, custom_form=None):
    """
    Given cleaned post variables, create the User and UserProfile objects, as well as the
    registration for this user.

    Returns a tuple (User, UserProfile, Registration).

    Note: this function is also used for creating test users.
    """
    # Check if ALLOW_PUBLIC_ACCOUNT_CREATION flag turned off to restrict user account creation
    if not configuration_helpers.get_value(
            'ALLOW_PUBLIC_ACCOUNT_CREATION',
            settings.FEATURES.get('ALLOW_PUBLIC_ACCOUNT_CREATION', True)
    ):
        raise PermissionDenied()

    errors = {}
    errors.update(form.errors)
    if custom_form:
        errors.update(custom_form.errors)
    if errors:
        raise ValidationError(errors)

    user = User(
        username=form.cleaned_data["username"],
        email=form.cleaned_data["email"],
        is_active=False
    )
    user.set_password(form.cleaned_data["password"])
    registration = Registration()

    # TODO: Rearrange so that if part of the process fails, the whole process fails.
    # Right now, we can have e.g. no registration e-mail sent out and a zombie account
    try:
        with transaction.atomic():
            user.save()
            if custom_form:
                custom_model = custom_form.save(commit=False)
                custom_model.user = user
                custom_model.save()
    except IntegrityError:
        # Figure out the cause of the integrity error
        # TODO duplicate email is already handled by form.errors above as a ValidationError.
        # The checks for duplicate email/username should occur in the same place with an
        # AccountValidationError and a consistent user message returned (i.e. both should
        # return "It looks like {username} belongs to an existing account. Try again with a
        # different username.")
        # Use QuerySet.exists() instead of len(queryset) so the database
        # runs an EXISTS query rather than fetching every matching row.
        if User.objects.filter(username=user.username).exists():
            raise AccountValidationError(
                _("An account with the Public Username '{username}' already exists.").format(username=user.username),
                field="username"
            )
        elif User.objects.filter(email=user.email).exists():
            raise AccountValidationError(
                _("An account with the Email '{email}' already exists.").format(email=user.email),
                field="email"
            )
        else:
            raise

    # add this account creation to password history
    # NOTE, this will be a NOP unless the feature has been turned on in configuration
    password_history_entry = PasswordHistory()
    password_history_entry.create(user)

    registration.register(user)

    profile_fields = [
        "name", "level_of_education", "gender", "mailing_address", "city", "country", "goals",
        "year_of_birth"
    ]
    profile = UserProfile(
        user=user,
        **{key: form.cleaned_data.get(key) for key in profile_fields}
    )
    extended_profile = form.cleaned_extended_profile
    if extended_profile:
        profile.meta = json.dumps(extended_profile)
    try:
        profile.save()
    except Exception:  # pylint: disable=broad-except
        log.exception("UserProfile creation failed for user {id}.".format(id=user.id))
        raise
    return (user, profile, registration)
def _create_or_set_user_attribute_created_on_site(user, site):
    """
    Record the (white-label) site domain a user's account was created on.

    No-op when ``site`` is falsy.
    """
    if not site:
        return
    UserAttribute.set_user_attribute(user, 'created_on_site', site.domain)
def create_account_with_params(request, params):
    """
    Given a request and a dict of parameters (which may or may not have come
    from the request), create an account for the requesting user, including
    creating a comments service user object and sending an activation email.

    This also takes external/third-party auth into account, updates that as
    necessary, and authenticates the user for the request's session.

    Does not return anything.

    Raises AccountValidationError if an account with the username or email
    specified by params already exists, or ValidationError if any of the given
    parameters is invalid for any other reason.

    Issues with this code:
    * It is not transactional. If there is a failure part-way, an incomplete
      account will be created and left in the database.
    * Third-party auth passwords are not verified. There is a comment that
      they are unused, but it would be helpful to have a sanity check that
      they are sane.
    * It is over 300 lines long (!) and includes disprate functionality, from
      registration e-mails to all sorts of other things. It should be broken
      up into semantically meaningful functions.
    * The user-facing text is rather unfriendly (e.g. "Username must be a
      minimum of two characters long" rather than "Please use a username of
      at least two characters").
    * Duplicate email raises a ValidationError (rather than the expected
      AccountValidationError). Duplicate username returns an inconsistent
      user message (i.e. "An account with the Public Username '{username}'
      already exists." rather than "It looks like {username} belongs to an
      existing account. Try again with a different username.") The two checks
      occur at different places in the code; as a result, registering with
      both a duplicate username and email raises only a ValidationError for
      email only.
    """
    # Copy params so we can modify it; we can't just do dict(params) because if
    # params is request.POST, that results in a dict containing lists of values
    params = dict(params.items())
    # allow to define custom set of required/optional/hidden fields via configuration
    extra_fields = configuration_helpers.get_value(
        'REGISTRATION_EXTRA_FIELDS',
        getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
    )
    # registration via third party (Google, Facebook) using mobile application
    # doesn't use social auth pipeline (no redirect uri(s) etc involved).
    # In this case all related info (required for account linking)
    # is sent in params.
    # `third_party_auth_credentials_in_api` essentially means 'request
    # is made from mobile application'
    third_party_auth_credentials_in_api = 'provider' in params
    is_third_party_auth_enabled = third_party_auth.is_enabled()
    # Third-party passwords are unused; generate a random one so the form
    # validates without the user ever knowing or needing it.
    if is_third_party_auth_enabled and (pipeline.running(request) or third_party_auth_credentials_in_api):
        params["password"] = pipeline.make_random_password()
    # in case user is registering via third party (Google, Facebook) and pipeline has expired, show appropriate
    # error message
    if is_third_party_auth_enabled and ('social_auth_provider' in params and not pipeline.running(request)):
        raise ValidationError(
            {'session_expired': [
                _(u"Registration using {provider} has timed out.").format(
                    provider=params.get('social_auth_provider'))
            ]}
        )
    # if doing signup for an external authorization, then get email, password, name from the eamap
    # don't use the ones from the form, since the user could have hacked those
    # unless originally we didn't get a valid email or name from the external auth
    # TODO: We do not check whether these values meet all necessary criteria, such as email length
    do_external_auth = 'ExternalAuthMap' in request.session
    if do_external_auth:
        eamap = request.session['ExternalAuthMap']
        try:
            validate_email(eamap.external_email)
            params["email"] = eamap.external_email
        except ValidationError:
            # Keep the form-provided email when the external one is invalid.
            pass
        if eamap.external_name.strip() != '':
            params["name"] = eamap.external_name
        params["password"] = eamap.internal_password
        log.debug(u'In create_account with external_auth: user = %s, email=%s', params["name"], params["email"])
    extended_profile_fields = configuration_helpers.get_value('extended_profile_fields', [])
    enforce_password_policy = (
        settings.FEATURES.get("ENFORCE_PASSWORD_POLICY", False) and
        not do_external_auth
    )
    # Can't have terms of service for certain SHIB users, like at Stanford
    registration_fields = getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
    tos_required = (
        registration_fields.get('terms_of_service') != 'hidden' or
        registration_fields.get('honor_code') != 'hidden'
    ) and (
        not settings.FEATURES.get("AUTH_USE_SHIB") or
        not settings.FEATURES.get("SHIB_DISABLE_TOS") or
        not do_external_auth or
        not eamap.external_domain.startswith(openedx.core.djangoapps.external_auth.views.SHIBBOLETH_DOMAIN_PREFIX)
    )
    form = AccountCreationForm(
        data=params,
        extra_fields=extra_fields,
        extended_profile_fields=extended_profile_fields,
        enforce_username_neq_password=True,
        enforce_password_policy=enforce_password_policy,
        tos_required=tos_required,
    )
    custom_form = get_registration_extension_form(data=params)
    # Perform operations within a transaction that are critical to account creation
    with transaction.atomic():
        # first, create the account
        (user, profile, registration) = _do_create_account(form, custom_form)
        # If a 3rd party auth provider and credentials were provided in the API, link the account with social auth
        # (If the user is using the normal register page, the social auth pipeline does the linking, not this code)
        # Note: this is orthogonal to the 3rd party authentication pipeline that occurs
        # when the account is created via the browser and redirect URLs.
        if is_third_party_auth_enabled and third_party_auth_credentials_in_api:
            backend_name = params['provider']
            request.social_strategy = social_utils.load_strategy(request)
            redirect_uri = reverse('social:complete', args=(backend_name, ))
            request.backend = social_utils.load_backend(request.social_strategy, backend_name, redirect_uri)
            social_access_token = params.get('access_token')
            if not social_access_token:
                raise ValidationError({
                    'access_token': [
                        _("An access_token is required when passing value ({}) for provider.").format(
                            params['provider']
                        )
                    ]
                })
            request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_REGISTER_API
            pipeline_user = None
            error_message = ""
            try:
                pipeline_user = request.backend.do_auth(social_access_token, user=user)
            except AuthAlreadyAssociated:
                error_message = _("The provided access_token is already associated with another user.")
            except (HTTPError, AuthException):
                error_message = _("The provided access_token is not valid.")
            if not pipeline_user or not isinstance(pipeline_user, User):
                # Ensure user does not re-enter the pipeline
                request.social_strategy.clean_partial_pipeline(social_access_token)
                raise ValidationError({'access_token': [error_message]})
    # Perform operations that are non-critical parts of account creation
    _create_or_set_user_attribute_created_on_site(user, request.site)
    preferences_api.set_user_preference(user, LANGUAGE_KEY, get_language())
    if settings.FEATURES.get('ENABLE_DISCUSSION_EMAIL_DIGEST'):
        try:
            enable_notifications(user)
        except Exception:  # pylint: disable=broad-except
            # Best-effort: notification setup failure must not abort registration.
            log.exception("Enable discussion notifications failed for user {id}.".format(id=user.id))
    dog_stats_api.increment("common.student.account_created")
    # If the user is registering via 3rd party auth, track which provider they use
    third_party_provider = None
    running_pipeline = None
    if is_third_party_auth_enabled and pipeline.running(request):
        running_pipeline = pipeline.get(request)
        third_party_provider = provider.Registry.get_from_pipeline(running_pipeline)
    # Track the user's registration
    if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
        tracking_context = tracker.get_tracker().resolve_context()
        identity_args = [
            user.id,  # pylint: disable=no-member
            {
                'email': user.email,
                'username': user.username,
                'name': profile.name,
                # Mailchimp requires the age & yearOfBirth to be integers, we send a sane integer default if falsey.
                'age': profile.age or -1,
                'yearOfBirth': profile.year_of_birth or datetime.datetime.now(UTC).year,
                'education': profile.level_of_education_display,
                'address': profile.mailing_address,
                'gender': profile.gender_display,
                'country': unicode(profile.country),
            }
        ]
        if hasattr(settings, 'MAILCHIMP_NEW_USER_LIST_ID'):
            identity_args.append({
                "MailChimp": {
                    "listId": settings.MAILCHIMP_NEW_USER_LIST_ID
                }
            })
        analytics.identify(*identity_args)
        analytics.track(
            user.id,
            "edx.bi.user.account.registered",
            {
                'category': 'conversion',
                'label': params.get('course_id'),
                'provider': third_party_provider.name if third_party_provider else None
            },
            context={
                'ip': tracking_context.get('ip'),
                'Google Analytics': {
                    'clientId': tracking_context.get('client_id')
                }
            }
        )
    # Announce registration
    REGISTER_USER.send(sender=None, user=user, registration=registration)
    create_comments_service_user(user)
    # Check if the system is configured to skip activation email for the current user.
    skip_email = skip_activation_email(
        user, do_external_auth, running_pipeline, third_party_provider,
    )
    if skip_email:
        registration.activate()
        _enroll_user_in_pending_courses(user)  # Enroll student in any pending courses
    else:
        compose_and_send_activation_email(user, profile, registration)
    # Immediately after a user creates an account, we log them in. They are only
    # logged in until they close the browser. They can't log in again until they click
    # the activation link from the email.
    backend = load_backend(NEW_USER_AUTH_BACKEND)
    new_user = backend.authenticate(request=request, username=user.username, password=params['password'])
    new_user.backend = NEW_USER_AUTH_BACKEND
    login(request, new_user)
    # Session expires at browser close (see comment above).
    request.session.set_expiry(0)
    try:
        record_registration_attributions(request, new_user)
    # Don't prevent a user from registering due to attribution errors.
    except Exception:  # pylint: disable=broad-except
        log.exception('Error while attributing cookies to user registration.')
    # TODO: there is no error checking here to see that the user actually logged in successfully,
    # and is not yet an active user.
    if new_user is not None:
        AUDIT_LOG.info(u"Login success on new account creation - {0}".format(new_user.username))
    if do_external_auth:
        eamap.user = new_user
        eamap.dtsignup = datetime.datetime.now(UTC)
        eamap.save()
        AUDIT_LOG.info(u"User registered with external_auth %s", new_user.username)
        AUDIT_LOG.info(u'Updated ExternalAuthMap for %s to be %s', new_user.username, eamap)
        if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'):
            log.info('bypassing activation email')
            new_user.is_active = True
            new_user.save()
            AUDIT_LOG.info(u"Login activated on extauth account - {0} ({1})".format(new_user.username, new_user.email))
    # NOTE: docstring says "does not return anything" but the function does
    # return the freshly-authenticated user; callers (e.g. create_account) rely on it.
    return new_user
def skip_activation_email(user, do_external_auth, running_pipeline, third_party_provider):
    """
    Decide whether the account-activation email can be skipped.

    The email is skipped when any of the following hold:
    1. We are doing load testing.
    2. Random user generation for other forms of testing.
    3. External auth is bypassing activation.
    4. The platform is configured to not require e-mail activation.
    5. A new user is registering via a trusted third party provider
       (with skip_email_verification=True).

    Note that this feature is only tested as a flag set one way or
    the other for *new* systems. We need to be careful about
    changing settings on a running system to make sure no users are
    left in an inconsistent state (or doing a migration if they are).

    Arguments:
        user (User): Django User object for the current user.
        do_external_auth (bool): True if external authentication is in progress.
        running_pipeline (dict): Dictionary containing user and pipeline data for third party authentication.
        third_party_provider (ProviderConfig): An instance of third party provider configuration.

    Returns:
        Truthy when the activation email should be skipped, falsy when it
        should be sent.
    """
    pipeline_email = running_pipeline and running_pipeline['kwargs'].get('details', {}).get('email')
    # The SSO email is acceptable when it matches the account email, or when
    # the assertion carried no email and the provider is a SAP SuccessFactors
    # SAML IdP (such IdPs may deliver the email via a later callback).
    email_is_valid = (
        pipeline_email == user.email or (
            pipeline_email is None and
            third_party_provider and
            getattr(third_party_provider, "identity_provider_type", None) == SAP_SUCCESSFACTORS_SAML_KEY
        )
    )
    features = settings.FEATURES
    return (
        features.get('SKIP_EMAIL_VALIDATION', None) or
        features.get('AUTOMATIC_AUTH_FOR_TESTING') or
        (features.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH') and do_external_auth) or
        (third_party_provider and third_party_provider.skip_email_verification and email_is_valid)
    )
def _enroll_user_in_pending_courses(student):
    """
    Enroll ``student`` in every course for which a CourseEnrollmentAllowed
    record with the auto_enroll flag exists for their email address.
    """
    pending = CourseEnrollmentAllowed.objects.filter(email=student.email)
    for cea in (entry for entry in pending if entry.auto_enroll):
        enrollment = CourseEnrollment.enroll(student, cea.course_id)
        audit = ManualEnrollmentAudit.get_manual_enrollment_by_email(student.email)
        if audit is None:
            continue
        # A manual-enrollment audit trail exists for this email: record the
        # ALLOWEDTOENROLL -> ENROLLED transition, re-using the original
        # enroller and reason.
        ManualEnrollmentAudit.create_manual_enrollment_audit(
            audit.enrolled_by, student.email, ALLOWEDTOENROLL_TO_ENROLLED,
            audit.reason, enrollment
        )
def record_affiliate_registration_attribution(request, user):
    """
    Attribute this user's registration to the referring affiliate, when an
    affiliate cookie is present; otherwise do nothing.
    """
    affiliate_id = request.COOKIES.get(settings.AFFILIATE_COOKIE_NAME)
    if not (user and affiliate_id):
        return
    UserAttribute.set_user_attribute(user, REGISTRATION_AFFILIATE_ID, affiliate_id)
def record_utm_registration_attribution(request, user):
    """
    Attribute this user's registration to the latest UTM referrer, when a
    UTM cookie is present; otherwise do nothing.
    """
    cookie_name = RegistrationCookieConfiguration.current().utm_cookie_name
    raw_cookie = request.COOKIES.get(cookie_name)
    if not (user and raw_cookie):
        return
    utm = json.loads(raw_cookie)
    # REGISTRATION_UTM_PARAMETERS maps cookie parameter names to the
    # UserAttribute names they are stored under.
    for param_name, attribute_name in REGISTRATION_UTM_PARAMETERS.items():
        value = utm.get(param_name)
        if value:
            UserAttribute.set_user_attribute(user, attribute_name, value)
    created_at_unixtime = utm.get('created_at')
    if created_at_unixtime:
        # The javascript timestamp is in milliseconds, not seconds, hence /1000.
        # PYTHON: time.time() => 1475590280.823698
        # JS: new Date().getTime() => 1475590280823
        created_at = datetime.datetime.fromtimestamp(int(created_at_unixtime) / float(1000), tz=UTC)
        UserAttribute.set_user_attribute(user, REGISTRATION_UTM_CREATED_AT, created_at)
def record_registration_attributions(request, user):
    """
    Attribute this user's registration based on referrer cookies
    (affiliate and UTM).
    """
    for attribute_registration in (
        record_affiliate_registration_attribution,
        record_utm_registration_attribution,
    ):
        attribute_registration(request, user)
@csrf_exempt
def create_account(request, post_override=None):
    """
    JSON call to create new edX account.
    Used by form in signup_modal.html, which is included into header.html

    Returns a JsonResponse: on success {'success': True, 'redirect_url': ...},
    on validation failure a 400 with {'success': False, 'field': ..., 'value': ...}.
    """
    # Check if ALLOW_PUBLIC_ACCOUNT_CREATION flag turned off to restrict user account creation
    if not configuration_helpers.get_value(
        'ALLOW_PUBLIC_ACCOUNT_CREATION',
        settings.FEATURES.get('ALLOW_PUBLIC_ACCOUNT_CREATION', True)
    ):
        return HttpResponseForbidden(_("Account creation not allowed."))
    warnings.warn("Please use RegistrationView instead.", DeprecationWarning)
    try:
        user = create_account_with_params(request, post_override or request.POST)
    except AccountValidationError as exc:
        # NOTE: `exc.message` is Python 2 exception style; AccountValidationError
        # presumably defines `message` and `field` attributes — verify if porting.
        return JsonResponse({'success': False, 'value': exc.message, 'field': exc.field}, status=400)
    except ValidationError as exc:
        # Only the first offending field is reported back to the client.
        field, error_list = next(exc.message_dict.iteritems())
        return JsonResponse(
            {
                "success": False,
                "field": field,
                "value": error_list[0],
            },
            status=400
        )
    redirect_url = None  # The AJAX method calling should know the default destination upon success
    # Resume the third-party-auth pipeline if necessary.
    if third_party_auth.is_enabled() and pipeline.running(request):
        running_pipeline = pipeline.get(request)
        redirect_url = pipeline.get_complete_url(running_pipeline['backend'])
    response = JsonResponse({
        'success': True,
        'redirect_url': redirect_url,
    })
    set_logged_in_cookies(request, response, user)
    return response
def str2bool(s):
    """
    Loosely interpret any value as a boolean.

    The value is stringified and lower-cased; only 'yes', 'true', 't' and
    '1' count as True — everything else (including None) is False.
    """
    return str(s).lower() in ('yes', 'true', 't', '1')
def _clean_roles(roles):
""" Clean roles.
Strips whitespace from roles, and removes empty items.
Args:
roles (str[]): List of role names.
Returns:
str[]
"""
roles = [role.strip() for role in roles]
roles = [role for role in roles if role]
return roles
def auto_auth(request):
    """
    Create or configure a user account, then log in as that user.

    Enabled only when
    settings.FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] is true.

    Accepts the following querystring parameters:
    * `username`, `email`, and `password` for the user account
    * `full_name` for the user profile (the user's full name; defaults to the username)
    * `staff`: Set to "true" to make the user global staff.
    * `course_id`: Enroll the student in the course with `course_id`
    * `roles`: Comma-separated list of roles to grant the student in the course with `course_id`
    * `no_login`: Define this to create the user but not login
    * `redirect`: Set to "true" will redirect to the `redirect_to` value if set, or
        course home page if course_id is defined, otherwise it will redirect to dashboard
    * `redirect_to`: will redirect to to this url
    * `is_active` : make/update account with status provided as 'is_active'
    If username, email, or password are not provided, use
    randomly generated credentials.
    """
    # Generate a unique name to use if none provided
    generated_username = uuid.uuid4().hex[0:30]
    # Use the params from the request, otherwise use these defaults
    username = request.GET.get('username', generated_username)
    password = request.GET.get('password', username)
    email = request.GET.get('email', username + "@example.com")
    full_name = request.GET.get('full_name', username)
    is_staff = str2bool(request.GET.get('staff', False))
    is_superuser = str2bool(request.GET.get('superuser', False))
    course_id = request.GET.get('course_id')
    redirect_to = request.GET.get('redirect_to')
    is_active = str2bool(request.GET.get('is_active', True))
    # Valid modes: audit, credit, honor, no-id-professional, professional, verified
    enrollment_mode = request.GET.get('enrollment_mode', 'honor')
    # Parse roles, stripping whitespace, and filtering out empty strings
    roles = _clean_roles(request.GET.get('roles', '').split(','))
    course_access_roles = _clean_roles(request.GET.get('course_access_roles', '').split(','))
    redirect_when_done = str2bool(request.GET.get('redirect', '')) or redirect_to
    login_when_done = 'no_login' not in request.GET
    form = AccountCreationForm(
        data={
            'username': username,
            'email': email,
            'password': password,
            'name': full_name,
        },
        tos_required=False
    )
    # Attempt to create the account.
    # If successful, this will return a tuple containing
    # the new user object.
    try:
        user, profile, reg = _do_create_account(form)
    except (AccountValidationError, ValidationError):
        # An account already exists with this username/email:
        # retrieve the existing user and update its credentials in place.
        user = User.objects.get(username=username)
        user.email = email
        user.set_password(password)
        user.is_active = is_active
        user.save()
        profile = UserProfile.objects.get(user=user)
        reg = Registration.objects.get(user=user)
    except PermissionDenied:
        return HttpResponseForbidden(_('Account creation not allowed.'))
    user.is_staff = is_staff
    user.is_superuser = is_superuser
    user.save()
    if is_active:
        reg.activate()
        reg.save()
    # ensure parental consent threshold is met by making the test user one
    # year older than the configured limit
    year = datetime.date.today().year
    age_limit = settings.PARENTAL_CONSENT_AGE_LIMIT
    profile.year_of_birth = (year - age_limit) - 1
    profile.save()
    _create_or_set_user_attribute_created_on_site(user, request.site)
    # Enroll the user in a course
    course_key = None
    if course_id:
        course_key = CourseLocator.from_string(course_id)
        CourseEnrollment.enroll(user, course_key, mode=enrollment_mode)
    # Apply the roles
    for role in roles:
        assign_role(course_key, user, role)
    for role in course_access_roles:
        CourseAccessRole.objects.update_or_create(user=user, course_id=course_key, org=course_key.org, role=role)
    # Log in as the user
    if login_when_done:
        user = authenticate(username=username, password=password)
        login(request, user)
    create_comments_service_user(user)
    if redirect_when_done:
        if redirect_to:
            # Redirect to page specified by the client
            redirect_url = redirect_to
        elif course_id:
            # Redirect to the course homepage (in LMS) or outline page (in Studio)
            try:
                redirect_url = reverse(course_home_url_name(course_key), kwargs={'course_id': course_id})
            except NoReverseMatch:
                redirect_url = reverse('course_handler', kwargs={'course_key_string': course_id})
        else:
            # Redirect to the learner dashboard (in LMS) or homepage (in Studio)
            try:
                redirect_url = reverse('dashboard')
            except NoReverseMatch:
                redirect_url = reverse('home')
        return redirect(redirect_url)
    else:
        response = JsonResponse({
            'created_status': 'Logged in' if login_when_done else 'Created',
            'username': username,
            'email': email,
            'password': password,
            'user_id': user.id,  # pylint: disable=no-member
            'anonymous_id': anonymous_id_for_user(user, None),
        })
        response.set_cookie('csrftoken', csrf(request)['csrf_token'])
        return response
@ensure_csrf_cookie
def activate_account(request, key):
    """When link in activation e-mail is clicked.

    Looks up the Registration by ``key``, activates the account if needed,
    queues a flash message describing the outcome, and redirects to the
    dashboard.
    """
    # If request is in Studio call the appropriate view
    if theming_helpers.get_project_root_name().lower() == u'cms':
        return activate_account_studio(request, key)
    try:
        registration = Registration.objects.get(activation_key=key)
    except (Registration.DoesNotExist, Registration.MultipleObjectsReturned):
        messages.error(
            request,
            HTML(_(
                '{html_start}Your account could not be activated{html_end}'
                'Something went wrong, please <a href="{support_url}">contact support</a> to resolve this issue.'
            )).format(
                support_url=configuration_helpers.get_value('SUPPORT_SITE_LINK', settings.SUPPORT_SITE_LINK),
                html_start=HTML('<p class="message-title">'),
                html_end=HTML('</p>'),
            ),
            extra_tags='account-activation aa-icon'
        )
    else:
        if not registration.user.is_active:
            registration.activate()
            # Success message for logged in users.
            message = _('{html_start}Success{html_end} You have activated your account.')
            if not request.user.is_authenticated():
                # Success message for logged out users
                message = _(
                    '{html_start}Success! You have activated your account.{html_end}'
                    'You will now receive email updates and alerts from us related to'
                    ' the courses you are enrolled in. Sign In to continue.'
                )
            # Add message for later use.
            messages.success(
                request,
                HTML(message).format(
                    html_start=HTML('<p class="message-title">'),
                    html_end=HTML('</p>'),
                ),
                extra_tags='account-activation aa-icon',
            )
        else:
            messages.info(
                request,
                HTML(_('{html_start}This account has already been activated.{html_end}')).format(
                    html_start=HTML('<p class="message-title">'),
                    html_end=HTML('</p>'),
                ),
                extra_tags='account-activation aa-icon',
            )
        # Enroll student in any pending courses he/she may have if auto_enroll flag is set
        _enroll_user_in_pending_courses(registration.user)
    return redirect('dashboard')
@ensure_csrf_cookie
def activate_account_studio(request, key):
    """
    Handle an activation-email link that belongs to Studio.

    Renders an "invalid" page when no (or more than one) Registration
    matches ``key``; otherwise activates the account on first use, enrolls
    pending auto-enroll courses, and renders the completion page.
    """
    try:
        registration = Registration.objects.get(activation_key=key)
    except (Registration.DoesNotExist, Registration.MultipleObjectsReturned):
        return render_to_response(
            "registration/activation_invalid.html",
            {'csrf': csrf(request)['csrf_token']}
        )
    already_active = registration.user.is_active
    if not already_active:
        registration.activate()
        # Enroll student in any pending courses he/she may have if auto_enroll flag is set
        _enroll_user_in_pending_courses(registration.user)
    return render_to_response(
        "registration/activation_complete.html",
        {
            'user_logged_in': request.user.is_authenticated(),
            'already_active': already_active
        }
    )
@csrf_exempt
@require_POST
def password_reset(request):
    """ Attempts to send a password reset e-mail.

    Always responds with {'success': True} when not rate-limited —
    presumably so the endpoint cannot be used to probe which e-mail
    addresses have accounts (TODO confirm intent).
    """
    # Add some rate limiting here by re-using the RateLimitMixin as a helper class
    limiter = BadRequestRateLimiter()
    if limiter.is_rate_limit_exceeded(request):
        AUDIT_LOG.warning("Rate limit exceeded in password_reset")
        return HttpResponseForbidden()
    form = PasswordResetFormNoActive(request.POST)
    if form.is_valid():
        form.save(use_https=request.is_secure(),
                  from_email=configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL),
                  request=request)
        # When password change is complete, a "edx.user.settings.changed" event will be emitted.
        # But because changing the password is multi-step, we also emit an event here so that we can
        # track where the request was initiated.
        tracker.emit(
            SETTING_CHANGE_INITIATED,
            {
                "setting": "password",
                "old": None,
                "new": None,
                "user_id": request.user.id,
            }
        )
        destroy_oauth_tokens(request.user)
    else:
        # bad user? tick the rate limiter counter
        AUDIT_LOG.info("Bad password_reset user passed in.")
        limiter.tick_bad_request_counter(request)
    return JsonResponse({
        'success': True,
        'value': render_to_string('registration/password_reset_done.html', {}),
    })
def uidb36_to_uidb64(uidb36):
    """
    Convert a base36-encoded user ID (old-style password reset URLs) into
    the base64 form expected by modern Django reset views.
    https://github.com/django/django/commit/1184d077893ff1bc947e45b00a4d565f3df81776#diff-c571286052438b2e3190f8db8331a92bR231

    Args:
        uidb36: base36-encoded user ID

    Returns: base64-encoded user ID. Otherwise returns a dummy, invalid ID
    """
    try:
        uid_int = base36_to_int(uidb36)
    except ValueError:
        # Dummy invalid ID (incorrect padding for base64) so the downstream
        # lookup fails gracefully instead of raising here.
        return '1'
    return force_text(urlsafe_base64_encode(force_bytes(uid_int)))
def validate_password(password):
    """
    Validate password overall strength if ENFORCE_PASSWORD_POLICY is enabled,
    otherwise only validate the length of the password.

    Args:
        password: the user's proposed new password.

    Returns:
        An error message if there's a violation of one of the password
        checks. Otherwise, `None`.
    """
    if settings.FEATURES.get('ENFORCE_PASSWORD_POLICY', False):
        checker = validate_password_strength
    else:
        checker = validate_password_length
    try:
        checker(password)
    except ValidationError as err:
        return _('Password: ') + '; '.join(err.messages)
def validate_password_security_policy(user, password):
    """
    Tie in password policy enforcement as an optional level of
    security protection.

    Two policies are checked: password reuse, and minimum time between
    resets. When both are violated, the too-frequent-reset message wins
    (it overwrites the reuse message, matching historical behavior).

    Args:
        user: the user object whose password we're checking.
        password: the user's proposed new password.

    Returns:
        An error message if there's a violation of one of the password
        checks. Otherwise, `None`.
    """
    err_msg = None
    # First policy: disallow re-use of recent passwords.
    if not PasswordHistory.is_allowable_password_reuse(user, password):
        config_key = (
            'MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE' if user.is_staff
            else 'MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE'
        )
        num_distinct = settings.ADVANCED_SECURITY_CONFIG[config_key]
        # Because of how ngettext is, splitting the following into shorter lines would be ugly.
        # pylint: disable=line-too-long
        err_msg = ungettext(
            "You are re-using a password that you have used recently. You must have {num} distinct password before reusing a previous password.",
            "You are re-using a password that you have used recently. You must have {num} distinct passwords before reusing a previous password.",
            num_distinct
        ).format(num=num_distinct)
    # Second policy: disallow resets that happen too frequently.
    if PasswordHistory.is_password_reset_too_soon(user):
        num_days = settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS']
        # Because of how ngettext is, splitting the following into shorter lines would be ugly.
        # pylint: disable=line-too-long
        err_msg = ungettext(
            "You are resetting passwords too frequently. Due to security policies, {num} day must elapse between password resets.",
            "You are resetting passwords too frequently. Due to security policies, {num} days must elapse between password resets.",
            num_days
        ).format(num=num_days)
    return err_msg
def password_reset_confirm_wrapper(request, uidb36=None, token=None):
    """
    A wrapper around django.contrib.auth.views.password_reset_confirm.
    Needed because we want to set the user as active at this step.
    We also optionally do some additional password policy checks.
    """
    # convert old-style base36-encoded user id to base64
    uidb64 = uidb36_to_uidb64(uidb36)
    platform_name = {
        "platform_name": configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME)
    }
    try:
        uid_int = base36_to_int(uidb36)
        user = User.objects.get(id=uid_int)
    except (ValueError, User.DoesNotExist):
        # if there's any error getting a user, just let django's
        # password_reset_confirm function handle it.
        return password_reset_confirm(
            request, uidb64=uidb64, token=token, extra_context=platform_name
        )
    if request.method == 'POST':
        password = request.POST['new_password1']
        valid_link = False
        # Security-policy violations (reuse / too-frequent reset) are checked
        # before the strength/length validation.
        error_message = validate_password_security_policy(user, password)
        if not error_message:
            # if security is not violated, we need to validate password
            error_message = validate_password(password)
            if error_message:
                # password reset link will be valid if there is no security violation
                valid_link = True
        if error_message:
            # We have a password reset attempt which violates some security
            # policy, or any other validation. Use the existing Django template to communicate that
            # back to the user.
            context = {
                'validlink': valid_link,
                'form': None,
                'title': _('Password reset unsuccessful'),
                'err_msg': error_message,
            }
            context.update(platform_name)
            return TemplateResponse(
                request, 'registration/password_reset_confirm.html', context
            )
        # remember what the old password hash is before we call down
        old_password_hash = user.password
        response = password_reset_confirm(
            request, uidb64=uidb64, token=token, extra_context=platform_name
        )
        # If password reset was unsuccessful a template response is returned (status_code 200).
        # Check if form is invalid then show an error to the user.
        # Note if password reset was successful we get response redirect (status_code 302).
        if response.status_code == 200:
            form_valid = response.context_data['form'].is_valid() if response.context_data['form'] else False
            if not form_valid:
                log.warning(
                    u'Unable to reset password for user [%s] because form is not valid. '
                    u'A possible cause is that the user had an invalid reset token',
                    user.username,
                )
                response.context_data['err_msg'] = _('Error in resetting your password. Please try again.')
            return response
        # get the updated user
        updated_user = User.objects.get(id=uid_int)
        # did the password hash change, if so record it in the PasswordHistory
        if updated_user.password != old_password_hash:
            entry = PasswordHistory()
            entry.create(updated_user)
    else:
        response = password_reset_confirm(
            request, uidb64=uidb64, token=token, extra_context=platform_name
        )
        response_was_successful = response.context_data.get('validlink')
        # A valid reset link also (re)activates the account.
        if response_was_successful and not user.is_active:
            user.is_active = True
            user.save()
    return response
def reactivation_email_for_user(user):
    """Re-send the account-activation e-mail to ``user``.

    Returns a JsonResponse with {'success': True} on success, or
    {'success': False, 'error': ...} when the registration record,
    profile context, or e-mail delivery is unavailable.
    """
    try:
        registration = Registration.objects.get(user=user)
    except Registration.DoesNotExist:
        return JsonResponse({
            "success": False,
            "error": _('No inactive user with this e-mail exists'),
        })  # TODO: this should be status code 400  # pylint: disable=fixme
    try:
        context = generate_activation_email_context(user, registration)
    except ObjectDoesNotExist:
        log.error(
            u'Unable to send reactivation email due to unavailable profile for the user "%s"',
            user.username,
            exc_info=True
        )
        return JsonResponse({
            "success": False,
            "error": _('Unable to send reactivation email')
        })
    subject = render_to_string('emails/activation_email_subject.txt', context)
    # E-mail subjects must be a single line.
    subject = ''.join(subject.splitlines())
    message = render_to_string('emails/activation_email.txt', context)
    # ACTIVATION_EMAIL_FROM_ADDRESS overrides the generic from-address when set.
    from_address = configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
    from_address = configuration_helpers.get_value('ACTIVATION_EMAIL_FROM_ADDRESS', from_address)
    try:
        user.email_user(subject, message, from_address)
    except Exception:  # pylint: disable=broad-except
        log.error(
            u'Unable to send reactivation email from "%s" to "%s"',
            from_address,
            user.email,
            exc_info=True
        )
        return JsonResponse({
            "success": False,
            "error": _('Unable to send reactivation email')
        })  # TODO: this should be status code 500  # pylint: disable=fixme
    return JsonResponse({"success": True})
def validate_new_email(user, new_email):
    """
    Given a new email for a user, does some basic verification of the new address If any issues are encountered
    with verification a ValueError will be thrown.

    Checks, in order: the address is well-formed, differs from the user's
    current address, and is not already used by another account.

    Raises:
        ValueError: with a translated message describing the first problem found.
    """
    try:
        validate_email(new_email)
    except ValidationError:
        raise ValueError(_('Valid e-mail address required.'))

    if new_email == user.email:
        raise ValueError(_('Old email is the same as the new email.'))

    # .exists() lets the database stop at the first matching row instead of
    # counting all of them, unlike the previous ``count() != 0`` check.
    if User.objects.filter(email=new_email).exists():
        raise ValueError(_('An account with this e-mail already exists.'))
def do_email_change_request(user, new_email, activation_key=None):
    """
    Record a pending email change for ``user`` and send an activation link
    to the new address.

    Stores (or refreshes) the user's single ``PendingEmailChange`` row with an
    activation key, emails the activation link to ``new_email``, and emits a
    tracking event marking where the change was initiated.

    Raises:
        ValueError: if the activation message cannot be sent.
    """
    # Reuse the user's existing pending change rather than piling up rows.
    # .first() fetches at most one record instead of materializing the whole
    # queryset the way the previous ``len(...)`` check did.
    pec = PendingEmailChange.objects.filter(user=user).first()
    if pec is None:
        pec = PendingEmailChange()
        pec.user = user

    # if activation_key is not passing as an argument, generate a random key
    if not activation_key:
        activation_key = uuid.uuid4().hex

    pec.new_email = new_email
    pec.activation_key = activation_key
    pec.save()

    context = {
        'key': pec.activation_key,
        'old_email': user.email,
        'new_email': pec.new_email
    }

    # Subject must be a single line; collapse any newlines from the template.
    subject = render_to_string('emails/email_change_subject.txt', context)
    subject = ''.join(subject.splitlines())
    message = render_to_string('emails/email_change.txt', context)

    from_address = configuration_helpers.get_value(
        'email_from_address',
        settings.DEFAULT_FROM_EMAIL
    )

    try:
        mail.send_mail(subject, message, from_address, [pec.new_email])
    except Exception:  # pylint: disable=broad-except
        log.error(u'Unable to send email activation link to user from "%s"', from_address, exc_info=True)
        raise ValueError(_('Unable to send email activation link. Please try again later.'))

    # When the email address change is complete, a "edx.user.settings.changed" event will be emitted.
    # But because changing the email address is multi-step, we also emit an event here so that we can
    # track where the request was initiated.
    tracker.emit(
        SETTING_CHANGE_INITIATED,
        {
            "setting": "email",
            "old": context['old_email'],
            "new": context['new_email'],
            "user_id": user.id,
        }
    )
@ensure_csrf_cookie
def confirm_email_change(request, key):  # pylint: disable=unused-argument
    """
    User requested a new e-mail. This is called when the activation
    link is clicked. We confirm with the old e-mail, and update.

    The whole flow runs inside one atomic block: every failure path explicitly
    marks the transaction for rollback before returning its error page, so the
    user/profile rows are never left half-updated.
    """
    with transaction.atomic():
        try:
            pec = PendingEmailChange.objects.get(activation_key=key)
        except PendingEmailChange.DoesNotExist:
            # Unknown (or already-consumed) activation key.
            response = render_to_response("invalid_email_key.html", {})
            transaction.set_rollback(True)
            return response

        user = pec.user
        address_context = {
            'old_email': user.email,
            'new_email': pec.new_email
        }

        # Someone else may have claimed the address since the change was requested.
        if len(User.objects.filter(email=pec.new_email)) != 0:
            response = render_to_response("email_exists.html", {})
            transaction.set_rollback(True)
            return response

        # Render the confirmation email; headers may not span lines, so the
        # subject is collapsed onto a single line.
        subject = render_to_string('emails/email_change_subject.txt', address_context)
        subject = ''.join(subject.splitlines())
        message = render_to_string('emails/confirm_email_change.txt', address_context)

        # Archive the old address (with a UTC timestamp) in the profile's meta blob.
        u_prof = UserProfile.objects.get(user=user)
        meta = u_prof.get_meta()
        if 'old_emails' not in meta:
            meta['old_emails'] = []
        meta['old_emails'].append([user.email, datetime.datetime.now(UTC).isoformat()])
        u_prof.set_meta(meta)
        u_prof.save()

        # Send it to the old email...
        try:
            user.email_user(
                subject,
                message,
                configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
            )
        except Exception:  # pylint: disable=broad-except
            # Failure to notify the old address aborts the whole change.
            log.warning('Unable to send confirmation email to old address', exc_info=True)
            response = render_to_response("email_change_failed.html", {'email': user.email})
            transaction.set_rollback(True)
            return response

        # Only now commit the actual switch and consume the pending record.
        user.email = pec.new_email
        user.save()
        pec.delete()

        # And send it to the new email...
        try:
            user.email_user(
                subject,
                message,
                configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
            )
        except Exception:  # pylint: disable=broad-except
            # Rolls back the address switch above as well.
            log.warning('Unable to send confirmation email to new address', exc_info=True)
            response = render_to_response("email_change_failed.html", {'email': pec.new_email})
            transaction.set_rollback(True)
            return response

        response = render_to_response("email_change_successful.html", address_context)
        return response
@require_POST
@login_required
@ensure_csrf_cookie
def change_email_settings(request):
    """
    Modify logged-in user's setting for receiving emails from a course.

    POST params:
        course_id: course whose email setting is being changed.
        receive_emails: truthy value opts the user in; empty/absent opts out.

    Returns a JSON ``{"success": true}`` response.
    """
    user = request.user

    course_id = request.POST.get("course_id")
    course_key = CourseKey.from_string(course_id)
    receive_emails = request.POST.get("receive_emails")
    # NOTE(review): only truthiness of the raw POST string is tested, so any
    # non-empty value (even "false") opts the user in -- confirm callers send
    # an empty/missing value to opt out.
    if receive_emails:
        # Opting in just removes any opt-out record. Deleting the queryset
        # directly issues a single statement and is a harmless no-op when no
        # record exists, so the previous existence check (an extra query) was
        # redundant.
        Optout.objects.filter(user=user, course_id=course_key).delete()
        log.info(
            u"User %s (%s) opted in to receive emails from course %s",
            user.username,
            user.email,
            course_id,
        )
        track.views.server_track(
            request,
            "change-email-settings",
            {"receive_emails": "yes", "course": course_id},
            page='dashboard',
        )
    else:
        # get_or_create keeps opting out idempotent.
        Optout.objects.get_or_create(user=user, course_id=course_key)
        log.info(
            u"User %s (%s) opted out of receiving emails from course %s",
            user.username,
            user.email,
            course_id,
        )
        track.views.server_track(
            request,
            "change-email-settings",
            {"receive_emails": "no", "course": course_id},
            page='dashboard',
        )

    return JsonResponse({"success": True})
class LogoutView(TemplateView):
    """
    Logs out user and redirects.

    The template should load iframes to log the user out of OpenID Connect services.
    See http://openid.net/specs/openid-connect-logout-1_0.html.
    """
    # Client IDs of OAuth2/OIDC applications the user authorized this session;
    # captured in dispatch() *before* logout() clears the session.
    oauth_client_ids = []
    template_name = 'logout.html'

    # Keep track of the page to which the user should ultimately be redirected.
    default_target = reverse_lazy('cas-logout') if settings.FEATURES.get('AUTH_USE_CAS') else '/'

    @property
    def target(self):
        """
        If a redirect_url is specified in the querystring for this request, and the value is a url
        with the same host, the view will redirect to this page after rendering the template.
        If it is not specified, we will use the default target url.
        """
        target_url = self.request.GET.get('redirect_url')

        # is_safe_url guards against open redirects to foreign hosts.
        if target_url and is_safe_url(target_url, self.request.META.get('HTTP_HOST')):
            return target_url
        else:
            return self.default_target

    def dispatch(self, request, *args, **kwargs):  # pylint: disable=missing-docstring
        # We do not log here, because we have a handler registered to perform logging on successful logouts.
        request.is_from_logout = True

        # Get the list of authorized clients before we clear the session.
        self.oauth_client_ids = request.session.get(edx_oauth2_provider.constants.AUTHORIZED_CLIENTS_SESSION_KEY, [])

        logout(request)

        # If we don't need to deal with OIDC logouts, just redirect the user.
        if self.oauth_client_ids:
            # Render the logout template, whose iframes hit each IDA's logout URI.
            response = super(LogoutView, self).dispatch(request, *args, **kwargs)
        else:
            response = redirect(self.target)

        # Clear the cookie used by the edx.org marketing site
        delete_logged_in_cookies(response)

        return response

    def _build_logout_url(self, url):
        """
        Builds a logout URL with the `no_redirect` query string parameter.

        Args:
            url (str): IDA logout URL

        Returns:
            str
        """
        scheme, netloc, path, query_string, fragment = urlsplit(url)
        query_params = parse_qs(query_string)
        # Tell the IDA not to redirect back; these URLs load in hidden iframes.
        query_params['no_redirect'] = 1
        new_query_string = urlencode(query_params, doseq=True)
        return urlunsplit((scheme, netloc, path, new_query_string, fragment))

    def get_context_data(self, **kwargs):
        context = super(LogoutView, self).get_context_data(**kwargs)

        # Create a list of URIs that must be called to log the user out of all of the IDAs.
        uris = Client.objects.filter(client_id__in=self.oauth_client_ids,
                                     logout_uri__isnull=False).values_list('logout_uri', flat=True)

        referrer = self.request.META.get('HTTP_REFERER', '').strip('/')
        logout_uris = []

        for uri in uris:
            # Skip URIs under the referring host -- presumably the service the
            # user initiated logout from handles its own session. TODO confirm.
            if not referrer or (referrer and not uri.startswith(referrer)):
                logout_uris.append(self._build_logout_url(uri))

        context.update({
            'target': self.target,
            'logout_uris': logout_uris,
        })

        return context
|
lduarte1991/edx-platform
|
common/djangoapps/student/views.py
|
Python
|
agpl-3.0
| 126,253
|
[
"VisIt"
] |
fd6b1f8b65bd77fab797bb96dcf82d9947ac80a08f35e3ed11c78f3314529d68
|
"""
========================
Random Number Generation
========================
==================== =========================================================
Utility functions
==============================================================================
random_sample Uniformly distributed floats over ``[0, 1)``.
random Alias for `random_sample`.
bytes Uniformly distributed random bytes.
random_integers Uniformly distributed integers in a given range.
permutation Randomly permute a sequence / generate a random sequence.
shuffle Randomly permute a sequence in place.
seed Seed the random number generator.
choice Random sample from 1-D array.
==================== =========================================================
==================== =========================================================
Compatibility functions
==============================================================================
rand Uniformly distributed values.
randn Normally distributed values.
ranf Uniformly distributed floating point numbers.
randint Uniformly distributed integers in a given range.
==================== =========================================================
==================== =========================================================
Univariate distributions
==============================================================================
beta Beta distribution over ``[0, 1]``.
binomial Binomial distribution.
chisquare :math:`\\chi^2` distribution.
exponential Exponential distribution.
f F (Fisher-Snedecor) distribution.
gamma Gamma distribution.
geometric Geometric distribution.
gumbel Gumbel distribution.
hypergeometric Hypergeometric distribution.
laplace Laplace distribution.
logistic Logistic distribution.
lognormal Log-normal distribution.
logseries Logarithmic series distribution.
negative_binomial Negative binomial distribution.
noncentral_chisquare Non-central chi-square distribution.
noncentral_f Non-central F distribution.
normal Normal / Gaussian distribution.
pareto Pareto distribution.
poisson Poisson distribution.
power Power distribution.
rayleigh Rayleigh distribution.
triangular Triangular distribution.
uniform Uniform distribution.
vonmises Von Mises circular distribution.
wald Wald (inverse Gaussian) distribution.
weibull Weibull distribution.
zipf Zipf's distribution over ranked data.
==================== =========================================================
==================== =========================================================
Multivariate distributions
==============================================================================
dirichlet Multivariate generalization of Beta distribution.
multinomial Multivariate generalization of the binomial distribution.
multivariate_normal Multivariate generalization of the normal distribution.
==================== =========================================================
==================== =========================================================
Standard distributions
==============================================================================
standard_cauchy Standard Cauchy-Lorentz distribution.
standard_exponential Standard exponential distribution.
standard_gamma Standard Gamma distribution.
standard_normal Standard normal distribution.
standard_t Standard Student's t-distribution.
==================== =========================================================
==================== =========================================================
Internal functions
==============================================================================
get_state Get tuple representing internal state of generator.
set_state Set state of generator.
==================== =========================================================
"""
from __future__ import division, absolute_import, print_function
import warnings
# Names re-exported as the public ``numpy.random`` API. Everything listed here
# is defined in the compiled ``mtrand`` extension imported below; the
# ``ranf``/``random``/``sample`` aliases are appended to this list afterwards.
__all__ = [
    'beta',
    'binomial',
    'bytes',
    'chisquare',
    'choice',
    'dirichlet',
    'exponential',
    'f',
    'gamma',
    'geometric',
    'get_state',
    'gumbel',
    'hypergeometric',
    'laplace',
    'logistic',
    'lognormal',
    'logseries',
    'multinomial',
    'multivariate_normal',
    'negative_binomial',
    'noncentral_chisquare',
    'noncentral_f',
    'normal',
    'pareto',
    'permutation',
    'poisson',
    'power',
    'rand',
    'randint',
    'randn',
    'random_integers',
    'random_sample',
    'rayleigh',
    'seed',
    'set_state',
    'shuffle',
    'standard_cauchy',
    'standard_exponential',
    'standard_gamma',
    'standard_normal',
    'standard_t',
    'triangular',
    'uniform',
    'vonmises',
    'wald',
    'weibull',
    'zipf'
]
# Import the compiled generator. The suppressed message matches the warning
# numpy emits when an extension was built against a different numpy ABI --
# presumably harmless here; see the filter's message text. TODO confirm.
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
    from .mtrand import *

# Some aliases (kept for backwards compatibility with older numpy releases):
ranf = random = sample = random_sample
__all__.extend(['ranf', 'random', 'sample'])
def __RandomState_ctor():
    """Return a RandomState instance.

    This function exists solely to assist (un)pickling.

    Note that the state of the RandomState returned here is irrelevant, as this
    function's entire purpose is to return a newly allocated RandomState whose
    state the pickle machinery can then set.  Consequently the RandomState
    returned by this function is a freshly allocated copy with a seed=0.

    See https://github.com/numpy/numpy/issues/4763 for a detailed discussion
    """
    # seed=0 keeps allocation deterministic; the unpickler overwrites the state.
    return RandomState(seed=0)
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
|
ryfeus/lambda-packs
|
pytorch/source/numpy/random/__init__.py
|
Python
|
mit
| 6,053
|
[
"Gaussian"
] |
46f2870ba1a93c211c65d18b6220c6dd11f9bc46c97ce90be1f3348a07c10000
|
###
### GPAW benchmark: Carbon Fullerenes on a Lead Surface
###
# Flat benchmark script: configures a GPAW DFT calculation on atoms read from
# a POSCAR file and runs a deliberately capped number of SCF iterations.
from __future__ import print_function
from gpaw.mpi import size, rank
from gpaw import GPAW, Mixer, ConvergenceError
from gpaw.occupations import FermiDirac
from ase.io import read

# The RMM-DIIS eigensolver module was renamed between GPAW versions.
try:
    from gpaw.eigensolvers.rmm_diis import RMM_DIIS
except ImportError:
    from gpaw.eigensolvers.rmmdiis import RMMDIIS as RMM_DIIS

# Accelerator flags exist only in patched GPAW builds; absence means CPU-only.
try:
    from gpaw import use_mic
except ImportError:
    use_mic = False
try:
    from gpaw import use_cuda
    # NOTE(review): the imported value is discarded and the flag forced True
    # whenever the name merely exists -- confirm this is intended.
    use_cuda = True
except ImportError:
    use_cuda = False
use_cpu = not (use_mic or use_cuda)

# grid spacing (decrease to scale up the system)
h = 0.22
# no. of k-points in Brillouin-zone sampling grid (an increase will lift
# the upper scaling limit, but also consume more memory and time)
kpts = (2,2,1)
# other parameters
input_coords = 'POSCAR'
txt = 'output.txt'
maxiter = 15  # small on purpose: this is a benchmark, not a converged run
parallel = {'sl_default': (4,4,64)}  # presumably ScaLAPACK grid/block config -- verify

# output benchmark parameters (only the MPI root rank prints)
if rank == 0:
    print("#"*60)
    print("GPAW benchmark: Carbon Fullerenes on a Lead Surface")
    print(" grid spacing: h=%f" % h)
    print(" Brillouin-zone sampling: kpts=" + str(kpts))
    print(" MPI tasks: %d" % size)
    print(" using CUDA (GPGPU): " + str(use_cuda))
    print(" using pyMIC (KNC) : " + str(use_mic))
    print(" using CPU (or KNL): " + str(use_cpu))
    print("#"*60)
    print("")

# setup parameters
args = {'h': h,
        'nbands': -180,
        'occupations': FermiDirac(0.2),
        'kpts': kpts,
        'xc': 'PBE',
        'mixer': Mixer(0.1, 5, 100),
        'eigensolver': 'rmm-diis',
        'maxiter': maxiter,
        'xc_thread': False,
        'txt': txt}
if use_cuda:
    args['gpu'] = {'cuda': True, 'hybrid_blas': False}
# NOTE(review): this assignment cannot fail, so the bare except is dead code --
# presumably a leftover guard for GPAW versions lacking 'parallel' support.
try:
    args['parallel'] = parallel
except: pass

# setup the system
atoms = read(input_coords)
calc = GPAW(**args)
atoms.set_calculator(calc)

# execute the run
try:
    atoms.get_potential_energy()
except ConvergenceError:
    # Likely deliberate given the small maxiter: non-convergence is expected
    # and ignored, since only the timed iterations matter for the benchmark.
    pass
|
mlouhivu/gpaw-accelerator-benchmarks
|
fullerenes-on-surface/input.py
|
Python
|
mit
| 1,958
|
[
"ASE",
"GPAW"
] |
a28c6ba0cd50ee5953602e2270f420c898f5ac6db86f24ce248ac3472be4db2f
|
"""
Django module container for classes and operations related to the "Course Module" content type
"""
import logging
from cStringIO import StringIO
from math import exp
from lxml import etree
from path import path # NOTE (THK): Only used for detecting presence of syllabus
import requests
from datetime import datetime
import dateutil.parser
from lazy import lazy
from xmodule import course_metadata_utils
from xmodule.course_metadata_utils import DEFAULT_START_DATE
from xmodule.exceptions import UndefinedContext
from xmodule.seq_module import SequenceDescriptor, SequenceModule
from xmodule.graders import grader_from_conf
from xmodule.tabs import CourseTabList
from xmodule.mixin import LicenseMixin
import json
from xblock.fields import Scope, List, String, Dict, Boolean, Integer, Float
from .fields import Date
from django.utils.timezone import UTC
log = logging.getLogger(__name__)
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
CATALOG_VISIBILITY_CATALOG_AND_ABOUT = "both"
CATALOG_VISIBILITY_ABOUT = "about"
CATALOG_VISIBILITY_NONE = "none"
class StringOrDate(Date):
    """
    A ``Date`` field variant whose values may also be plain strings.

    Values that parse/serialize cleanly round-trip through the normal ``Date``
    behavior; anything that fails (or yields ``None``) is passed through
    unchanged as a string.
    """
    def from_json(self, value):
        """
        Parse an optional metadata key containing a time or a string:
        if present, assume it's a string if it doesn't parse.
        """
        try:
            result = super(StringOrDate, self).from_json(value)
        except ValueError:
            return value
        if result is None:
            return value
        else:
            return result

    def to_json(self, value):
        """
        Convert a time struct or string to a string.
        """
        try:
            result = super(StringOrDate, self).to_json(value)
        # Was a bare ``except:``, which would also swallow SystemExit and
        # KeyboardInterrupt; Exception keeps the same string fall-through
        # behavior for any real serialization error.
        except Exception:  # pylint: disable=broad-except
            return value
        if result is None:
            return value
        else:
            return result
# Shared lxml parser for course XML: skip DTD work and drop comments and
# ignorable whitespace so parses are fast and deterministic.
edx_xml_parser = etree.XMLParser(dtd_validation=False, load_dtd=False,
                                 remove_comments=True, remove_blank_text=True)

# In-memory cache of fetched textbook tables of contents, keyed by TOC URL.
# Used by Textbook.table_of_contents.
_cached_toc = {}
class Textbook(object):
    """
    A single course textbook: a title plus the base URL under which the book's
    ``toc.xml`` table of contents is hosted.
    """
    def __init__(self, title, book_url):
        self.title = title
        self.book_url = book_url

    @lazy
    def start_page(self):
        # First page is the 'page' attribute of the first TOC entry.
        return int(self.table_of_contents[0].attrib['page'])

    @lazy
    def end_page(self):
        # The last page should be the last element in the table of contents,
        # but it may be nested. So recurse all the way down the last element
        last_el = self.table_of_contents[-1]
        while last_el.getchildren():
            last_el = last_el[-1]
        return int(last_el.attrib['page'])

    @lazy
    def table_of_contents(self):
        """
        Accesses the textbook's table of contents (default name "toc.xml") at the URL self.book_url

        Returns XML tree representation of the table of contents
        """
        toc_url = self.book_url + 'toc.xml'

        # cdodge: I've added this caching of TOC because in Mongo-backed instances (but not Filesystem stores)
        # course modules have a very short lifespan and are constantly being created and torn down.
        # Since this module in the __init__() method does a synchronous call to AWS to get the TOC
        # this is causing a big performance problem. So let's be a bit smarter about this and cache
        # each fetch and store in-mem for 10 minutes.
        # NOTE: I have to get this onto sandbox ASAP as we're having runtime failures. I'd like to swing back and
        # rewrite to use the traditional Django in-memory cache.
        try:
            # see if we already fetched this
            if toc_url in _cached_toc:
                (table_of_contents, timestamp) = _cached_toc[toc_url]
                age = datetime.now(UTC) - timestamp
                # expire every 10 minutes
                # BUG FIX: was ``age.seconds``, which ignores the days component
                # of the timedelta and would serve entries older than a day.
                if age.total_seconds() < 600:
                    return table_of_contents
        except Exception:  # pylint: disable=broad-except
            # Cache lookup is best-effort; fall through to a fresh fetch.
            pass

        # Get the table of contents from S3
        # (lazy %-args instead of eager string interpolation)
        log.info("Retrieving textbook table of contents from %s", toc_url)
        try:
            r = requests.get(toc_url)
        except Exception as err:
            msg = 'Error %s: Unable to retrieve textbook table of contents at %s' % (err, toc_url)
            log.error(msg)
            raise Exception(msg)

        # TOC is XML. Parse it
        try:
            table_of_contents = etree.fromstring(r.text)
        except Exception as err:
            msg = 'Error %s: Unable to parse XML for textbook table of contents at %s' % (err, toc_url)
            log.error(msg)
            raise Exception(msg)

        # BUG FIX: the cache was read above but never written, so every access
        # re-fetched the TOC. Store the freshly parsed tree with a timestamp
        # so the 10-minute expiry above actually takes effect.
        _cached_toc[toc_url] = (table_of_contents, datetime.now(UTC))

        return table_of_contents

    def __eq__(self, other):
        return (self.title == other.title and
                self.book_url == other.book_url)

    def __ne__(self, other):
        return not self == other
class TextbookList(List):
    """
    XBlock List field holding ``Textbook`` objects, serialized as
    (title, book_url) pairs.
    """
    def from_json(self, values):
        """Deserialize (title, book_url) pairs into Textbook objects."""
        textbooks = []
        for title, book_url in values:
            try:
                textbooks.append(Textbook(title, book_url))
            # Was a bare ``except:``; Exception preserves the skip-and-continue
            # behavior without swallowing SystemExit/KeyboardInterrupt.
            except Exception:  # pylint: disable=broad-except
                # If we can't get to S3 (e.g. on a train with no internet), don't break
                # the rest of the courseware.
                log.exception("Couldn't load textbook ({0}, {1})".format(title, book_url))
                continue
        return textbooks

    def to_json(self, values):
        """Serialize to JSON-able pairs; raw tuples pass through, junk is dropped."""
        json_data = []
        for val in values:
            if isinstance(val, Textbook):
                json_data.append((val.title, val.book_url))
            elif isinstance(val, tuple):
                json_data.append(val)
            else:
                continue
        return json_data
class CourseFields(object):
lti_passports = List(
display_name=_("LTI Passports"),
help=_('Enter the passports for course LTI tools in the following format: "id:client_key:client_secret".'),
scope=Scope.settings
)
textbooks = TextbookList(
help=_("List of pairs of (title, url) for textbooks used in this course"),
default=[],
scope=Scope.content
)
wiki_slug = String(help=_("Slug that points to the wiki for this course"), scope=Scope.content)
enrollment_start = Date(help=_("Date that enrollment for this class is opened"), scope=Scope.settings)
enrollment_end = Date(help=_("Date that enrollment for this class is closed"), scope=Scope.settings)
start = Date(
help=_("Start time when this module is visible"),
default=DEFAULT_START_DATE,
scope=Scope.settings
)
end = Date(help=_("Date that this class ends"), scope=Scope.settings)
cosmetic_display_price = Integer(
display_name=_("Cosmetic Course Display Price"),
help=_(
"The cost displayed to students for enrolling in the course. If a paid course registration price is "
"set by an administrator in the database, that price will be displayed instead of this one."
),
default=0,
scope=Scope.settings,
)
advertised_start = String(
display_name=_("Course Advertised Start Date"),
help=_(
"Enter the date you want to advertise as the course start date, if this date is different from the set "
"start date. To advertise the set start date, enter null."
),
scope=Scope.settings
)
pre_requisite_courses = List(
display_name=_("Pre-Requisite Courses"),
help=_("Pre-Requisite Course key if this course has a pre-requisite course"),
scope=Scope.settings
)
grading_policy = Dict(
help=_("Grading policy definition for this class"),
default={
"GRADER": [
{
"type": "Homework",
"min_count": 12,
"drop_count": 2,
"short_label": "HW",
"weight": 0.15,
},
{
"type": "Lab",
"min_count": 12,
"drop_count": 2,
"weight": 0.15,
},
{
"type": "Midterm Exam",
"short_label": "Midterm",
"min_count": 1,
"drop_count": 0,
"weight": 0.3,
},
{
"type": "Final Exam",
"short_label": "Final",
"min_count": 1,
"drop_count": 0,
"weight": 0.4,
}
],
"GRADE_CUTOFFS": {
"Pass": 0.5,
},
},
scope=Scope.content
)
show_calculator = Boolean(
display_name=_("Show Calculator"),
help=_("Enter true or false. When true, students can see the calculator in the course."),
default=False,
scope=Scope.settings
)
display_name = String(
#help=_("Enter the name of the course as it should appear in the edX.org course list."),
help=_("Enter the name of the course as it should appear in the course list."),
default="Empty",
display_name=_("Course Display Name"),
scope=Scope.settings
)
course_edit_method = String(
display_name=_("Course Editor"),
help=_('Enter the method by which this course is edited ("XML" or "Studio").'),
default="Studio",
scope=Scope.settings,
deprecated=True # Deprecated because someone would not edit this value within Studio.
)
show_chat = Boolean(
display_name=_("Show Chat Widget"),
help=_("Enter true or false. When true, students can see the chat widget in the course."),
default=False,
scope=Scope.settings
)
tabs = CourseTabList(help="List of tabs to enable in this course", scope=Scope.settings, default=[])
end_of_course_survey_url = String(
display_name=_("Course Survey URL"),
help=_("Enter the URL for the end-of-course survey. If your course does not have a survey, enter null."),
scope=Scope.settings
)
discussion_blackouts = List(
display_name=_("Discussion Blackout Dates"),
help=_(
'Enter pairs of dates between which students cannot post to discussion forums. Inside the provided '
'brackets, enter an additional set of square brackets surrounding each pair of dates you add. '
'Format each pair of dates as ["YYYY-MM-DD", "YYYY-MM-DD"]. To specify times as well as dates, '
'format each pair as ["YYYY-MM-DDTHH:MM", "YYYY-MM-DDTHH:MM"]. Be sure to include the "T" between '
'the date and time. For example, an entry defining two blackout periods looks like this, including '
'the outer pair of square brackets: [["2015-09-15", "2015-09-21"], ["2015-10-01", "2015-10-08"]] '
),
scope=Scope.settings
)
discussion_topics = Dict(
display_name=_("Discussion Topic Mapping"),
help=_(
'Enter discussion categories in the following format: "CategoryName": '
'{"id": "i4x-InstitutionName-CourseNumber-course-CourseRun"}. For example, one discussion '
'category may be "Lydian Mode": {"id": "i4x-UniversityX-MUS101-course-2015_T1"}. The "id" '
'value for each category must be unique. In "id" values, the only special characters that are '
'supported are underscore, hyphen, and period.'
),
scope=Scope.settings
)
discussion_sort_alpha = Boolean(
display_name=_("Discussion Sorting Alphabetical"),
scope=Scope.settings, default=False,
help=_(
"Enter true or false. If true, discussion categories and subcategories are sorted alphabetically. "
"If false, they are sorted chronologically."
)
)
announcement = Date(
display_name=_("Course Announcement Date"),
help=_("Enter the date to announce your course."),
scope=Scope.settings
)
cohort_config = Dict(
display_name=_("Cohort Configuration"),
help=_(
"Enter policy keys and values to enable the cohort feature, define automated student assignment to "
"groups, or identify any course-wide discussion topics as private to cohort members."
),
scope=Scope.settings
)
is_new = Boolean(
display_name=_("Course Is New"),
help=_(
"Enter true or false. If true, the course appears in the list of new courses on edx.org, and a New! "
"badge temporarily appears next to the course image."
),
scope=Scope.settings
)
mobile_available = Boolean(
display_name=_("Mobile Course Available"),
help=_("Enter true or false. If true, the course will be available to mobile devices."),
default=False,
scope=Scope.settings
)
video_upload_pipeline = Dict(
display_name=_("Video Upload Credentials"),
#help=_("Enter the unique identifier for your course's video files provided by edX."),
help=_("Enter the unique identifier for your course's video files."),
scope=Scope.settings
)
facebook_url = String(
help=_(
"Enter the URL for the official course Facebook group. "
"If you provide a URL, the mobile app includes a button that students can tap to access the group."
),
default=None,
display_name=_("Facebook URL"),
scope=Scope.settings
)
no_grade = Boolean(
display_name=_("Course Not Graded"),
help=_("Enter true or false. If true, the course will not be graded."),
default=False,
scope=Scope.settings
)
disable_progress_graph = Boolean(
display_name=_("Disable Progress Graph"),
help=_("Enter true or false. If true, students cannot view the progress graph."),
default=False,
scope=Scope.settings
)
pdf_textbooks = List(
display_name=_("PDF Textbooks"),
help=_("List of dictionaries containing pdf_textbook configuration"), scope=Scope.settings
)
html_textbooks = List(
display_name=_("HTML Textbooks"),
help=_(
"For HTML textbooks that appear as separate tabs in the courseware, enter the name of the tab (usually "
"the name of the book) as well as the URLs and titles of all the chapters in the book."
),
scope=Scope.settings
)
remote_gradebook = Dict(
display_name=_("Remote Gradebook"),
help=_(
"Enter the remote gradebook mapping. Only use this setting when "
"REMOTE_GRADEBOOK_URL has been specified."
),
scope=Scope.settings
)
enable_ccx = Boolean(
# Translators: Custom Courses for edX (CCX) is an edX feature for re-using course content. CCX Coach is
# a role created by a course Instructor to enable a person (the "Coach") to manage the custom course for
# his students.
display_name=_("Enable CCX"),
# Translators: Custom Courses for edX (CCX) is an edX feature for re-using course content. CCX Coach is
# a role created by a course Instructor to enable a person (the "Coach") to manage the custom course for
# his students.
#help=_(
# "Allow course instructors to assign CCX Coach roles, and allow coaches to manage Custom Courses on edX."
# " When false, Custom Courses cannot be created, but existing Custom Courses will be preserved."
#),
help=_(
"Allow course instructors to assign CCX Coach roles, and allow coaches to manage Custom Courses."
" When false, Custom Courses cannot be created, but existing Custom Courses will be preserved."
),
default=False,
scope=Scope.settings
)
allow_anonymous = Boolean(
display_name=_("Allow Anonymous Discussion Posts"),
help=_("Enter true or false. If true, students can create discussion posts that are anonymous to all users."),
scope=Scope.settings, default=True
)
allow_anonymous_to_peers = Boolean(
display_name=_("Allow Anonymous Discussion Posts to Peers"),
help=_(
"Enter true or false. If true, students can create discussion posts that are anonymous to other "
"students. This setting does not make posts anonymous to course staff."
),
scope=Scope.settings, default=False
)
advanced_modules = List(
display_name=_("Advanced Module List"),
help=_("Enter the names of the advanced components to use in your course."),
scope=Scope.settings
)
has_children = True
checklists = List(
scope=Scope.settings,
default=[
{
"short_description": _("Getting Started With Studio"),
"items": [
{
"short_description": _("Add Course Team Members"),
"long_description": _(
"Grant your collaborators permission to edit your course so you can work together."
),
"is_checked": False,
"action_url": "ManageUsers",
"action_text": _("Edit Course Team"),
"action_external": False,
},
{
"short_description": _("Set Important Dates for Your Course"),
"long_description": _(
"Establish your course's student enrollment and launch dates on the Schedule and Details "
"page."
),
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": _("Edit Course Details & Schedule"),
"action_external": False,
},
{
"short_description": _("Draft Your Course's Grading Policy"),
"long_description": _(
"Set up your assignment types and grading policy even if you haven't created all your "
"assignments."
),
"is_checked": False,
"action_url": "SettingsGrading",
"action_text": _("Edit Grading Settings"),
"action_external": False,
},
{
"short_description": _("Explore the Other Studio Checklists"),
"long_description": _(
"Discover other available course authoring tools, and find help when you need it."
),
"is_checked": False,
"action_url": "",
"action_text": "",
"action_external": False,
},
],
},
{
"short_description": _("Draft a Rough Course Outline"),
"items": [
{
"short_description": _("Create Your First Section and Subsection"),
"long_description": _("Use your course outline to build your first Section and Subsection."),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Set Section Release Dates"),
"long_description": _(
"Specify the release dates for each Section in your course. Sections become visible to "
"students on their release dates."
),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Designate a Subsection as Graded"),
"long_description": _(
"Set a Subsection to be graded as a specific assignment type. Assignments within graded "
"Subsections count toward a student's final grade."
),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Reordering Course Content"),
"long_description": _("Use drag and drop to reorder the content in your course."),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Renaming Sections"),
"long_description": _("Rename Sections by clicking the Section name from the Course Outline."),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Deleting Course Content"),
"long_description": _(
"Delete Sections, Subsections, or Units you don't need anymore. Be careful, as there is "
"no Undo function."
),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Add an Instructor-Only Section to Your Outline"),
"long_description": _(
"Some course authors find using a section for unsorted, in-progress work useful. To do "
"this, create a section and set the release date to the distant future."
),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
],
},
{
"short_description": _("Explore Support Tools"),
"items": [
{
"short_description": _("Explore the Studio Help Forum"),
"long_description": _(
"Access the Studio Help forum from the menu that appears when you click your user name "
"in the top right corner of Studio."
),
"is_checked": False,
"action_url": "http://help.edge.edx.org/",
"action_text": _("Visit Studio Help"),
"action_external": True,
},
{
"short_description": _("Enroll in edX 101"),
"long_description": _("Register for edX 101, edX's primer for course creation."),
"is_checked": False,
"action_url": "https://edge.edx.org/courses/edX/edX101/How_to_Create_an_edX_Course/about",
"action_text": _("Register for edX 101"),
"action_external": True,
},
{
"short_description": _("Download the Studio Documentation"),
"long_description": _("Download the searchable Studio reference documentation in PDF form."),
"is_checked": False,
"action_url": "http://files.edx.org/Getting_Started_with_Studio.pdf",
"action_text": _("Download Documentation"),
"action_external": True,
},
],
},
{
"short_description": _("Draft Your Course About Page"),
"items": [
{
"short_description": _("Draft a Course Description"),
"long_description": _(
"Courses have an About page that includes a course video, description, and more. "
"Draft the text students will read before deciding to enroll in your course."
),
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": _("Edit Course Schedule & Details"),
"action_external": False,
},
{
"short_description": _("Add Staff Bios"),
"long_description": _(
"Showing prospective students who their instructor will be is helpful. "
"Include staff bios on the course About page."
),
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": _("Edit Course Schedule & Details"),
"action_external": False,
},
{
"short_description": _("Add Course FAQs"),
"long_description": _("Include a short list of frequently asked questions about your course."),
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": _("Edit Course Schedule & Details"),
"action_external": False,
},
{
"short_description": _("Add Course Prerequisites"),
"long_description": _(
"Let students know what knowledge and/or skills they should have before "
"they enroll in your course."
),
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": _("Edit Course Schedule & Details"),
"action_external": False,
},
],
},
],
)
info_sidebar_name = String(
display_name=_("Course Info Sidebar Name"),
help=_(
"Enter the heading that you want students to see above your course handouts on the Course Info page. "
"Your course handouts appear in the right panel of the page."
),
scope=Scope.settings, default='Course Handouts')
show_timezone = Boolean(
help=_(
"True if timezones should be shown on dates in the courseware. "
"Deprecated in favor of due_date_display_format."
),
scope=Scope.settings, default=True
)
due_date_display_format = String(
display_name=_("Due Date Display Format"),
help=_(
"Enter the format for due dates. The default is Mon DD, YYYY. Enter \"%m-%d-%Y\" for MM-DD-YYYY, "
"\"%d-%m-%Y\" for DD-MM-YYYY, \"%Y-%m-%d\" for YYYY-MM-DD, or \"%Y-%d-%m\" for YYYY-DD-MM."
),
scope=Scope.settings, default=None
)
enrollment_domain = String(
display_name=_("External Login Domain"),
help=_("Enter the external login method students can use for the course."),
scope=Scope.settings
)
certificates_show_before_end = Boolean(
display_name=_("Certificates Downloadable Before End"),
help=_(
"Enter true or false. If true, students can download certificates before the course ends, if they've "
"met certificate requirements."
),
scope=Scope.settings,
default=False,
deprecated=True
)
certificates_display_behavior = String(
display_name=_("Certificates Display Behavior"),
help=_(
"Enter end, early_with_info, or early_no_info. After certificate generation, students who passed see a "
"link to their certificates on the dashboard and students who did not pass see information about the "
"grading configuration. The default is end, which displays this certificate information to all students "
"after the course end date. To display this certificate information to all students as soon as "
"certificates are generated, enter early_with_info. To display only the links to passing students as "
"soon as certificates are generated, enter early_no_info."
),
scope=Scope.settings,
default="end"
)
course_image = String(
display_name=_("Course About Page Image"),
help=_(
"Edit the name of the course image file. You must upload this file on the Files & Uploads page. "
"You can also set the course image on the Settings & Details page."
),
scope=Scope.settings,
# Ensure that courses imported from XML keep their image
default="images_course_image.jpg"
)
issue_badges = Boolean(
display_name=_("Issue Open Badges"),
help=_(
"Issue Open Badges badges for this course. Badges are generated when certificates are created."
),
scope=Scope.settings,
default=True
)
## Course level Certificate Name overrides.
cert_name_short = String(
help=_(
"Use this setting only when generating PDF certificates. "
"Between quotation marks, enter the short name of the course to use on the certificate that "
"students receive when they complete the course."
),
display_name=_("Certificate Name (Short)"),
scope=Scope.settings,
default=""
)
cert_name_long = String(
help=_(
"Use this setting only when generating PDF certificates. "
"Between quotation marks, enter the long name of the course to use on the certificate that students "
"receive when they complete the course."
),
display_name=_("Certificate Name (Long)"),
scope=Scope.settings,
default=""
)
cert_html_view_overrides = Dict(
# Translators: This field is the container for course-specific certifcate configuration values
display_name=_("Certificate Web/HTML View Overrides"),
# Translators: These overrides allow for an alternative configuration of the certificate web view
help=_("Enter course-specific overrides for the Web/HTML template parameters here (JSON format)"),
scope=Scope.settings,
)
# Specific certificate information managed via Studio (should eventually fold other cert settings into this)
certificates = Dict(
# Translators: This field is the container for course-specific certifcate configuration values
display_name=_("Certificate Configuration"),
# Translators: These overrides allow for an alternative configuration of the certificate web view
help=_("Enter course-specific configuration information here (JSON format)"),
scope=Scope.settings,
)
# An extra property is used rather than the wiki_slug/number because
# there are courses that change the number for different runs. This allows
# courses to share the same css_class across runs even if they have
# different numbers.
#
# TODO get rid of this as soon as possible or potentially build in a robust
# way to add in course-specific styling. There needs to be a discussion
# about the right way to do this, but arjun will address this ASAP. Also
# note that the courseware template needs to change when this is removed.
css_class = String(
display_name=_("CSS Class for Course Reruns"),
help=_("Allows courses to share the same css class across runs even if they have different numbers."),
scope=Scope.settings, default="",
deprecated=True
)
# TODO: This is a quick kludge to allow CS50 (and other courses) to
# specify their own discussion forums as external links by specifying a
# "discussion_link" in their policy JSON file. This should later get
# folded in with Syllabus, Course Info, and additional Custom tabs in a
# more sensible framework later.
discussion_link = String(
display_name=_("Discussion Forum External Link"),
help=_("Allows specification of an external link to replace discussion forums."),
scope=Scope.settings,
deprecated=True
)
# TODO: same as above, intended to let internal CS50 hide the progress tab
# until we get grade integration set up.
# Explicit comparison to True because we always want to return a bool.
hide_progress_tab = Boolean(
display_name=_("Hide Progress Tab"),
help=_("Allows hiding of the progress tab."),
scope=Scope.settings,
deprecated=True
)
display_organization = String(
display_name=_("Course Organization Display String"),
help=_(
"Enter the course organization that you want to appear in the courseware. This setting overrides the "
"organization that you entered when you created the course. To use the organization that you entered "
"when you created the course, enter null."
),
scope=Scope.settings
)
display_coursenumber = String(
display_name=_("Course Number Display String"),
help=_(
"Enter the course number that you want to appear in the courseware. This setting overrides the course "
"number that you entered when you created the course. To use the course number that you entered when "
"you created the course, enter null."
),
scope=Scope.settings
)
max_student_enrollments_allowed = Integer(
display_name=_("Course Maximum Student Enrollment"),
help=_(
"Enter the maximum number of students that can enroll in the course. To allow an unlimited number of "
"students, enter null."
),
scope=Scope.settings
)
allow_public_wiki_access = Boolean(
display_name=_("Allow Public Wiki Access"),
help=_(
"Enter true or false. If true, users can view the course wiki even "
"if they're not enrolled in the course."
),
default=False,
scope=Scope.settings
)
invitation_only = Boolean(
display_name=_("Invitation Only"),
help=_("Whether to restrict enrollment to invitation by the course staff."),
default=False,
scope=Scope.settings
)
course_survey_name = String(
display_name=_("Pre-Course Survey Name"),
help=_("Name of SurveyForm to display as a pre-course survey to the user."),
default=None,
scope=Scope.settings,
deprecated=True
)
course_survey_required = Boolean(
display_name=_("Pre-Course Survey Required"),
help=_(
"Specify whether students must complete a survey before they can view your course content. If you "
"set this value to true, you must add a name for the survey to the Course Survey Name setting above."
),
default=False,
scope=Scope.settings,
deprecated=True
)
catalog_visibility = String(
display_name=_("Course Visibility In Catalog"),
help=_(
"Defines the access permissions for showing the course in the course catalog. This can be set to one "
"of three values: 'both' (show in catalog and allow access to about page), 'about' (only allow access "
"to about page), 'none' (do not show in catalog and do not allow access to an about page)."
),
default=CATALOG_VISIBILITY_CATALOG_AND_ABOUT,
scope=Scope.settings,
values=[
{"display_name": _("Both"), "value": CATALOG_VISIBILITY_CATALOG_AND_ABOUT},
{"display_name": _("About"), "value": CATALOG_VISIBILITY_ABOUT},
{"display_name": _("None"), "value": CATALOG_VISIBILITY_NONE}]
)
entrance_exam_enabled = Boolean(
display_name=_("Entrance Exam Enabled"),
help=_(
"Specify whether students must complete an entrance exam before they can view your course content. "
"Note, you must enable Entrance Exams for this course setting to take effect."
),
default=False,
scope=Scope.settings,
)
entrance_exam_minimum_score_pct = Float(
display_name=_("Entrance Exam Minimum Score (%)"),
help=_(
"Specify a minimum percentage score for an entrance exam before students can view your course content. "
"Note, you must enable Entrance Exams for this course setting to take effect."
),
default=65,
scope=Scope.settings,
)
entrance_exam_id = String(
display_name=_("Entrance Exam ID"),
help=_("Content module identifier (location) of entrance exam."),
default=None,
scope=Scope.settings,
)
social_sharing_url = String(
display_name=_("Social Media Sharing URL"),
help=_(
"If dashboard social sharing and custom course URLs are enabled, you can provide a URL "
"(such as the URL to a course About page) that social media sites can link to. URLs must "
"be fully qualified. For example: http://www.edx.org/course/Introduction-to-MOOCs-ITM001"
),
default=None,
scope=Scope.settings,
)
language = String(
display_name=_("Course Language"),
help=_("Specify the language of your course."),
default=None,
scope=Scope.settings
)
teams_configuration = Dict(
display_name=_("Teams Configuration"),
help=_(
"Enter configuration for the teams feature. Expects two entries: max_team_size and topics, where "
"topics is a list of topics."
),
scope=Scope.settings
)
minimum_grade_credit = Float(
display_name=_("Minimum Grade for Credit"),
help=_(
"The minimum grade that a learner must earn to receive credit in the course, "
"as a decimal between 0.0 and 1.0. For example, for 75%, enter 0.75."
),
default=0.8,
scope=Scope.settings,
)
class CourseModule(CourseFields, SequenceModule):  # pylint: disable=abstract-method
    """
    The CourseDescriptor needs its module_class to be a SequenceModule, but some code that
    expects a CourseDescriptor to have all its fields can fail if it gets a SequenceModule instead.
    This class is to make sure that all the fields are present in all cases.

    The class body is intentionally empty: mixing in CourseFields is the whole point.
    """
class CourseDescriptor(CourseFields, SequenceDescriptor, LicenseMixin):
    """
    The descriptor for the course XModule
    """
    # CourseModule pairs this descriptor with its student-facing XModule
    # (see CourseModule's docstring above).
    module_class = CourseModule
    def __init__(self, *args, **kwargs):
        """
        Expects the same arguments as XModuleDescriptor.__init__
        """
        super(CourseDescriptor, self).__init__(*args, **kwargs)
        # Bind the runtime i18n service's ugettext as "_" for the default
        # discussion topic name below.
        _ = self.runtime.service(self, "i18n").ugettext
        # Default the wiki slug to the course identifier when none was set.
        if self.wiki_slug is None:
            self.wiki_slug = self.location.course
        if self.due_date_display_format is None and self.show_timezone is False:
            # For existing courses with show_timezone set to False (and no due_date_display_format specified),
            # set the due_date_display_format to what would have been shown previously (with no timezone).
            # Then remove show_timezone so that if the user clears out the due_date_display_format,
            # they get the default date display.
            self.due_date_display_format = "DATE_TIME"
            delattr(self, 'show_timezone')
        # NOTE: relies on the modulestore to call set_grading_policy() right after
        # init. (Modulestore is in charge of figuring out where to load the policy from)
        # NOTE (THK): This is a last-minute addition for Fall 2012 launch to dynamically
        # disable the syllabus content for courses that do not provide a syllabus
        if self.system.resources_fs is None:
            self.syllabus_present = False
        else:
            self.syllabus_present = self.system.resources_fs.exists(path('syllabus'))
        # Seed the processed grading-policy cache, then apply the persisted policy.
        self._grading_policy = {}
        self.set_grading_policy(self.grading_policy)
        # Ensure at least one discussion topic and the default tab set exist.
        if self.discussion_topics == {}:
            self.discussion_topics = {_('General'): {'id': self.location.html_id()}}
        if not getattr(self, "tabs", []):
            CourseTabList.initialize_default(self)
    def set_grading_policy(self, course_policy):
        """
        The JSON object can have the keys GRADER and GRADE_CUTOFFS. If either is
        missing, it reverts to the default.

        Merges ``course_policy`` on top of the persisted ``self.grading_policy``
        field and pushes the result through the raw_grader / grade_cutoffs
        setters, which keep both the XBlock field and the internal
        ``_grading_policy`` cache in sync.
        """
        if course_policy is None:
            course_policy = {}
        # Load the global settings as a dictionary
        grading_policy = self.grading_policy
        # BOY DO I HATE THIS grading_policy CODE ACROBATICS YET HERE I ADD MORE (dhm)--this fixes things persisted w/
        # defective grading policy values (but not None)
        if 'GRADER' not in grading_policy:
            grading_policy['GRADER'] = CourseFields.grading_policy.default['GRADER']
        if 'GRADE_CUTOFFS' not in grading_policy:
            grading_policy['GRADE_CUTOFFS'] = CourseFields.grading_policy.default['GRADE_CUTOFFS']
        # Override any global settings with the course settings
        grading_policy.update(course_policy)
        # Here is where we should parse any configurations, so that we can fail early
        # Use setters so that side effecting to .definitions works
        self.raw_grader = grading_policy['GRADER']  # used for cms access
        self.grade_cutoffs = grading_policy['GRADE_CUTOFFS']
@classmethod
def read_grading_policy(cls, paths, system):
"""Load a grading policy from the specified paths, in order, if it exists."""
# Default to a blank policy dict
policy_str = '{}'
for policy_path in paths:
if not system.resources_fs.exists(policy_path):
continue
log.debug("Loading grading policy from {0}".format(policy_path))
try:
with system.resources_fs.open(policy_path) as grading_policy_file:
policy_str = grading_policy_file.read()
# if we successfully read the file, stop looking at backups
break
except (IOError):
msg = "Unable to load course settings file from '{0}'".format(policy_path)
log.warning(msg)
return policy_str
    @classmethod
    def from_xml(cls, xml_data, system, id_generator):
        """
        Build a CourseDescriptor from XML, then locate and apply the course's
        grading policy (from policies/<url_name>/grading_policy.json when a
        url_name/slug is present, falling back to grading_policy.json).
        """
        instance = super(CourseDescriptor, cls).from_xml(xml_data, system, id_generator)
        # bleh, have to parse the XML here to just pull out the url_name attribute
        # I don't think it's stored anywhere in the instance.
        # NOTE(review): encode('ascii', 'ignore') silently drops non-ASCII
        # characters from the XML before reparsing — only safe because just the
        # url_name/slug attributes are read from this copy.
        course_file = StringIO(xml_data.encode('ascii', 'ignore'))
        xml_obj = etree.parse(course_file, parser=edx_xml_parser).getroot()
        policy_dir = None
        url_name = xml_obj.get('url_name', xml_obj.get('slug'))
        if url_name:
            policy_dir = 'policies/' + url_name
        # Try to load grading policy
        paths = ['grading_policy.json']
        if policy_dir:
            paths = [policy_dir + '/grading_policy.json'] + paths
        try:
            policy = json.loads(cls.read_grading_policy(paths, system))
        except ValueError:
            system.error_tracker("Unable to decode grading policy as json")
            policy = {}
        # now set the current instance. set_grading_policy() will apply some inheritance rules
        instance.set_grading_policy(policy)
        return instance
@classmethod
def definition_from_xml(cls, xml_object, system):
textbooks = []
for textbook in xml_object.findall("textbook"):
textbooks.append((textbook.get('title'), textbook.get('book_url')))
xml_object.remove(textbook)
# Load the wiki tag if it exists
wiki_slug = None
wiki_tag = xml_object.find("wiki")
if wiki_tag is not None:
wiki_slug = wiki_tag.attrib.get("slug", default=None)
xml_object.remove(wiki_tag)
definition, children = super(CourseDescriptor, cls).definition_from_xml(xml_object, system)
definition['textbooks'] = textbooks
definition['wiki_slug'] = wiki_slug
# load license if it exists
definition = LicenseMixin.parse_license_from_xml(definition, xml_object)
return definition, children
def definition_to_xml(self, resource_fs):
xml_object = super(CourseDescriptor, self).definition_to_xml(resource_fs)
if len(self.textbooks) > 0:
textbook_xml_object = etree.Element('textbook')
for textbook in self.textbooks:
textbook_xml_object.set('title', textbook.title)
textbook_xml_object.set('book_url', textbook.book_url)
xml_object.append(textbook_xml_object)
if self.wiki_slug is not None:
wiki_xml_object = etree.Element('wiki')
wiki_xml_object.set('slug', self.wiki_slug)
xml_object.append(wiki_xml_object)
# handle license specifically. Default the course to have a license
# of "All Rights Reserved", if a license is not explicitly set.
self.add_license_to_xml(xml_object, default="all-rights-reserved")
return xml_object
    def has_ended(self):
        """
        Returns True if the current time is after the specified course end date.
        Returns False if there is no end date specified.
        """
        # Delegates to the shared helper so LMS/CMS use one definition of "ended".
        return course_metadata_utils.has_course_ended(self.end)
    def may_certify(self):
        """
        Return whether it is acceptable to show the student a certificate download link.

        Combines the certificates_display_behavior and (deprecated)
        certificates_show_before_end settings with whether the course has ended.
        """
        return course_metadata_utils.may_certify_for_course(
            self.certificates_display_behavior,
            self.certificates_show_before_end,
            self.has_ended()
        )
    def has_started(self):
        """Return True once the current time is past the course start date."""
        return course_metadata_utils.has_course_started(self.start)
    @property
    def grader(self):
        """The processed grader object built from the raw grader configuration."""
        # Re-derived on every access; raw_grader is the persisted form.
        return grader_from_conf(self.raw_grader)
    @property
    def raw_grader(self):
        """The raw GRADER configuration, as cached in ``_grading_policy``."""
        # force the caching of the xblock value so that it can detect the change
        # pylint: disable=pointless-statement
        self.grading_policy['GRADER']
        return self._grading_policy['RAW_GRADER']
    @raw_grader.setter
    def raw_grader(self, value):
        # NOTE WELL: this change will not update the processed graders. If we need that, this needs to call grader_from_conf
        # Keep the internal cache and the persisted XBlock field in sync.
        self._grading_policy['RAW_GRADER'] = value
        self.grading_policy['GRADER'] = value
    @property
    def grade_cutoffs(self):
        """The GRADE_CUTOFFS mapping from the cached grading policy."""
        return self._grading_policy['GRADE_CUTOFFS']
    @grade_cutoffs.setter
    def grade_cutoffs(self, value):
        self._grading_policy['GRADE_CUTOFFS'] = value
        # XBlock fields don't update after mutation
        # (reassigning the whole dict is what marks the field dirty)
        policy = self.grading_policy
        policy['GRADE_CUTOFFS'] = value
        self.grading_policy = policy
@property
def lowest_passing_grade(self):
return min(self._grading_policy['GRADE_CUTOFFS'].values())
@property
def is_cohorted(self):
"""
Return whether the course is cohorted.
Note: No longer used. See openedx.core.djangoapps.course_groups.models.CourseCohortSettings.
"""
config = self.cohort_config
if config is None:
return False
return bool(config.get("cohorted"))
@property
def auto_cohort(self):
"""
Return whether the course is auto-cohorted.
Note: No longer used. See openedx.core.djangoapps.course_groups.models.CourseCohortSettings.
"""
if not self.is_cohorted:
return False
return bool(self.cohort_config.get(
"auto_cohort", False))
@property
def auto_cohort_groups(self):
"""
Return the list of groups to put students into. Returns [] if not
specified. Returns specified list even if is_cohorted and/or auto_cohort are
false.
Note: No longer used. See openedx.core.djangoapps.course_groups.models.CourseCohortSettings.
"""
if self.cohort_config is None:
return []
else:
return self.cohort_config.get("auto_cohort_groups", [])
@property
def top_level_discussion_topic_ids(self):
"""
Return list of topic ids defined in course policy.
"""
topics = self.discussion_topics
return [d["id"] for d in topics.values()]
@property
def cohorted_discussions(self):
"""
Return the set of discussions that is explicitly cohorted. It may be
the empty set. Note that all inline discussions are automatically
cohorted based on the course's is_cohorted setting.
Note: No longer used. See openedx.core.djangoapps.course_groups.models.CourseCohortSettings.
"""
config = self.cohort_config
if config is None:
return set()
return set(config.get("cohorted_discussions", []))
@property
def always_cohort_inline_discussions(self):
"""
This allow to change the default behavior of inline discussions cohorting. By
setting this to False, all inline discussions are non-cohorted unless their
ids are specified in cohorted_discussions.
Note: No longer used. See openedx.core.djangoapps.course_groups.models.CourseCohortSettings.
"""
config = self.cohort_config
if config is None:
return True
return bool(config.get("always_cohort_inline_discussions", True))
@property
def is_newish(self):
"""
Returns if the course has been flagged as new. If
there is no flag, return a heuristic value considering the
announcement and the start dates.
"""
flag = self.is_new
if flag is None:
# Use a heuristic if the course has not been flagged
announcement, start, now = self._sorting_dates()
if announcement and (now - announcement).days < 30:
# The course has been announced for less that month
return True
elif (now - start).days < 1:
# The course has not started yet
return True
else:
return False
elif isinstance(flag, basestring):
return flag.lower() in ['true', 'yes', 'y']
else:
return bool(flag)
@property
def sorting_score(self):
"""
Returns a tuple that can be used to sort the courses according
the how "new" they are. The "newness" score is computed using a
heuristic that takes into account the announcement and
(advertized) start dates of the course if available.
The lower the number the "newer" the course.
"""
# Make courses that have an announcement date shave a lower
# score than courses than don't, older courses should have a
# higher score.
announcement, start, now = self._sorting_dates()
scale = 300.0 # about a year
if announcement:
days = (now - announcement).days
score = -exp(-days / scale)
else:
days = (now - start).days
score = exp(days / scale)
return score
def _sorting_dates(self):
# utility function to get datetime objects for dates used to
# compute the is_new flag and the sorting_score
announcement = self.announcement
if announcement is not None:
announcement = announcement
try:
start = dateutil.parser.parse(self.advertised_start)
if start.tzinfo is None:
start = start.replace(tzinfo=UTC())
except (ValueError, AttributeError):
start = self.start
now = datetime.now(UTC())
return announcement, start, now
    @lazy
    def grading_context(self):
        """
        This returns a dictionary with keys necessary for quickly grading
        a student. They are used by grades.grade()
        The grading context has two keys:
        graded_sections - This contains the sections that are graded, as
        well as all possible children modules that can affect the
        grading. This allows some sections to be skipped if the student
        hasn't seen any part of it.
        The format is a dictionary keyed by section-type. The values are
        arrays of dictionaries containing
        "section_descriptor" : The section descriptor
        "xmoduledescriptors" : An array of xmoduledescriptors that
        could possibly be in the section, for any student
        all_descriptors - This contains a list of all xmodules that can
        effect grading a student. This is used to efficiently fetch
        all the xmodule state for a FieldDataCache without walking
        the descriptor tree again.
        """
        # If this descriptor has been bound to a student, return the corresponding
        # XModule. If not, just use the descriptor itself
        # NOTE(review): ``module`` is computed below but never used afterwards —
        # candidate for cleanup.
        try:
            module = getattr(self, '_xmodule', None)
            if not module:
                module = self
        except UndefinedContext:
            module = self
        all_descriptors = []
        graded_sections = {}
        # Depth-first walk over all descendants of a descriptor.
        def yield_descriptor_descendents(module_descriptor):
            for child in module_descriptor.get_children():
                yield child
                for module_descriptor in yield_descriptor_descendents(child):
                    yield module_descriptor
        for chapter in self.get_children():
            for section in chapter.get_children():
                if section.graded:
                    xmoduledescriptors = list(yield_descriptor_descendents(section))
                    xmoduledescriptors.append(section)
                    # The xmoduledescriptors included here are only the ones that have scores.
                    section_description = {
                        'section_descriptor': section,
                        'xmoduledescriptors': [child for child in xmoduledescriptors if child.has_score]
                    }
                    # Group graded sections by their assignment format (e.g. "Homework").
                    section_format = section.format if section.format is not None else ''
                    graded_sections[section_format] = graded_sections.get(section_format, []) + [section_description]
                    all_descriptors.extend(xmoduledescriptors)
                    all_descriptors.append(section)
        return {'graded_sections': graded_sections,
                'all_descriptors': all_descriptors, }
@staticmethod
def make_id(org, course, url_name):
return '/'.join([org, course, url_name])
    @property
    def id(self):
        """Return the course_id for this course"""
        # The location's course_key identifies this course run.
        return self.location.course_key
    def start_datetime_text(self, format_string="SHORT_DATE"):
        """
        Returns the desired text corresponding the course's start date and time in UTC. Prefers .advertised_start,
        then falls back to .start

        ``format_string`` is passed through to the shared helper along with the
        i18n service's ugettext/strftime.
        """
        i18n = self.runtime.service(self, "i18n")
        return course_metadata_utils.course_start_datetime_text(
            self.start,
            self.advertised_start,
            format_string,
            i18n.ugettext,
            i18n.strftime
        )
    @property
    def start_date_is_still_default(self):
        """
        Checks if the start date set for the course is still default, i.e. .start has not been modified,
        and .advertised_start has not been set.
        """
        return course_metadata_utils.course_start_date_is_default(
            self.start,
            self.advertised_start
        )
    def end_datetime_text(self, format_string="SHORT_DATE"):
        """
        Returns the end date or date_time for the course formatted as a string.

        Delegates to the shared helper, using the i18n service's strftime.
        """
        return course_metadata_utils.course_end_datetime_text(
            self.end,
            format_string,
            self.runtime.service(self, "i18n").strftime
        )
def get_discussion_blackout_datetimes(self):
    """Parse the discussion_blackouts setting into datetime windows.

    Returns a list of {"start": datetime, "end": datetime} dicts, or [] when
    the setting is missing or malformed (the error is logged).
    """
    parser = Date()
    try:
        # First pass: parse every (start, end) pair, skipping falsy entries.
        windows = []
        for window in self.discussion_blackouts:
            if not window:
                continue
            start, end = window
            windows.append({"start": parser.from_json(start),
                            "end": parser.from_json(end)})
        # Second pass: reject the whole setting if any bound failed to parse.
        for window in windows:
            if not window["start"] or not window["end"]:
                raise ValueError
        return windows
    except (TypeError, ValueError):
        log.exception(
            "Error parsing discussion_blackouts %s for course %s",
            self.discussion_blackouts,
            self.id
        )
        return []
@property
def forum_posts_allowed(self):
    """Return False while the current time falls inside any blackout window."""
    current_time = datetime.now(UTC())
    # Posting is allowed only when no configured window contains "now".
    return all(
        not (window["start"] <= current_time <= window["end"])
        for window in self.get_discussion_blackout_datetimes()
    )
@property
def number(self):
    """Return the university-style course number.

    E.g. for the course key "edX/CS-101/2014" the number is "CS-101".
    """
    return course_metadata_utils.number_for_course_location(self.location)
@property
def display_number_with_default(self):
    """Return the explicit display number, falling back to the location's course number."""
    # `or` yields self.number whenever display_coursenumber is unset/empty.
    return self.display_coursenumber or self.number
@property
def org(self):
    """Return the organization component of this course's location."""
    location = self.location
    return location.org
@property
def display_org_with_default(self):
    """Return the explicit display organization, falling back to the location's org."""
    # `or` yields self.org whenever display_organization is unset/empty.
    return self.display_organization or self.org
@property
def video_pipeline_configured(self):
    """Return whether the video-pipeline advanced setting carries an upload token."""
    pipeline = self.video_upload_pipeline
    if pipeline is None:
        return False
    return 'course_video_upload_token' in pipeline
def clean_id(self, padding_char='='):
    """Return a unique, deterministic base32-encoded ID for the course.

    padding_char replaces the '=' normally used for base32 padding.
    """
    course_key = self.location.course_key
    return course_metadata_utils.clean_course_key(course_key, padding_char)
@property
def teams_enabled(self):
    """Return True when at least one team topic is configured for the course."""
    config = self.teams_configuration
    if not config:
        return False
    return len(config.get('topics', [])) > 0
@property
def teams_max_size(self):
    """Return the configured maximum team size, or None when unset."""
    config = self.teams_configuration
    return config.get('max_team_size', None)
@property
def teams_topics(self):
    """Return the configured team topics, or None when unset."""
    config = self.teams_configuration
    return config.get('topics', None)
|
abhilashnta/edx-platform
|
common/lib/xmodule/xmodule/course_module.py
|
Python
|
agpl-3.0
| 61,461
|
[
"VisIt"
] |
4a8d445b98380e29d6dce350b9e6eb72e686b39012bee08644fd0630571dcc81
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rdp_privacy_accountant."""
import math
import sys
from absl.testing import absltest
from absl.testing import parameterized
import mpmath
import numpy as np
from tensorflow_privacy.privacy.analysis import dp_event
from tensorflow_privacy.privacy.analysis import privacy_accountant
from tensorflow_privacy.privacy.analysis import privacy_accountant_test
from tensorflow_privacy.privacy.analysis import rdp_privacy_accountant
def _get_test_rdp(event, count=1):
  """Compose `event` `count` times and return the resulting RDP at a fixed order."""
  acct = rdp_privacy_accountant.RdpAccountant(orders=[2.71828])
  acct.compose(event, count)
  return acct._rdp[0]
def _log_float_mp(x):
# Convert multi-precision input to float log space.
if x >= sys.float_info.min:
return float(mpmath.log(x))
else:
return -np.inf
def _compute_a_mp(sigma, q, alpha):
  """Compute A_alpha for arbitrary alpha by numerical integration."""

  def _mu0_pdf(x):
    # Density of the unsampled Gaussian N(0, sigma^2).
    return mpmath.npdf(x, mu=0, sigma=sigma)

  def _likelihood_ratio(x):
    # mu(x) / mu0(x) for Poisson sampling rate q.
    return (1 - q) + q * mpmath.exp((2 * x - 1) / (2 * sigma**2))

  def _integrand(z):
    return _mu0_pdf(z) * _likelihood_ratio(z)**alpha

  integral, _ = mpmath.quad(
      _integrand, (-mpmath.inf, mpmath.inf), error=True, maxdegree=8)
  return integral
class RdpPrivacyAccountantTest(privacy_accountant_test.PrivacyAccountantTest,
                               parameterized.TestCase):
  """Tests for RdpAccountant: event support, composition, and (eps, delta) conversion."""

  def _make_test_accountants(self):
    # Hook required by the shared PrivacyAccountantTest base class: one
    # accountant per supported neighboring relation.
    return [
        rdp_privacy_accountant.RdpAccountant(
            [2.0], privacy_accountant.NeighboringRelation.ADD_OR_REMOVE_ONE),
        rdp_privacy_accountant.RdpAccountant(
            [2.0], privacy_accountant.NeighboringRelation.REPLACE_ONE)
    ]

  def test_supports(self):
    # Which DpEvent types each neighboring relation can account for.
    aor_accountant = rdp_privacy_accountant.RdpAccountant(
        [2.0], privacy_accountant.NeighboringRelation.ADD_OR_REMOVE_ONE)
    ro_accountant = rdp_privacy_accountant.RdpAccountant(
        [2.0], privacy_accountant.NeighboringRelation.REPLACE_ONE)

    # Plain and composed Gaussian events: supported by both relations.
    event = dp_event.GaussianDpEvent(1.0)
    self.assertTrue(aor_accountant.supports(event))
    self.assertTrue(ro_accountant.supports(event))

    event = dp_event.SelfComposedDpEvent(dp_event.GaussianDpEvent(1.0), 6)
    self.assertTrue(aor_accountant.supports(event))
    self.assertTrue(ro_accountant.supports(event))

    event = dp_event.ComposedDpEvent(
        [dp_event.GaussianDpEvent(1.0),
         dp_event.GaussianDpEvent(2.0)])
    self.assertTrue(aor_accountant.supports(event))
    self.assertTrue(ro_accountant.supports(event))

    # Poisson sampling: add-or-remove only.
    event = dp_event.PoissonSampledDpEvent(0.1, dp_event.GaussianDpEvent(1.0))
    self.assertTrue(aor_accountant.supports(event))
    self.assertFalse(ro_accountant.supports(event))

    composed_gaussian = dp_event.ComposedDpEvent(
        [dp_event.GaussianDpEvent(1.0),
         dp_event.GaussianDpEvent(2.0)])
    event = dp_event.PoissonSampledDpEvent(0.1, composed_gaussian)
    self.assertTrue(aor_accountant.supports(event))
    self.assertFalse(ro_accountant.supports(event))

    # Fixed-size sampling without replacement: replace-one only.
    event = dp_event.SampledWithoutReplacementDpEvent(
        1000, 10, dp_event.GaussianDpEvent(1.0))
    self.assertFalse(aor_accountant.supports(event))
    self.assertTrue(ro_accountant.supports(event))

    event = dp_event.SampledWithoutReplacementDpEvent(1000, 10,
                                                      composed_gaussian)
    self.assertFalse(aor_accountant.supports(event))
    self.assertTrue(ro_accountant.supports(event))

    # Sampling with replacement: supported by neither.
    event = dp_event.SampledWithReplacementDpEvent(
        1000, 10, dp_event.GaussianDpEvent(1.0))
    self.assertFalse(aor_accountant.supports(event))
    self.assertFalse(ro_accountant.supports(event))

  def test_rdp_composition(self):
    # RDP is additive under composition, so composing the same event six
    # different ways must all yield exactly 6x the base RDP.
    base_event = dp_event.GaussianDpEvent(3.14159)
    base_rdp = _get_test_rdp(base_event)

    rdp_with_count = _get_test_rdp(base_event, count=6)
    self.assertAlmostEqual(rdp_with_count, base_rdp * 6)

    rdp_with_self_compose = _get_test_rdp(
        dp_event.SelfComposedDpEvent(base_event, 6))
    self.assertAlmostEqual(rdp_with_self_compose, base_rdp * 6)

    rdp_with_self_compose_and_count = _get_test_rdp(
        dp_event.SelfComposedDpEvent(base_event, 2), count=3)
    self.assertAlmostEqual(rdp_with_self_compose_and_count, base_rdp * 6)

    rdp_with_compose = _get_test_rdp(dp_event.ComposedDpEvent([base_event] * 6))
    self.assertAlmostEqual(rdp_with_compose, base_rdp * 6)

    rdp_with_compose_and_self_compose = _get_test_rdp(
        dp_event.ComposedDpEvent([
            dp_event.SelfComposedDpEvent(base_event, 1),
            dp_event.SelfComposedDpEvent(base_event, 2),
            dp_event.SelfComposedDpEvent(base_event, 3)
        ]))
    self.assertAlmostEqual(rdp_with_compose_and_self_compose, base_rdp * 6)

    # Heterogeneous composition adds the two events' RDP values.
    base_event_2 = dp_event.GaussianDpEvent(1.61803)
    base_rdp_2 = _get_test_rdp(base_event_2)
    rdp_with_heterogeneous_compose = _get_test_rdp(
        dp_event.ComposedDpEvent([base_event, base_event_2]))
    self.assertAlmostEqual(rdp_with_heterogeneous_compose,
                           base_rdp + base_rdp_2)

  def test_zero_poisson_sample(self):
    # Sampling probability 0 means no data is ever touched: zero privacy cost.
    accountant = rdp_privacy_accountant.RdpAccountant([3.14159])
    accountant.compose(
        dp_event.PoissonSampledDpEvent(0, dp_event.GaussianDpEvent(1.0)))
    self.assertEqual(accountant.get_epsilon(1e-10), 0)
    self.assertEqual(accountant.get_delta(1e-10), 0)

  def test_zero_fixed_batch_sample(self):
    # Batch size 0 likewise incurs zero privacy cost.
    accountant = rdp_privacy_accountant.RdpAccountant(
        [3.14159], privacy_accountant.NeighboringRelation.REPLACE_ONE)
    accountant.compose(
        dp_event.SampledWithoutReplacementDpEvent(
            1000, 0, dp_event.GaussianDpEvent(1.0)))
    self.assertEqual(accountant.get_epsilon(1e-10), 0)
    self.assertEqual(accountant.get_delta(1e-10), 0)

  def test_epsilon_non_private_gaussian(self):
    # Zero noise gives no privacy: epsilon must be infinite.
    accountant = rdp_privacy_accountant.RdpAccountant([3.14159])
    accountant.compose(dp_event.GaussianDpEvent(0))
    self.assertEqual(accountant.get_epsilon(1e-1), np.inf)

  def test_compute_rdp_gaussian(self):
    # Closed form for the Gaussian mechanism: RDP(alpha) = alpha / (2 sigma^2).
    alpha = 3.14159
    sigma = 2.71828
    event = dp_event.GaussianDpEvent(sigma)
    accountant = rdp_privacy_accountant.RdpAccountant(orders=[alpha])
    accountant.compose(event)
    self.assertAlmostEqual(accountant._rdp[0], alpha / (2 * sigma**2))

  def test_compute_rdp_multi_gaussian(self):
    # Two Gaussians composed (at sampling rate 1.0) add their RDP values.
    alpha = 3.14159
    sigma1, sigma2 = 2.71828, 6.28319

    rdp1 = alpha / (2 * sigma1**2)
    rdp2 = alpha / (2 * sigma2**2)
    rdp = rdp1 + rdp2

    accountant = rdp_privacy_accountant.RdpAccountant(orders=[alpha])
    accountant.compose(
        dp_event.PoissonSampledDpEvent(
            1.0,
            dp_event.ComposedDpEvent([
                dp_event.GaussianDpEvent(sigma1),
                dp_event.GaussianDpEvent(sigma2)
            ])))
    self.assertAlmostEqual(accountant._rdp[0], rdp)

  def test_effective_gaussian_noise_multiplier(self):
    # A nested tree of Gaussian events collapses to a single effective sigma
    # with 1/sigma^2 values summing across all leaves.
    np.random.seed(0xBAD5EED)
    sigmas = np.random.uniform(size=(4,))

    event = dp_event.ComposedDpEvent([
        dp_event.GaussianDpEvent(sigmas[0]),
        dp_event.SelfComposedDpEvent(dp_event.GaussianDpEvent(sigmas[1]), 3),
        dp_event.ComposedDpEvent([
            dp_event.GaussianDpEvent(sigmas[2]),
            dp_event.GaussianDpEvent(sigmas[3])
        ])
    ])

    sigma = rdp_privacy_accountant._effective_gaussian_noise_multiplier(event)
    multi_sigmas = list(sigmas) + [sigmas[1]] * 2
    expected = sum(s**-2 for s in multi_sigmas)**-0.5
    self.assertAlmostEqual(sigma, expected)

  def test_compute_rdp_poisson_sampled_gaussian(self):
    # Regression values for the subsampled-Gaussian RDP curve.
    orders = [1.5, 2.5, 5, 50, 100, np.inf]
    noise_multiplier = 2.5
    sampling_probability = 0.01
    count = 50
    event = dp_event.SelfComposedDpEvent(
        dp_event.PoissonSampledDpEvent(
            sampling_probability, dp_event.GaussianDpEvent(noise_multiplier)),
        count)
    accountant = rdp_privacy_accountant.RdpAccountant(orders=orders)
    accountant.compose(event)
    self.assertTrue(
        np.allclose(
            accountant._rdp, [
                6.5007e-04, 1.0854e-03, 2.1808e-03, 2.3846e-02, 1.6742e+02,
                np.inf
            ],
            rtol=1e-4))

  def test_compute_epsilon_delta_pure_dp(self):
    # Constant RDP across orders corresponds to pure (epsilon, 0)-DP.
    orders = range(2, 33)
    rdp = [1.1 for o in orders]  # Constant corresponds to pure DP.

    epsilon = rdp_privacy_accountant._compute_epsilon(orders, rdp, delta=1e-5)
    # Compare with epsilon computed by hand.
    self.assertAlmostEqual(epsilon, 1.32783806176)

    delta = rdp_privacy_accountant._compute_delta(
        orders, rdp, epsilon=1.32783806176)
    self.assertAlmostEqual(delta, 1e-5)

  def test_compute_epsilon_delta_gaussian(self):
    # epsilon->delta and delta->epsilon conversions must be mutual inverses.
    orders = [0.001 * i for i in range(1000, 100000)]

    # noise multiplier is chosen to obtain exactly (1,1e-6)-DP.
    rdp = rdp_privacy_accountant._compute_rdp_poisson_subsampled_gaussian(
        1, 4.530877117, orders)

    eps = rdp_privacy_accountant._compute_epsilon(orders, rdp, delta=1e-6)
    self.assertAlmostEqual(eps, 1)

    delta = rdp_privacy_accountant._compute_delta(orders, rdp, epsilon=1)
    self.assertAlmostEqual(delta, 1e-6)

  # (q, sigma, order) triples spanning tiny-to-near-one sampling rates and a
  # wide range of RDP orders; consumed by test_compute_log_a_equals_mp below.
  params = ({
      'q': 1e-7,
      'sigma': .1,
      'order': 1.01
  }, {
      'q': 1e-6,
      'sigma': .1,
      'order': 256
  }, {
      'q': 1e-5,
      'sigma': .1,
      'order': 256.1
  }, {
      'q': 1e-6,
      'sigma': 1,
      'order': 27
  }, {
      'q': 1e-4,
      'sigma': 1.,
      'order': 1.5
  }, {
      'q': 1e-3,
      'sigma': 1.,
      'order': 2
  }, {
      'q': .01,
      'sigma': 10,
      'order': 20
  }, {
      'q': .1,
      'sigma': 100,
      'order': 20.5
  }, {
      'q': .99,
      'sigma': .1,
      'order': 256
  }, {
      'q': .999,
      'sigma': 100,
      'order': 256.1
  })

  # pylint:disable=undefined-variable
  @parameterized.parameters(p for p in params)
  def test_compute_log_a_equals_mp(self, q, sigma, order):
    # Compare the cheap computation of log(A) with an expensive, multi-precision
    # computation.
    log_a = rdp_privacy_accountant._compute_log_a(q, sigma, order)
    log_a_mp = _log_float_mp(_compute_a_mp(sigma, q, order))
    np.testing.assert_allclose(log_a, log_a_mp, rtol=1e-4)

  def test_delta_bounds_gaussian(self):
    # Compare the optimal bound for Gaussian with the one derived from RDP.
    # Also compare the RDP upper bound with the "standard" upper bound.
    orders = [0.1 * x for x in range(10, 505)]
    eps_vec = [0.1 * x for x in range(500)]
    rdp = rdp_privacy_accountant._compute_rdp_poisson_subsampled_gaussian(
        1, 1, orders)
    for eps in eps_vec:
      delta = rdp_privacy_accountant._compute_delta(orders, rdp, epsilon=eps)
      # For comparison, we compute the optimal guarantee for Gaussian
      # using https://arxiv.org/abs/1805.06530 Theorem 8 (in v2).
      delta0 = math.erfc((eps - .5) / math.sqrt(2)) / 2
      delta0 = delta0 - math.exp(eps) * math.erfc((eps + .5) / math.sqrt(2)) / 2
      self.assertLessEqual(delta0, delta + 1e-300)  # need tolerance 10^-300

      # Compute the "standard" upper bound, which should be an upper bound.
      # Note, if orders is too sparse, this will NOT be an upper bound.
      if eps >= 0.5:
        delta1 = math.exp(-0.5 * (eps - 0.5)**2)
      else:
        delta1 = 1
      self.assertLessEqual(delta, delta1 + 1e-300)

  def test_epsilon_delta_consistency(self):
    orders = range(2, 50)  # Large range of orders (helps test for overflows).
    for q in [0, 0.01, 0.1, 0.8, 1.]:
      for multiplier in [0.0, 0.1, 1., 10., 100.]:
        event = dp_event.PoissonSampledDpEvent(
            q, dp_event.GaussianDpEvent(multiplier))
        accountant = rdp_privacy_accountant.RdpAccountant(orders)
        accountant.compose(event)
        for delta in [.99, .9, .1, .01, 1e-3, 1e-5, 1e-9, 1e-12]:
          epsilon = accountant.get_epsilon(delta)
          delta2 = accountant.get_delta(epsilon)
          if np.isposinf(epsilon):
            # No finite epsilon satisfies this delta: delta(inf) must be 1.
            self.assertEqual(delta2, 1.0)
          elif epsilon == 0:
            # epsilon may round down to 0, in which case the realized delta
            # can only be tighter than the requested one.
            self.assertLessEqual(delta2, delta)
          else:
            self.assertAlmostEqual(delta, delta2)
# Standard absl test entry point.
if __name__ == '__main__':
  absltest.main()
|
tensorflow/privacy
|
tensorflow_privacy/privacy/analysis/rdp_privacy_accountant_test.py
|
Python
|
apache-2.0
| 12,865
|
[
"Gaussian"
] |
d3716cacaa0c86b5945601f58144c3b81f7fdb0e9ef6653b9e1656ae61065f96
|
# Copyright (c) 2018,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Tools and calculations for interpolating specifically to a grid."""
import numpy as np
from .points import (interpolate_to_points, inverse_distance_to_points,
natural_neighbor_to_points)
from ..package_tools import Exporter
from ..pandas import preprocess_pandas
exporter = Exporter(globals())
def generate_grid(horiz_dim, bbox):
    r"""Create x and y meshgrids covering `bbox` at resolution `horiz_dim`.

    Parameters
    ----------
    horiz_dim: integer
        Horizontal resolution.
    bbox: dictionary
        Dictionary with 'west', 'south', 'east', 'north' corner coordinates.

    Returns
    -------
    grid_x: (X, Y) ndarray
        Meshgrid of x coordinates over the bounding box.
    grid_y: (X, Y) ndarray
        Meshgrid of y coordinates over the bounding box.

    """
    nx, ny = get_xy_steps(bbox, horiz_dim)
    xs = np.linspace(bbox['west'], bbox['east'], nx)
    ys = np.linspace(bbox['south'], bbox['north'], ny)
    grid_x, grid_y = np.meshgrid(xs, ys)
    return grid_x, grid_y
def generate_grid_coords(gx, gy):
    r"""Pair each grid x coordinate with its y coordinate.

    Parameters
    ----------
    gx: numeric
        Meshgrid of x coordinates.
    gy: numeric
        Meshgrid of y coordinates.

    Returns
    -------
    (N, 2) ndarray
        Flattened list of (x, y) coordinate pairs for every grid cell.

    """
    flat_x = gx.ravel()
    flat_y = gy.ravel()
    return np.stack([flat_x, flat_y], axis=1)
def get_xy_range(bbox):
    r"""Return the east-west and south-north extents of a bounding box.

    Parameters
    ----------
    bbox: dictionary
        Dictionary with 'west', 'south', 'east', 'north' corner coordinates.

    Returns
    -------
    x_range: float
        Extent in the x dimension.
    y_range: float
        Extent in the y dimension.

    """
    return bbox['east'] - bbox['west'], bbox['north'] - bbox['south']
def get_xy_steps(bbox, h_dim):
    r"""Return the number of grid points in x and y for a bounding box.

    Parameters
    ----------
    bbox: dictionary
        Dictionary with 'west', 'south', 'east', 'north' corner coordinates.
    h_dim: integer
        Horizontal grid spacing.

    Returns
    -------
    x_steps: int
        Number of grid points in the x dimension.
    y_steps: int
        Number of grid points in the y dimension.

    """
    # Ceiling guarantees the grid spans the whole box even when the extent is
    # not an exact multiple of the spacing.
    x_steps = np.ceil((bbox['east'] - bbox['west']) / h_dim)
    y_steps = np.ceil((bbox['north'] - bbox['south']) / h_dim)
    return int(x_steps), int(y_steps)
def get_boundary_coords(x, y, spatial_pad=0):
    r"""Return a bounding box around the given points, optionally padded.

    Parameters
    ----------
    x: numeric
        x coordinates.
    y: numeric
        y coordinates.
    spatial_pad: numeric
        Amount added on every side of the box to reduce edge effects. Default 0.

    Returns
    -------
    bbox: dictionary
        Dictionary with 'west', 'south', 'east', 'north' corner coordinates.

    """
    return {'west': np.min(x) - spatial_pad,
            'south': np.min(y) - spatial_pad,
            'east': np.max(x) + spatial_pad,
            'north': np.max(y) + spatial_pad}
@exporter.export
def natural_neighbor_to_grid(xp, yp, variable, grid_x, grid_y):
    r"""Interpolate scattered observations to a regular grid via natural neighbors.

    Grid values are assigned using the approach of Liang and Hale [Liang2010]_.

    Parameters
    ----------
    xp: (P, ) ndarray
        x-coordinates of observations.
    yp: (P, ) ndarray
        y-coordinates of observations.
    variable: (P, ) ndarray
        Observation values; variable[i] is the observation at (xp[i], yp[i]).
    grid_x: (M, N) ndarray
        Meshgrid of x coordinates.
    grid_y: (M, N) ndarray
        Meshgrid of y coordinates.

    Returns
    -------
    img: (M, N) ndarray
        Interpolated values on the 2-dimensional grid.

    See Also
    --------
    natural_neighbor_to_points

    """
    # Flatten the grid into a point list and delegate to the point-based routine.
    obs_locations = list(zip(xp, yp))
    target_points = generate_grid_coords(grid_x, grid_y)
    interpolated = natural_neighbor_to_points(obs_locations, variable, target_points)
    return interpolated.reshape(grid_x.shape)
@exporter.export
def inverse_distance_to_grid(xp, yp, variable, grid_x, grid_y, r, gamma=None, kappa=None,
                             min_neighbors=3, kind='cressman'):
    r"""Interpolate scattered points to a regular grid with inverse-distance weighting.

    Grid values are computed with either the [Cressman1959]_ or [Barnes1964]_
    scheme; the Barnes implementation used here follows [Koch1983]_.

    Parameters
    ----------
    xp: (N, ) ndarray
        x-coordinates of observations.
    yp: (N, ) ndarray
        y-coordinates of observations.
    variable: (N, ) ndarray
        Observation values; variable[i] is the observation at (xp[i], yp[i]).
    grid_x: (M, 2) ndarray
        Meshgrid of x coordinates.
    grid_y: (M, 2) ndarray
        Meshgrid of y coordinates.
    r: float
        Radius from each grid center within which observations are weighted.
    gamma: float
        Adjustable smoothing parameter for Barnes interpolation. Default None.
    kappa: float
        Response parameter for Barnes interpolation. Default None.
    min_neighbors: int
        Minimum number of neighbors needed to interpolate a point. Default 3.
    kind: str
        Weighting scheme to use: 'cressman' or 'barnes'. Default 'cressman'.

    Returns
    -------
    img: (M, N) ndarray
        Interpolated values on the 2-dimensional grid.

    See Also
    --------
    inverse_distance_to_points

    """
    # Flatten the grid into a point list and delegate to the point-based routine.
    obs_locations = list(zip(xp, yp))
    target_points = generate_grid_coords(grid_x, grid_y)
    gridded = inverse_distance_to_points(obs_locations, variable, target_points, r,
                                         gamma=gamma, kappa=kappa,
                                         min_neighbors=min_neighbors, kind=kind)
    return gridded.reshape(grid_x.shape)
@exporter.export
@preprocess_pandas
def interpolate_to_grid(x, y, z, interp_type='linear', hres=50000,
                        minimum_neighbors=3, gamma=0.25, kappa_star=5.052,
                        search_radius=None, rbf_func='linear', rbf_smooth=0,
                        boundary_coords=None):
    r"""Interpolate (x, y) -> z observations onto a regular grid.

    This is a thin wrapper around `interpolate_to_points` that first builds a
    regular grid covering the observations. Interpolation happens on a
    Cartesian plane even when lat/lon coordinates are supplied.

    Parameters
    ----------
    x: array_like
        x coordinate, in linear distance or degrees.
    y: array_like
        y coordinate, in linear distance or degrees.
    z: array_like
        Observation values.
    interp_type: str
        Interpolation method: "linear", "nearest", "cubic", or "rbf" from
        `scipy.interpolate`; "natural_neighbor", "barnes", or "cressman" from
        `metpy.interpolate`. Default "linear".
    hres: float
        Horizontal resolution of the generated grid, in the same units as
        x and y. Default 50000.
    minimum_neighbors: int
        Minimum number of neighbors needed for Barnes or Cressman
        interpolation of a point. Default 3.
    gamma: float
        Adjustable smoothing parameter for Barnes interpolation. Default 0.25.
    kappa_star: float
        Nondimensional response parameter for Barnes interpolation.
        Default 5.052.
    search_radius: float
        Search radius for Barnes and Cressman schemes; defaults to 5 times
        the average observation spacing when unspecified.
    rbf_func: str
        Rbf basis function: 'multiquadric', 'inverse', 'gaussian', 'linear',
        'cubic', 'quintic', or 'thin_plate'. Default 'linear'. See
        `scipy.interpolate.Rbf`.
    rbf_smooth: float
        Rbf smoothing value; higher means smoother. Default 0.
    boundary_coords: dictionary
        Optional study-area boundary of the form
        {'west': west, 'south': south, 'east': east, 'north': north}.

    Returns
    -------
    grid_x: (N, 2) ndarray
        Meshgrid of x coordinates for the interpolation.
    grid_y: (N, 2) ndarray
        Meshgrid of y coordinates for the interpolation.
    img: (M, N) ndarray
        Interpolated values for each grid cell.

    See Also
    --------
    interpolate_to_points

    """
    # Build the target grid, deriving the bounding box from the data if the
    # caller did not supply one.
    if boundary_coords is None:
        boundary_coords = get_boundary_coords(x, y)
    grid_x, grid_y = generate_grid(hres, boundary_coords)

    # Flatten the grid into a point list and delegate to the point-based routine.
    obs_points = np.array(list(zip(x, y)))
    target_points = generate_grid_coords(grid_x, grid_y)
    interpolated = interpolate_to_points(obs_points, z, target_points,
                                         interp_type=interp_type,
                                         minimum_neighbors=minimum_neighbors,
                                         gamma=gamma, kappa_star=kappa_star,
                                         search_radius=search_radius,
                                         rbf_func=rbf_func, rbf_smooth=rbf_smooth)
    return grid_x, grid_y, interpolated.reshape(grid_x.shape)
@exporter.export
@preprocess_pandas
def interpolate_to_isosurface(level_var, interp_var, level, bottom_up_search=True):
    r"""Linear interpolation of a variable to a given vertical level from given values.

    This function assumes that highest vertical level (lowest pressure) is zeroth index.
    A classic use of this function would be to compute the potential temperature on the
    dynamic tropopause (2 PVU surface).

    Parameters
    ----------
    level_var: array_like (P, M, N)
        Level values in 3D grid on common vertical coordinate (e.g., PV values on
        isobaric levels). Assumes height dimension is highest to lowest in atmosphere.
    interp_var: array_like (P, M, N)
        Variable on 3D grid with same vertical coordinate as level_var to interpolate to
        given level (e.g., potential temperature on isobaric levels)
    level: int or float
        Desired interpolated level (e.g., 2 PVU surface)
    bottom_up_search : bool, optional
        Controls whether to search for levels bottom-up, or top-down. Defaults to
        True, which is bottom-up search.

    Returns
    -------
    interp_level: (M, N) ndarray
        The interpolated variable (e.g., potential temperature) on the desired level (e.g.,
        2 PVU surface)

    Notes
    -----
    This function implements a linear interpolation to estimate values on a given surface.
    The prototypical example is interpolation of potential temperature to the dynamic
    tropopause (e.g., 2 PVU surface)

    """
    # Imported here (not at module top) to avoid a circular import with ..calc.
    from ..calc import find_bounding_indices

    # Find index values above and below desired interpolated surface value
    above, below, good = find_bounding_indices(level_var, [level], axis=0,
                                               from_below=bottom_up_search)

    # Linear interpolation of variable to interpolated surface value
    # (fractional distance of `level` between the bracketing level_var values,
    # applied to the corresponding interp_var values).
    interp_level = (((level - level_var[above]) / (level_var[below] - level_var[above]))
                    * (interp_var[below] - interp_var[above])) + interp_var[above]

    # Handle missing values and instances where no values for surface exist above and below
    interp_level[~good] = np.nan
    # Columns where the surface never crosses `level`: clamp to the bottom/top value.
    minvar = (np.min(level_var, axis=0) >= level)
    maxvar = (np.max(level_var, axis=0) <= level)
    interp_level[0][minvar] = interp_var[-1][minvar]
    interp_level[0][maxvar] = interp_var[0][maxvar]
    return interp_level.squeeze()
|
Unidata/MetPy
|
src/metpy/interpolate/grid.py
|
Python
|
bsd-3-clause
| 12,473
|
[
"Gaussian"
] |
34e3d9a29f6795f328d19036363da8a55b45e33c179a5d5790bea5e25b56d337
|
import matplotlib
import pylab as plt
from scipy import interpolate
from Pyheana.load.load_data import *
def plot_slices(data):
    """Render a smoothed 3D surface of per-slice BPM signal versus turn number.

    data: 3D array of shape (n_cols, n_slices, n_turns); the plotted quantity
    is the elementwise product of columns 2 and 13 (their physical meaning is
    not visible here — presumably position times intensity; confirm against
    Pyheana.load.load_data). Shows an interactive Mayavi window; returns None.
    """
    [n_cols, n_slices, n_turns] = data.shape
    # Quantity to plot: product of data columns 2 and 13 (see docstring note).
    A = data[2,:,:] * data[13,:,:]

    # Color cycle
    n_colors = n_turns
    colmap = plt.get_cmap('jet')
    c = [colmap(i) for i in plt.linspace(0., 1., n_colors)]

    fig1 = plt.figure(figsize=(12, 8))
    # ax1 = plt.gca()
    # [ax1.plot(A[:,i], c=c[i]) for i in range(1, n_turns, 1)]
    # plt.show()

    # Smoothing: resample the (slice, turn) surface onto a 1000x1000 grid
    # using a bivariate spline.
    X = plt.arange(0, n_slices, 1)
    Y = plt.arange(0, n_turns, 1)
    A = A[X,:][:,Y]
    Xi = plt.linspace(X[0], X[-1], 1000)
    Yi = plt.linspace(Y[0], Y[-1], 1000)
    sp = interpolate.RectBivariateSpline(X, Y, A)
    Ai = sp(Xi, Yi)
    # Transpose meshgrids so axis order matches the spline output.
    X, Y = plt.meshgrid(X, Y)
    X, Y = X.T, Y.T
    Xi, Yi = plt.meshgrid(Xi, Yi)
    Xi, Yi = Xi.T, Yi.T

    # Earlier matplotlib 3D versions of this plot, kept for reference:
    #fig = figure(1)
    #ax3d = fig.gca(projection='3d')
    #pl = ax3d.plot_wireframe(Xi, Yi, Ai, \
    #rstride=ns, cstride=ns, cmap=cm.jet, linewidth=0.1, alpha=0.3)
    #cset = ax3d.contourf(Xi, Yi, Ai, zdir='z')#, offset=-100)
    #cset = ax3d.contourf(Xi, Yi, Ai, zdir='x')#, offset=-40)
    #cset = ax3d.contourf(Xi, Yi, Ai, zdir='y')#, offset=40)
    #ax3d.zaxis.set_major_locator(LinearLocator(10))
    #ax3d.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    #ax3d.view_init(elev=25, azim=-150)
    #ax3d.set_xlabel('Slices')
    #ax3d.set_ylabel('Turns')
    #fig = figure(2)
    #ax3d = fig.gca(projection='3d')
    #pl = ax3d.plot_wireframe(X, Y, A, \
    #rstride=ns, cstride=ns, cmap=cm.jet, linewidth=0.1, alpha=0.3)
    #cset = ax3d.contourf(X, Y, A, zdir='z')#, offset=-100)
    #cset = ax3d.contourf(X, Y, A, zdir='x')#, offset=-40)
    #cset = ax3d.contourf(X, Y, A, zdir='y')#, offset=40)
    #ax3d.zaxis.set_major_locator(LinearLocator(10))
    #ax3d.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    #ax3d.view_init(elev=25, azim=-150)
    #show()

    # Mayavi is imported lazily so the module loads without a working GUI stack.
    from mayavi.modules.grid_plane import GridPlane
    from mayavi.modules.outline import Outline
    from mayavi.modules.volume import Volume
    from mayavi.scripts import mayavi2
    from mayavi import mlab
    mlab.options.backend = 'envisage'
    # graphics card driver problem
    # workaround by casting int in line 246 of enthought/mayavi/tools/figure.py
    #mlab.options.offscreen = True
    #enthought.mayavi.engine.current_scene.scene.off_screen_rendering = True
    mlab.figure(bgcolor=(1,1,1), fgcolor=(0.2,0.2,0.2))

    # Fixed display aspect for the surface; `ranges` carries the real data
    # extents so the axis labels remain physically meaningful.
    aspect = (0, 10, 0, 20, -6, 6)
    ranges = (plt.amin(X), plt.amax(X), plt.amin(Y), plt.amax(Y), plt.amin(A), plt.amax(A))
    # s = mlab.surf(Xi, Yi, Ai, colormap='jet', representation='surface',
    #               warp_scale=1e-3)
    s = mlab.surf(Xi, Yi, Ai, colormap='jet', representation='surface', extent=aspect,
                  warp_scale='auto')
    mlab.outline(line_width=1)
    mlab.axes(x_axis_visibility=True,
              xlabel='Slice No.', ylabel='Turn No.', zlabel='BPM signal', ranges=ranges)
    #mlab.title(('Electron cloud dynamics - slice passage: %03d/%03d' % (i, n_blocks)), size=0.25)
    mlab.view(azimuth=230, elevation=60)

    mlab.show()
|
like2000/Pyheana
|
display/plot_slices.py
|
Python
|
gpl-2.0
| 3,174
|
[
"Mayavi"
] |
e4b50134ce6b1191576fa93b0d3dcf89e3edf23ab44c392c8de18eef269646cc
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile
from datetime import date
# Resolve this script's directory and make it the working directory so every
# relative path below (manifest, LICENSE, assets, build, ...) resolves there.
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)

# Manifest keys that must be present for packaging to proceed.
# NOTE: the original list contained 'copyright' twice; deduplicated.
required_module_keys = ['name', 'version', 'moduleid', 'description', 'copyright',
                        'license', 'platform', 'minsdk']

# Placeholder manifest values supplied by the module template; validate_manifest()
# warns when any of these defaults is still present.
module_defaults = {
    'description': 'My module',
    'author': 'Your Name',
    'license': 'Specify your license',
    'copyright': 'Copyright (c) %s by Your Company' % str(date.today().year),
}

# Sentinel text left in LICENSE by the template when no real license was set.
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
    """Return the Titanium SDK path from config, with ~ and env vars expanded."""
    sdk_path = config['TITANIUM_SDK']
    return os.path.expandvars(os.path.expanduser(sdk_path))
def replace_vars(config, token):
    """Expand $(KEY) placeholders in `token` using values from `config`.

    Scans left to right; stops at the first unterminated '$(' or unknown key.
    Each recognized placeholder is replaced everywhere it occurs in the token.
    """
    idx = token.find('$(')
    while idx != -1:
        end = token.find(')', idx + 2)
        if end == -1:
            break
        key = token[idx + 2:end]
        # 'key in config' works on both Python 2 and 3; dict.has_key (used by
        # the original) was removed in Python 3.
        if key not in config:
            break
        token = token.replace('$(%s)' % key, config[key])
        idx = token.find('$(')
    return token
def read_ti_xcconfig():
    """Parse titanium.xcconfig into a dict, expanding $(VAR) references.

    Lines starting with '//' are comments; everything else of the form
    KEY = VALUE is stored, with VALUE expanded against keys read so far.
    """
    # `with` closes the handle; the original leaked an open file object.
    with open(os.path.join(cwd, 'titanium.xcconfig')) as xcconfig:
        contents = xcconfig.read()
    config = {}
    for line in contents.splitlines(False):
        line = line.strip()
        if line[0:2] == '//':
            continue
        idx = line.find('=')
        if idx > 0:
            key = line[0:idx].strip()
            value = line[idx + 1:].strip()
            config[key] = replace_vars(config, value)
    return config
def generate_doc(config):
    """Render each markdown file under documentation/ to HTML.

    Returns a list of {filename: html} dicts, or None when the documentation
    directory is missing. Entries in the module-level ignoreFiles list and
    subdirectories are skipped.
    """
    docdir = os.path.join(cwd, 'documentation')
    if not os.path.exists(docdir):
        # Parenthesized print works on both Python 2 and 3.
        print("Couldn't find documentation file at: %s" % docdir)
        return None

    # Prefer markdown2 when available, fall back to markdown.
    try:
        import markdown2 as markdown
    except ImportError:
        import markdown
    documentation = []
    for file in os.listdir(docdir):
        if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
            continue
        # `with` closes the handle; the original leaked an open file object.
        with open(os.path.join(docdir, file)) as doc_file:
            md = doc_file.read()
        documentation.append({file: markdown.markdown(md)})
    return documentation
def compile_js(manifest,config):
    """Compile the module's CommonJS assets and splice them into the iOS sources.

    No-op when assets/ti.intercom.js is absent. Otherwise compiles the JS with
    the Titanium Compiler, injects the generated asset lookup code into
    TiIntercomModuleAssets.m, and writes the export list to metadata.json.
    """
    js_file = os.path.join(cwd,'assets','ti.intercom.js')
    if not os.path.exists(js_file): return
    # Compiler is provided by the Titanium SDK tooling on sys.path at build time.
    from compiler import Compiler
    try:
        import json
    except:
        import simplejson as json

    compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
    root_asset, module_assets = compiler.compile_module()

    # Objective-C snippets spliced into the assets router; %s receives the
    # generated byte-array code for the root asset / asset map respectively.
    root_asset_content = """
%s

	return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset

    module_asset_content = """
%s

	NSNumber *index = [map objectForKey:path];
	if (index == nil) {
		return nil;
	}
	return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets

    from tools import splice_code

    assets_router = os.path.join(cwd,'Classes','TiIntercomModuleAssets.m')
    splice_code(assets_router, 'asset', root_asset_content)
    splice_code(assets_router, 'resolve_asset', module_asset_content)

    # Generate the exports after crawling all of the available JS source
    exports = open('metadata.json','w')
    json.dump({'exports':compiler.exports }, exports)
    exports.close()
def die(msg):
    """Print `msg` and abort packaging with a non-zero exit status."""
    # Parenthesized print works on both Python 2 and 3.
    print(msg)
    sys.exit(1)
def warn(msg):
    """Print a non-fatal packaging warning prefixed with [WARN]."""
    # Parenthesized print works on both Python 2 and 3.
    print("[WARN] %s" % msg)
def validate_license():
    """Warn when the LICENSE file still contains the template placeholder text."""
    # `with` closes the handle; the original leaked an open file object.
    with open(os.path.join(cwd, 'LICENSE')) as license_file:
        license_text = license_file.read()
    if license_text.find(module_license_default) != -1:
        warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
	"""Parse and sanity-check the module ``manifest`` file.

	Returns a ``(manifest_dict, path)`` tuple. Aborts via ``die`` when
	the file is missing or a required key is absent, and warns when a
	key still carries its template default value.
	"""
	path = os.path.join(cwd,'manifest')
	# Check existence *before* opening. The previous version opened the
	# file first, so a missing manifest raised IOError and never reached
	# this friendlier error message.
	if not os.path.exists(path):
		die("missing %s" % path)
	manifest = {}
	f = open(path)
	try:
		for line in f.readlines():
			line = line.strip()
			if line[0:1]=='#': continue
			if line.find(':') < 0: continue
			# Split on the first ':' only, so values may themselves
			# contain colons (e.g. URLs in description fields).
			key, value = line.split(':', 1)
			manifest[key.strip()] = value.strip()
	finally:
		f.close()
	for key in required_module_keys:
		# 'in' replaces the Python-2-only dict.has_key().
		if not key in manifest: die("missing required manifest key '%s'" % key)
		if key in module_defaults:
			defvalue = module_defaults[key]
			curvalue = manifest[key]
			if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
	return manifest,path
# Files and directories excluded from documentation scanning and from
# the packaged module zip.
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignoreExt=[]):
	"""Recursively add *dir* to the open ZipFile *zf*.

	Entries are stored under ``basepath/dir``. Directories listed in the
	module-level ``ignoreDirs`` are never descended into, files listed
	in ``ignoreFiles`` are skipped, as is any file whose extension
	appears in *ignoreExt*. No-op when *dir* does not exist.
	"""
	if not os.path.exists(dir):
		return
	for root, subdirs, filenames in os.walk(dir):
		# Prune ignored directories in place so os.walk never visits them.
		subdirs[:] = [d for d in subdirs if d not in ignoreDirs]
		for fname in filenames:
			if fname in ignoreFiles:
				continue
			if os.path.splitext(fname)[1] in ignoreExt:
				continue
			src = os.path.join(root, fname)
			dest = src.replace(dir, '%s/%s' % (basepath, dir), 1)
			zf.write(src, dest)
def glob_libfiles():
	"""Return the Release-configuration static libraries under build/."""
	return [libfile for libfile in glob.glob('build/**/*.a')
		if libfile.find('Release-') != -1]
def build_module(manifest,config):
	"""Build device and simulator static libraries with xcodebuild and
	merge them into one fat library with lipo.

	Aborts the build (via ``die``) when either xcodebuild invocation
	returns a non-zero status. ``config`` is unused here.
	"""
	from tools import ensure_dev_path
	ensure_dev_path()
	rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
	if rc != 0:
		die("xcodebuild failed")
	rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
	if rc != 0:
		die("xcodebuild failed")
	# build the merged library using lipo
	moduleid = manifest['moduleid']
	libpaths = ''
	for libfile in glob_libfiles():
		libpaths+='%s ' % libfile
	# NOTE(review): lipo's exit status is not checked -- confirm whether
	# a failed merge should also abort the build.
	os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def package_module(manifest,mf,config):
	"""Assemble the distributable zip for the module.

	The archive layout is ``modules/iphone/<moduleid>/<version>/...`` and
	contains the manifest (*mf*), the fat static library, rendered HTML
	docs, the assets/example/platform folders, LICENSE, module.xcconfig
	and, when present, the JS exports metadata.
	"""
	# NOTE(review): `name` is computed but never used below.
	name = manifest['name'].lower()
	moduleid = manifest['moduleid'].lower()
	version = manifest['version']
	modulezip = '%s-iphone-%s.zip' % (moduleid,version)
	# Always rebuild the archive from scratch.
	if os.path.exists(modulezip): os.remove(modulezip)
	zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
	modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
	zf.write(mf,'%s/manifest' % modulepath)
	libname = 'lib%s.a' % moduleid
	zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
	docs = generate_doc(config)
	if docs!=None:
		for doc in docs:
			for file, html in doc.iteritems():
				# Store rendered markdown with an .html extension.
				filename = string.replace(file,'.md','.html')
				zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
	# Compiled .js is already baked into the binary; exclude it from assets.
	zip_dir(zf,'assets',modulepath,['.pyc','.js'])
	zip_dir(zf,'example',modulepath,['.pyc'])
	zip_dir(zf,'platform',modulepath,['.pyc','.js'])
	zf.write('LICENSE','%s/LICENSE' % modulepath)
	zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
	exports_file = 'metadata.json'
	if os.path.exists(exports_file):
		zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
	zf.close()
if __name__ == '__main__':
	# Build pipeline: validate inputs, locate the Titanium SDK, compile
	# the JS asset router, build the fat library, then zip the result.
	manifest,mf = validate_manifest()
	validate_license()
	config = read_ti_xcconfig()
	sdk = find_sdk(config)
	# Make the SDK's python tooling importable for compile_js/build_module.
	sys.path.insert(0,os.path.join(sdk,'iphone'))
	sys.path.append(os.path.join(sdk, "common"))
	compile_js(manifest,config)
	build_module(manifest,config)
	package_module(manifest,mf,config)
	sys.exit(0)
|
simonrand/TiIntercom
|
build.py
|
Python
|
mit
| 6,787
|
[
"VisIt"
] |
a26192638d4b7488dd8c25885d5b0787697a7c58b1f26668f01ebd14c3ccb568
|
# -*- coding: utf-8 -*-
"""
Generators for random graphs.
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from collections import defaultdict
import itertools
import math
import random
import networkx as nx
from .classic import empty_graph, path_graph, complete_graph
from .degree_seq import degree_sequence_tree
__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
                        'Pieter Swart (swart@lanl.gov)',
                        'Dan Schult (dschult@colgate.edu)'])
# Public API of this module.
__all__ = ['fast_gnp_random_graph',
           'gnp_random_graph',
           'dense_gnm_random_graph',
           'gnm_random_graph',
           'erdos_renyi_graph',
           'binomial_graph',
           'newman_watts_strogatz_graph',
           'watts_strogatz_graph',
           'connected_watts_strogatz_graph',
           'random_regular_graph',
           'barabasi_albert_graph',
           'powerlaw_cluster_graph',
           'duplication_divergence_graph',
           'random_lobster',
           'random_shell_graph',
           'random_powerlaw_tree',
           'random_powerlaw_tree_sequence']
#-------------------------------------------------------------------------
# Some Famous Random Graphs
#-------------------------------------------------------------------------
def fast_gnp_random_graph(n, p, seed=None, directed=False):
    """Returns a `G_{n,p}` random graph, also known as an Erdős-Rényi graph or
    a binomial graph.
    Parameters
    ----------
    n : int
        The number of nodes.
    p : float
        Probability for edge creation.
    seed : int, optional
        Seed for random number generator (default=None).
    directed : bool, optional (default=False)
        If ``True``, this function returns a directed graph.
    Notes
    -----
    The `G_{n,p}` graph algorithm chooses each of the `[n (n - 1)] / 2`
    (undirected) or `n (n - 1)` (directed) possible edges with probability `p`.
    This algorithm runs in `O(n + m)` time, where `m` is the expected number of
    edges, which equals `p n (n - 1) / 2`. This should be faster than
    :func:`gnp_random_graph` when `p` is small and the expected number of edges
    is small (that is, the graph is sparse).
    See Also
    --------
    gnp_random_graph
    References
    ----------
    .. [1] Vladimir Batagelj and Ulrik Brandes,
       "Efficient generation of large random networks",
       Phys. Rev. E, 71, 036113, 2005.
    """
    G = empty_graph(n)
    G.name="fast_gnp_random_graph(%s,%s)"%(n,p)
    if not seed is None:
        random.seed(seed)
    if p <= 0 or p >= 1:
        # Degenerate probabilities: delegate to the simple generator,
        # which returns the empty or complete graph directly.
        return nx.gnp_random_graph(n,p,directed=directed)
    w = -1
    lp = math.log(1.0 - p)
    if directed:
        G = nx.DiGraph(G)
        # Nodes in graph are from 0,n-1 (start with v as the first node index).
        v = 0
        while v < n:
            # Geometric skip: jump directly to the next selected edge
            # instead of testing every candidate pair ([1], Algorithm 1).
            lr = math.log(1.0 - random.random())
            w = w + 1 + int(lr/lp)
            if v == w: # avoid self loops
                w = w + 1
            # Wrap the column index w into [0, n) and advance the row v.
            while v < n <= w:
                w = w - n
                v = v + 1
                if v == w: # avoid self loops
                    w = w + 1
            if v < n:
                G.add_edge(v, w)
    else:
        # Nodes in graph are from 0,n-1 (start with v as the second node index).
        v = 1
        while v < n:
            # Same geometric skipping over the lower-triangular pairs.
            lr = math.log(1.0 - random.random())
            w = w + 1 + int(lr/lp)
            while w >= v and v < n:
                w = w - v
                v = v + 1
            if v < n:
                G.add_edge(v, w)
    return G
def gnp_random_graph(n, p, seed=None, directed=False):
    """Returns a `G_{n,p}` random graph (Erdős-Rényi / binomial graph).

    Every one of the possible edges is included independently with
    probability ``p``. :func:`binomial_graph` and
    :func:`erdos_renyi_graph` are aliases of this function.

    Parameters
    ----------
    n : int
        The number of nodes.
    p : float
        Probability for edge creation.
    seed : int, optional
        Seed for random number generator (default=None).
    directed : bool, optional (default=False)
        If ``True``, this function returns a directed graph.

    See Also
    --------
    fast_gnp_random_graph

    Notes
    -----
    Runs in `O(n^2)` time; for small `p` prefer
    :func:`fast_gnp_random_graph`.

    References
    ----------
    .. [1] P. Erdős and A. Rényi, On Random Graphs, Publ. Math. 6, 290 (1959).
    .. [2] E. N. Gilbert, Random Graphs, Ann. Math. Stat., 30, 1141 (1959).
    """
    G = nx.DiGraph() if directed else nx.Graph()
    G.add_nodes_from(range(n))
    G.name="gnp_random_graph(%s,%s)"%(n,p)
    # Degenerate probabilities need no sampling at all.
    if p <= 0:
        return G
    if p >= 1:
        return complete_graph(n, create_using=G)
    if seed is not None:
        random.seed(seed)
    if G.is_directed():
        candidates = itertools.permutations(range(n), 2)
    else:
        candidates = itertools.combinations(range(n), 2)
    # One Bernoulli trial per candidate pair, in deterministic pair order.
    G.add_edges_from(pair for pair in candidates if random.random() < p)
    return G
# Aliases under the names common in the literature: the binomial random
# graph and the Erdős-Rényi random graph are both G_{n,p}.
binomial_graph=gnp_random_graph
erdos_renyi_graph=gnp_random_graph
def dense_gnm_random_graph(n, m, seed=None):
    """Returns a `G_{n,m}` random graph.
    In the `G_{n,m}` model, a graph is chosen uniformly at random from the set
    of all graphs with `n` nodes and `m` edges.
    This algorithm should be faster than :func:`gnm_random_graph` for dense
    graphs.
    Parameters
    ----------
    n : int
        The number of nodes.
    m : int
        The number of edges.
    seed : int, optional
        Seed for random number generator (default=None).
    See Also
    --------
    gnm_random_graph()
    Notes
    -----
    Algorithm by Keith M. Briggs Mar 31, 2006.
    Inspired by Knuth's Algorithm S (Selection sampling technique),
    in section 3.4.2 of [1]_.
    References
    ----------
    .. [1] Donald E. Knuth, The Art of Computer Programming,
        Volume 2/Seminumerical algorithms, Third Edition, Addison-Wesley, 1997.
    """
    # Use floor division: plain '/' yields a float on Python 3, which
    # breaks random.randrange(mmax-t) below. n*(n-1) is always even.
    mmax = n * (n - 1) // 2
    if m>=mmax:
        G=complete_graph(n)
    else:
        G=empty_graph(n)
    G.name="dense_gnm_random_graph(%s,%s)"%(n,m)
    if n==1 or m>=mmax:
        return G
    if seed is not None:
        random.seed(seed)
    # Knuth's selection sampling: scan the mmax candidate pairs (u,v) in
    # row-major order; t pairs seen so far, k edges already selected.
    u=0
    v=1
    t=0
    k=0
    while True:
        # Select the current pair with probability (m-k)/(mmax-t), which
        # yields a uniform m-subset of the candidate pairs.
        if random.randrange(mmax-t)<m-k:
            G.add_edge(u,v)
            k+=1
            if k==m: return G
        t+=1
        v+=1
        if v==n: # go to next row of adjacency matrix
            u+=1
            v=u+1
def gnm_random_graph(n, m, seed=None, directed=False):
    """Returns a `G_{n,m}` random graph.

    A graph is chosen uniformly at random from the set of all graphs
    with ``n`` nodes and ``m`` edges. Faster than
    :func:`dense_gnm_random_graph` for sparse graphs.

    Parameters
    ----------
    n : int
        The number of nodes.
    m : int
        The number of edges.
    seed : int, optional
        Seed for random number generator (default=None).
    directed : bool, optional (default=False)
        If True return a directed graph

    See also
    --------
    dense_gnm_random_graph
    """
    G = nx.DiGraph() if directed else nx.Graph()
    G.add_nodes_from(range(n))
    G.name="gnm_random_graph(%s,%s)"%(n,m)
    if seed is not None:
        random.seed(seed)
    if n == 1:
        return G
    max_edges = n * (n - 1)
    if not directed:
        max_edges /= 2.0
    # Requesting at least the maximum possible edge count gives K_n.
    if m >= max_edges:
        return complete_graph(n, create_using=G)
    nodes = list(G)
    added = 0
    # Rejection sampling: draw endpoint pairs uniformly, discarding
    # self-loops and already-present edges until m edges exist.
    while added < m:
        u = random.choice(nodes)
        v = random.choice(nodes)
        if u == v or G.has_edge(u, v):
            continue
        G.add_edge(u, v)
        added += 1
    return G
def newman_watts_strogatz_graph(n, k, p, seed=None):
    """Return a Newman–Watts–Strogatz small-world graph.
    Parameters
    ----------
    n : int
        The number of nodes.
    k : int
        Each node is joined with its ``k`` nearest neighbors in a ring
        topology.
    p : float
        The probability of adding a new edge for each edge.
    seed : int, optional
        The seed for the random number generator (the default is ``None``).
    Notes
    -----
    First create a ring over ``n`` nodes.  Then each node in the ring is
    connected with its ``k`` nearest neighbors (or ``k - 1`` neighbors if ``k``
    is odd).  Then shortcuts are created by adding new edges as follows: for
    each edge ``(u, v)`` in the underlying "``n``-ring with ``k`` nearest
    neighbors" with probability ``p`` add a new edge ``(u, w)`` with
    randomly-chosen existing node ``w``.  In contrast with
    :func:`watts_strogatz_graph`, no edges are removed.
    See Also
    --------
    watts_strogatz_graph()
    References
    ----------
    .. [1] M. E. J. Newman and D. J. Watts,
       Renormalization group analysis of the small-world network model,
       Physics Letters A, 263, 341, 1999.
       http://dx.doi.org/10.1016/S0375-9601(99)00757-4
    """
    if seed is not None:
        random.seed(seed)
    if k>=n:
        raise nx.NetworkXError("k>=n, choose smaller k or larger n")
    G=empty_graph(n)
    G.name="newman_watts_strogatz_graph(%s,%s,%s)"%(n,k,p)
    nlist = list(G.nodes())
    fromv = nlist
    # connect the k/2 neighbors
    for j in range(1, k // 2+1):
        # Rotate the node list by j so each node pairs with its j-th
        # ring neighbor.
        tov = fromv[j:] + fromv[0:j] # the first j are now last
        for i in range(len(fromv)):
            G.add_edge(fromv[i], tov[i])
    # for each edge u-v, with probability p, randomly select existing
    # node w and add new edge u-w
    e = list(G.edges())
    for (u, v) in e:
        if random.random() < p:
            w = random.choice(nlist)
            # no self-loops and reject if edge u-w exists
            # is that the correct NWS model?
            while w == u or G.has_edge(u, w):
                w = random.choice(nlist)
                if G.degree(u) >= n-1:
                    # u is already connected to every other node; no
                    # valid shortcut endpoint exists, so give up on it.
                    break # skip this rewiring
            else:
                G.add_edge(u,w)
    return G
def watts_strogatz_graph(n, k, p, seed=None):
    """Return a Watts–Strogatz small-world graph.
    Parameters
    ----------
    n : int
        The number of nodes
    k : int
        Each node is joined with its ``k`` nearest neighbors in a ring
        topology.
    p : float
        The probability of rewiring each edge
    seed : int, optional
        Seed for random number generator (default=None)
    See Also
    --------
    newman_watts_strogatz_graph()
    connected_watts_strogatz_graph()
    Notes
    -----
    First create a ring over ``n`` nodes.  Then each node in the ring is joined
    to its ``k`` nearest neighbors (or ``k - 1`` neighbors if ``k`` is odd).
    Then shortcuts are created by replacing some edges as follows: for each
    edge ``(u, v)`` in the underlying "``n``-ring with ``k`` nearest neighbors"
    with probability ``p`` replace it with a new edge ``(u, w)`` with uniformly
    random choice of existing node ``w``.
    In contrast with :func:`newman_watts_strogatz_graph`, the random rewiring
    does not increase the number of edges. The rewired graph is not guaranteed
    to be connected as in :func:`connected_watts_strogatz_graph`.
    References
    ----------
    .. [1] Duncan J. Watts and Steven H. Strogatz,
       Collective dynamics of small-world networks,
       Nature, 393, pp. 440--442, 1998.
    """
    if k>=n:
        raise nx.NetworkXError("k>=n, choose smaller k or larger n")
    if seed is not None:
        random.seed(seed)
    G = nx.Graph()
    G.name="watts_strogatz_graph(%s,%s,%s)"%(n,k,p)
    nodes = list(range(n)) # nodes are labeled 0 to n-1
    # connect each node to k/2 neighbors
    for j in range(1, k // 2+1):
        # Rotating the node list by j pairs each node with its j-th
        # ring neighbor.
        targets = nodes[j:] + nodes[0:j] # first j nodes are now last in list
        G.add_edges_from(zip(nodes,targets))
    # rewire edges from each node
    # loop over all nodes in order (label) and neighbors in order (distance)
    # no self loops or multiple edges allowed
    for j in range(1, k // 2+1): # outer loop is neighbors
        targets = nodes[j:] + nodes[0:j] # first j nodes are now last in list
        # inner loop in node order
        for u,v in zip(nodes,targets):
            if random.random() < p:
                w = random.choice(nodes)
                # Enforce no self-loops or multiple edges
                while w == u or G.has_edge(u, w):
                    w = random.choice(nodes)
                    if G.degree(u) >= n-1:
                        # u is saturated; no rewiring target remains.
                        break # skip this rewiring
                else:
                    # Replace (u, v) with the shortcut (u, w); the edge
                    # count is unchanged by the rewiring.
                    G.remove_edge(u,v)
                    G.add_edge(u,w)
    return G
def connected_watts_strogatz_graph(n, k, p, tries=100, seed=None):
    """Returns a connected Watts–Strogatz small-world graph.
    Attempts to generate a connected graph by repeated generation of
    Watts–Strogatz small-world graphs.  An exception is raised if the maximum
    number of tries is exceeded.
    Parameters
    ----------
    n : int
        The number of nodes
    k : int
        Each node is joined with its ``k`` nearest neighbors in a ring
        topology.
    p : float
        The probability of rewiring each edge
    tries : int
        Number of attempts to generate a connected graph.
    seed : int, optional
        The seed for random number generator.
    Raises
    ------
    NetworkXError
        If a connected graph is not produced within ``tries`` attempts.
    See Also
    --------
    newman_watts_strogatz_graph()
    watts_strogatz_graph()
    """
    # Seed once, up front.  Passing `seed` into watts_strogatz_graph on
    # every attempt (as previously done) reseeded the RNG identically
    # each time, so a disconnected first attempt was regenerated
    # unchanged `tries` times and the call could never succeed.
    if seed is not None:
        random.seed(seed)
    for i in range(tries):
        G = watts_strogatz_graph(n, k, p)
        if nx.is_connected(G):
            return G
    raise nx.NetworkXError('Maximum number of tries exceeded')
def random_regular_graph(d, n, seed=None):
    r"""Returns a random ``d``-regular graph on ``n`` nodes.
    The resulting graph has no self-loops or parallel edges.
    Parameters
    ----------
    d : int
      The degree of each node.
    n : integer
      The number of nodes. The value of ``n * d`` must be even.
    seed : hashable object
        The seed for random number generator.
    Notes
    -----
    The nodes are numbered from ``0`` to ``n - 1``.
    Kim and Vu's paper [2]_ shows that this algorithm samples in an
    asymptotically uniform way from the space of random graphs when
    `d = O(n^{1 / 3 - \epsilon})`.
    Raises
    ------
    NetworkXError
        If ``n * d`` is odd or ``d`` is greater than or equal to ``n``.
    References
    ----------
    .. [1] A. Steger and N. Wormald,
       Generating random regular graphs quickly,
       Probability and Computing 8 (1999), 377-396, 1999.
       http://citeseer.ist.psu.edu/steger99generating.html
    .. [2] Jeong Han Kim and Van H. Vu,
       Generating random regular graphs,
       Proceedings of the thirty-fifth ACM symposium on Theory of computing,
       San Diego, CA, USA, pp 213--222, 2003.
       http://portal.acm.org/citation.cfm?id=780542.780576
    """
    # Each node contributes d edge endpoints, so n*d must be even.
    if (n * d) % 2 != 0:
        raise nx.NetworkXError("n * d must be even")
    if not 0 <= d < n:
        raise nx.NetworkXError("the 0 <= d < n inequality must be satisfied")
    if d == 0:
        return empty_graph(n)
    if seed is not None:
        random.seed(seed)
    def _suitable(edges, potential_edges):
        # Helper subroutine to check if there are suitable edges remaining
        # If False, the generation of the graph has failed
        if not potential_edges:
            return True
        for s1 in potential_edges:
            for s2 in potential_edges:
                # Two iterators on the same dictionary are guaranteed
                # to visit it in the same order if there are no
                # intervening modifications.
                if s1 == s2:
                    # Only need to consider s1-s2 pair one time
                    break
                if s1 > s2:
                    s1, s2 = s2, s1
                if (s1, s2) not in edges:
                    return True
        return False
    def _try_creation():
        # Attempt to create an edge set
        edges = set()
        # One "stub" (half-edge) per endpoint: node i appears d times.
        stubs = list(range(n)) * d
        while stubs:
            potential_edges = defaultdict(lambda: 0)
            random.shuffle(stubs)
            stubiter = iter(stubs)
            # Pair consecutive stubs; normalize pairs to (small, large).
            for s1, s2 in zip(stubiter, stubiter):
                if s1 > s2:
                    s1, s2 = s2, s1
                if s1 != s2 and ((s1, s2) not in edges):
                    edges.add((s1, s2))
                else:
                    # Self-loop or duplicate: both stubs go back into
                    # the pool for the next round of pairing.
                    potential_edges[s1] += 1
                    potential_edges[s2] += 1
            if not _suitable(edges, potential_edges):
                return None # failed to find suitable edge set
            stubs = [node for node, potential in potential_edges.items()
                     for _ in range(potential)]
        return edges
    # Even though a suitable edge set exists,
    # the generation of such a set is not guaranteed.
    # Try repeatedly to find one.
    edges = _try_creation()
    while edges is None:
        edges = _try_creation()
    G = nx.Graph()
    G.name = "random_regular_graph(%s, %s)" % (d, n)
    G.add_edges_from(edges)
    return G
def _random_subset(seq,m):
""" Return m unique elements from seq.
This differs from random.sample which can return repeated
elements if seq holds repeated elements.
"""
targets=set()
while len(targets)<m:
x=random.choice(seq)
targets.add(x)
return targets
def barabasi_albert_graph(n, m, seed=None):
    """Returns a random graph according to the Barabási–Albert preferential
    attachment model.
    A graph of ``n`` nodes is grown by attaching new nodes each with ``m``
    edges that are preferentially attached to existing nodes with high degree.
    Parameters
    ----------
    n : int
        Number of nodes
    m : int
        Number of edges to attach from a new node to existing nodes
    seed : int, optional
        Seed for random number generator (default=None).
    Returns
    -------
    G : Graph
    Raises
    ------
    NetworkXError
        If ``m`` does not satisfy ``1 <= m < n``.
    References
    ----------
    .. [1] A. L. Barabási and R. Albert "Emergence of scaling in
       random networks", Science 286, pp 509-512, 1999.
    """
    if m < 1 or  m >=n:
        raise nx.NetworkXError("Barabási–Albert network must have m >= 1"
                               " and m < n, m = %d, n = %d" % (m, n))
    if seed is not None:
        random.seed(seed)
    # Add m initial nodes (m0 in barabasi-speak)
    G=empty_graph(m)
    G.name="barabasi_albert_graph(%s,%s)"%(n,m)
    # Target nodes for new edges
    targets=list(range(m))
    # List of existing nodes, with nodes repeated once for each adjacent edge
    repeated_nodes=[]
    # Start adding the other n-m nodes. The first node is m.
    source=m
    while source<n:
        # Add edges to m nodes from the source.
        G.add_edges_from(zip([source]*m,targets))
        # Add one node to the list for each new edge just created.
        repeated_nodes.extend(targets)
        # And the new node "source" has m edges to add to the list.
        repeated_nodes.extend([source]*m)
        # Now choose m unique nodes from the existing nodes
        # Pick uniformly from repeated_nodes (preferential attachement)
        # -- a node's selection probability is proportional to its degree
        # because it appears once per incident edge.
        targets = _random_subset(repeated_nodes,m)
        source += 1
    return G
def powerlaw_cluster_graph(n, m, p, seed=None):
    """Holme and Kim algorithm for growing graphs with powerlaw
    degree distribution and approximate average clustering.
    Parameters
    ----------
    n : int
        the number of nodes
    m : int
        the number of random edges to add for each new node
    p : float,
        Probability of adding a triangle after adding a random edge
    seed : int, optional
        Seed for random number generator (default=None).
    Notes
    -----
    The average clustering has a hard time getting above a certain
    cutoff that depends on ``m``.  This cutoff is often quite low.  The
    transitivity (fraction of triangles to possible triangles) seems to
    decrease with network size.
    It is essentially the Barabási–Albert (BA) growth model with an
    extra step that each random edge is followed by a chance of
    making an edge to one of its neighbors too (and thus a triangle).
    This algorithm improves on BA in the sense that it enables a
    higher average clustering to be attained if desired.
    It seems possible to have a disconnected graph with this algorithm
    since the initial ``m`` nodes may not be all linked to a new node
    on the first iteration like the BA model.
    Raises
    ------
    NetworkXError
        If ``m`` does not satisfy ``1 <= m <= n`` or ``p`` does not
        satisfy ``0 <= p <= 1``.
    References
    ----------
    .. [1] P. Holme and B. J. Kim,
       "Growing scale-free networks with tunable clustering",
       Phys. Rev. E, 65, 026107, 2002.
    """
    if m < 1 or n < m:
        # Message corrected to match the actual check (and the
        # docstring): the condition enforces 1 <= m <= n, not m>1/m<n.
        raise nx.NetworkXError(
            "NetworkXError must have m>=1 and m<=n, m=%d,n=%d" % (m, n))
    if p > 1 or p < 0:
        raise nx.NetworkXError(
            "NetworkXError p must be in [0,1], p=%f" % (p))
    if seed is not None:
        random.seed(seed)
    G=empty_graph(m) # add m initial nodes (m0 in barabasi-speak)
    G.name="Powerlaw-Cluster Graph"
    repeated_nodes = list(G.nodes()) # list of existing nodes to sample from
                           # with nodes repeated once for each adjacent edge
    source=m # next node is m
    while source<n: # Now add the other n-1 nodes
        possible_targets = _random_subset(repeated_nodes,m)
        # do one preferential attachment for new node
        target=possible_targets.pop()
        G.add_edge(source,target)
        repeated_nodes.append(target) # add one node to list for each new link
        count=1
        while count<m: # add m-1 more new links
            if random.random()<p: # clustering step: add triangle
                # Candidate triangle closers: neighbors of the last
                # target not already linked to the new node.
                neighborhood=[nbr for nbr in G.neighbors(target) \
                              if not G.has_edge(source,nbr) \
                              and not nbr==source]
                if neighborhood: # if there is a neighbor without a link
                    nbr=random.choice(neighborhood)
                    G.add_edge(source,nbr) # add triangle
                    repeated_nodes.append(nbr)
                    count=count+1
                    continue # go to top of while loop
            # else do preferential attachment step if above fails
            target=possible_targets.pop()
            G.add_edge(source,target)
            repeated_nodes.append(target)
            count=count+1
        repeated_nodes.extend([source]*m) # add source node to list m times
        source += 1
    return G
def duplication_divergence_graph(n, p, seed=None):
    """Returns an undirected graph using the duplication-divergence model.

    Starting from two connected nodes, the graph grows to ``n`` nodes by
    repeatedly duplicating a randomly chosen node: each edge of the
    chosen node is copied onto the replica independently with retention
    probability ``p``. A replica that retains no edge is discarded and
    the step is retried.

    Parameters
    ----------
    n : int
        The desired number of nodes in the graph.
    p : float
        The probability for retaining the edge of the replicated node.
    seed : int, optional
        A seed for the random number generator of ``random`` (default=None).

    Returns
    -------
    G : Graph

    Raises
    ------
    NetworkXError
        If `p` is not a valid probability, or `n` is less than 2.

    References
    ----------
    .. [1] I. Ispolatov, P. L. Krapivsky, A. Yuryev,
       "Duplication-divergence model of protein interaction network",
       Phys. Rev. E, 71, 061911, 2005.
    """
    if p > 1 or p < 0:
        msg = "NetworkXError p={0} is not in [0,1].".format(p)
        raise nx.NetworkXError(msg)
    if n < 2:
        msg = 'n must be greater than or equal to 2'
        raise nx.NetworkXError(msg)
    if seed is not None:
        random.seed(seed)
    G = nx.Graph()
    G.graph['name'] = "Duplication-Divergence Graph"
    # Initialize the graph with two connected nodes.
    G.add_edge(0,1)
    replica = 2
    while replica < n:
        # Pick the node to duplicate, then create its replica.
        original = random.choice(list(G.nodes()))
        G.add_node(replica)
        # Independently retain each of the original's edges with
        # probability p (one random draw per neighbor, in order).
        kept = [nbr for nbr in G.neighbors(original) if random.random() < p]
        if kept:
            # Successful duplication: attach the retained edges.
            for nbr in kept:
                G.add_edge(replica, nbr)
            replica += 1
        else:
            # No edge survived -- discard the replica and retry.
            G.remove_node(replica)
    return G
def random_lobster(n, p1, p2, seed=None):
    """Returns a random lobster graph.
    A lobster is a tree that reduces to a caterpillar when pruning all
    leaf nodes. A caterpillar is a tree that reduces to a path graph
    when pruning all leaf nodes; setting ``p2`` to zero produces a
    caterpillar.
    Parameters
    ----------
    n : int
        The expected number of nodes in the backbone
    p1 : float
        Probability of adding an edge to the backbone
    p2 : float
        Probability of adding an edge one level beyond backbone
    seed : int, optional
        Seed for random number generator (default=None).
    """
    # a necessary ingredient in any self-respecting graph library
    if seed is not None:
        random.seed(seed)
    # Backbone length is uniform on [0, 2n], giving expected length n.
    llen = int(2 * random.random() * n + 0.5)
    L = path_graph(llen)
    L.name = "random_lobster(%d,%s,%s)" % (n, p1, p2)
    # build caterpillar: add edges to path graph with probability p1
    current_node = llen - 1
    # Fix: the loop variable used to be named `n`, shadowing the
    # expected-backbone-size parameter.
    for backbone_node in range(llen):
        if random.random() < p1: # add fuzzy caterpillar parts
            current_node += 1
            L.add_edge(backbone_node, current_node)
            if random.random() < p2: # add crunchy lobster bits
                current_node += 1
                L.add_edge(current_node - 1, current_node)
    return L # voila, un lobster!
def random_shell_graph(constructor, seed=None):
    """Returns a random shell graph for the constructor given.
    Parameters
    ----------
    constructor : list of three-tuples
        Represents the parameters for a shell, starting at the center
        shell.  Each element of the list must be of the form ``(n, m,
        d)``, where ``n`` is the number of nodes in the shell, ``m`` is
        the number of edges in the shell, and ``d`` is the ratio of
        inter-shell (next) edges to intra-shell edges. If ``d`` is zero,
        there will be no intra-shell edges, and if ``d`` is one there
        will be all possible intra-shell edges.
    seed : int, optional
        Seed for random number generator (default=None).
    Examples
    --------
    >>> constructor = [(10, 20, 0.8), (20, 40, 0.8)]
    >>> G = nx.random_shell_graph(constructor)
    """
    G=empty_graph(0)
    G.name="random_shell_graph(constructor)"
    if seed is not None:
        random.seed(seed)
    glist=[]
    intra_edges=[]
    nnodes=0
    # create gnm graphs for each shell
    for (n,m,d) in constructor:
        # Split each shell's edge budget: inter_edges stay inside the
        # shell's own gnm graph, the rest connect it to the next shell.
        inter_edges=int(m*d)
        intra_edges.append(m-inter_edges)
        # Relabel so node ids are globally unique across shells.
        g=nx.convert_node_labels_to_integers(
                     gnm_random_graph(n,inter_edges),
                     first_label=nnodes)
        glist.append(g)
        nnodes+=n
        G=nx.operators.union(G,g)
    # connect the shells randomly
    for gi in range(len(glist)-1):
        nlist1 = list(glist[gi])
        nlist2 = list(glist[gi + 1])
        total_edges=intra_edges[gi]
        edge_count=0
        # Rejection-sample endpoint pairs between adjacent shells until
        # the requested number of connecting edges exists.
        while edge_count < total_edges:
            u = random.choice(nlist1)
            v = random.choice(nlist2)
            if u==v or G.has_edge(u,v):
                continue
            else:
                G.add_edge(u,v)
                edge_count=edge_count+1
    return G
def random_powerlaw_tree(n, gamma=3, seed=None, tries=100):
    """Returns a tree whose degree distribution follows a power law.

    A trial power-law degree sequence is drawn and elements are swapped
    with fresh power-law draws until the sequence is realizable as a
    tree (the edge count equals the node count minus one); the tree is
    then built from that sequence.

    Parameters
    ----------
    n : int
        The number of nodes.
    gamma : float
        Exponent of the power law.
    seed : int, optional
        Seed for random number generator (default=None).
    tries : int
        Number of attempts to adjust the sequence to make it a tree.

    Raises
    ------
    NetworkXError
        If no valid sequence is found within the maximum number of
        attempts.
    """
    # Delegates sequence search; may raise NetworkXError on exhaustion.
    degrees = random_powerlaw_tree_sequence(n, gamma=gamma, seed=seed,
                                            tries=tries)
    tree = degree_sequence_tree(degrees)
    tree.name = "random_powerlaw_tree(%s,%s)" % (n, gamma)
    return tree
def random_powerlaw_tree_sequence(n, gamma=3, seed=None, tries=100):
    """Returns a degree sequence for a tree with a power law distribution.
    Parameters
    ----------
    n : int,
        The number of nodes.
    gamma : float
        Exponent of the power law.
    seed : int, optional
        Seed for random number generator (default=None).
    tries : int
        Number of attempts to adjust the sequence to make it a tree.
    Raises
    ------
    NetworkXError
        If no valid sequence is found within the maximum number of
        attempts.
    Notes
    -----
    A trial power law degree sequence is chosen and then elements are
    swapped with new elements from a power law distribution until
    the sequence makes a tree (by checking, for example, that the number of
    edges is one smaller than the number of nodes).
    """
    if seed is not None:
        random.seed(seed)
    # get trial sequence
    z = nx.utils.powerlaw_sequence(n, exponent=gamma)
    # round to integer values in the range [0,n]
    zseq = [min(n, max(int(round(s)), 0)) for s in z]
    # another sequence to swap values from
    z = nx.utils.powerlaw_sequence(tries, exponent=gamma)
    # round to integer values in the range [0,n]
    swap = [min(n, max(int(round(s)), 0)) for s in z]
    # Fix: iterate a fixed number of times. The previous loop was
    # ``for deg in swap`` while also calling ``swap.pop()`` inside the
    # body, mutating the list during iteration so only about half of
    # the requested attempts were ever made (and ``deg`` was unused).
    for _ in range(tries):
        # zseq is a tree degree sequence iff sum(zseq) == 2*(n-1); the
        # rearranged integer form below avoids floating point.
        if 2 * n - sum(zseq) == 2:
            return zseq
        # Replace a random entry with the next unused swap value.
        index = random.randint(0, n - 1)
        zseq[index] = swap.pop()
    # Give the final swapped sequence a chance as well.
    if 2 * n - sum(zseq) == 2:
        return zseq
    raise nx.NetworkXError('Exceeded max (%d) attempts for a valid tree'
                           ' sequence.' % tries)
|
michaelpacer/networkx
|
networkx/generators/random_graphs.py
|
Python
|
bsd-3-clause
| 31,462
|
[
"VisIt"
] |
a915537ed5f64e7638b3da5012702e3c7c12eaabe877ad42a9dbaa77fa241dcc
|
# coding: UTF-8
# Write a function that finds the capital of a U.S. state.
# (Translated from the original Korean: 미국의 주의 수도를 찾는 함수를 만드세요.)
# Mapping of U.S. state name -> capital city.
STATES_CAPITALS = {
    'Alabama': 'Montgomery',
    'Alaska': 'Juneau',
    'Arizona': 'Phoenix',
    'Arkansas': 'Little Rock',
    'California': 'Sacramento',
    'Colorado': 'Denver',
    'Connecticut': 'Hartford',
    'Delaware': 'Dover',
    'Florida': 'Tallahassee',
    'Georgia': 'Atlanta',
    'Hawaii': 'Honolulu',
    'Idaho': 'Boise',
    'Illinois': 'Springfield',
    'Indiana': 'Indianapolis',
    'Iowa': 'Des Moines',
    'Kansas': 'Topeka',
    'Kentucky': 'Frankfort',
    'Louisiana': 'Baton Rouge',
    'Maine': 'Augusta',
    'Maryland': 'Annapolis',
    'Massachusetts': 'Boston',
    'Michigan': 'Lansing',
    'Minnesota': 'Saint Paul',
    'Mississippi': 'Jackson',
    'Missouri': 'Jefferson City',
    'Montana': 'Helena',
    'Nebraska': 'Lincoln',
    'Nevada': 'Carson City',
    'New Hampshire': 'Concord',
    'New Jersey': 'Trenton',
    'New Mexico': 'Santa Fe',
    'New York': 'Albany',
    'North Carolina': 'Raleigh',
    'North Dakota': 'Bismarck',
    'Ohio': 'Columbus',
    'Oklahoma': 'Oklahoma City',
    'Oregon': 'Salem',
    'Pennsylvania': 'Harrisburg',
    'Rhode Island': 'Providence',
    'South Carolina': 'Columbia',
    'South Dakota': 'Pierre',
    'Tennessee': 'Nashville',
    'Texas': 'Austin',
    'Utah': 'Salt Lake City',
    'Vermont': 'Montpelier',
    'Virginia': 'Richmond',
    'Washington': 'Olympia',
    'West Virginia': 'Charleston',
    'Wisconsin': 'Madison',
    'Wyoming': 'Cheyenne',
}
# Write your code below.
|
inhwane/kookmin
|
source/problem/function_state_capital.py
|
Python
|
mit
| 1,565
|
[
"COLUMBUS"
] |
e89bb1509217f07e49987a510e7fb1d6146123f0409116f5adc24438d2c6934f
|
# Copyright (C) 2008 NSC Jyvaskyla
# Please see the accompanying LICENSE file for further information.
from .atoms import Atoms
from .aseinterface import Hotbit, database_from_path
from .atoms import ExtendedTrajectory
from .electrostatics import Electrostatics
from .element import Element
from .elements import Elements
from .environment import Environment
from .environment import LinearlyPolarizedLaser
from .grids import Grids
from .interactions import Interactions
from .occupations import Occupations
from .output import Output
from .repulsion import RepulsivePotential
from .repulsion import Repulsion
from .solver import Solver
from .states import States
from .wfpropagation import WFPropagation
from hotbit.analysis import LinearResponse
from hotbit.analysis import MullikenAnalysis
from hotbit.analysis import MullikenBondAnalysis
from hotbit.analysis import DensityOfStates
from hotbit.containers import DoubleChiral
from hotbit.containers import Bravais
from hotbit.containers import Chiral
from hotbit.containers import Wedge
from hotbit.containers import Sphere
from hotbit.containers import Saddle
from hotbit.containers import Gaussian
from hotbit.containers import Slab
from hotbit.containers import ContainerTest1
from hotbit.containers import TwistAndTurn
from hotbit.parametrization import SlaterKosterTable
from hotbit.parametrization import KSAllElectron
from hotbit.parametrization import RepulsiveFitting
from hotbit.parametrization import ParametrizationTest
from os import environ, path
import atexit
import _hotbit
# Locate hotbit's parameter directories from the HOTBIT_PARAMETERS
# environment variable.
# NOTE(review): environ.get() returns None when the variable is unset, so
# the path.join calls below would raise TypeError -- presumably the variable
# is mandatory for hotbit to run at all; confirm.
hbpar = environ.get('HOTBIT_PARAMETERS')
fixpar = path.join(environ.get('HOTBIT_PARAMETERS'),'fixed_parameters')
testpar = path.join(environ.get('HOTBIT_PARAMETERS'),'inofficial')
#
# Free eigenvalue solver workspace on exit
#
atexit.register(_hotbit.free_geig_workspace)
from hotbit.version import hotbit_version
|
pekkosk/hotbit
|
hotbit/__init__.py
|
Python
|
gpl-2.0
| 1,867
|
[
"Gaussian"
] |
b5f6a93b927782f990045fa7ca33ec75631324d25c62ada503de2ef730f965d6
|
from collections import namedtuple
from datetime import timedelta
import collections
import collections.abc
import json
import logging
import numbers

from django.db import IntegrityError
from django.utils.functional import cached_property
from redis.exceptions import ConnectionError, ResponseError

from experiments.models import Enrollment
from experiments.manager import experiment_manager
from experiments.dateutils import now, fix_awareness, datetime_from_timestamp, timestamp_from_datetime
from experiments.signals import user_enrolled
from experiments.experiment_counters import ExperimentCounter
from experiments.redis_client import get_redis_client
from experiments import conf

try:
    from itertools import zip_longest as izip_longest
except ImportError:
    from itertools import izip_longest
logger = logging.getLogger('experiments')
UNCONFIRMED_HUMAN_GOALS_REDIS_KEY = "experiments:goals:%s"
def participant(request=None, session=None, user=None):
    """Return the experiment participant for the given request/session/user.

    The result is cached on the request object because WebUser can involve
    database lookups; login/logout signals clear the cache via
    clear_participant_cache.
    """
    if request and hasattr(request, '_experiments_user'):
        return request._experiments_user
    result = _get_participant(request, session, user)
    if request:
        request._experiments_user = result
    return result
def clear_participant_cache(request):
    """Drop the participant cached on *request* by participant(), if any."""
    try:
        del request._experiments_user
    except AttributeError:
        pass
def _get_participant(request, session, user):
    """Build the participant object for this visitor (no request caching).

    Bots (matched by conf.BOT_REGEX) and unidentifiable visitors get a
    DummyUser; everyone else gets a WebUser keyed on user or session.
    """
    if request:
        if hasattr(request, 'user') and not user:
            user = request.user
        if hasattr(request, 'session') and not session:
            session = request.session
        if conf.BOT_REGEX.search(request.META.get("HTTP_USER_AGENT", "")):
            return DummyUser()
    if user and user.is_authenticated:
        if getattr(user, 'is_confirmed_human', True):
            return WebUser(user=user, request=request)
        return DummyUser()
    if session:
        return WebUser(session=session, request=request)
    return DummyUser()
EnrollmentData = namedtuple('EnrollmentData', ['experiment', 'alternative', 'enrollment_date', 'last_seen'])
class BaseUser(object):
    """Represents a user (either authenticated or session based) which can take part in experiments"""

    def __init__(self):
        self.experiment_counter = ExperimentCounter()

    def enroll(self, experiment_name, alternatives, force_alternative=None):
        """
        Enroll this user in the experiment if they are not already part of it. Returns the selected alternative

        force_alternative: Optionally force a user in an alternative at enrollment time
        """
        chosen_alternative = conf.CONTROL_GROUP
        experiment = experiment_manager.get_experiment(experiment_name)
        if experiment:
            if experiment.is_displaying_alternatives():
                # ``collections.Mapping`` was removed in Python 3.10; the ABC
                # lives in ``collections.abc``.
                if isinstance(alternatives, collections.abc.Mapping):
                    # Mapping form carries explicit weights per alternative.
                    if conf.CONTROL_GROUP not in alternatives:
                        experiment.ensure_alternative_exists(conf.CONTROL_GROUP, 1)
                    for alternative, weight in alternatives.items():
                        experiment.ensure_alternative_exists(alternative, weight)
                else:
                    alternatives_including_control = alternatives + [conf.CONTROL_GROUP]
                    for alternative in alternatives_including_control:
                        experiment.ensure_alternative_exists(alternative)
                assigned_alternative = self._get_enrollment(experiment)
                if assigned_alternative:
                    chosen_alternative = assigned_alternative
                elif experiment.is_accepting_new_users():
                    if force_alternative:
                        chosen_alternative = force_alternative
                    else:
                        chosen_alternative = experiment.random_alternative()
                    self._set_enrollment(experiment, chosen_alternative)
            else:
                # Experiment exists but is not showing alternatives: everyone
                # sees the default.
                chosen_alternative = experiment.default_alternative
        return chosen_alternative

    def get_alternative(self, experiment_name):
        """
        Get the alternative this user is enrolled in.
        """
        experiment = None
        try:
            # catching the KeyError instead of using .get so that the experiment is auto created if desired
            experiment = experiment_manager[experiment_name]
        except KeyError:
            pass
        if experiment:
            if experiment.is_displaying_alternatives():
                alternative = self._get_enrollment(experiment)
                if alternative is not None:
                    return alternative
            else:
                return experiment.default_alternative
        return conf.CONTROL_GROUP

    def set_alternative(self, experiment_name, alternative):
        """Explicitly set the alternative the user is enrolled in for the specified experiment.

        This allows you to change a user between alternatives. The user and goal counts for the new
        alternative will be increment, but those for the old one will not be decremented. The user will
        be enrolled in the experiment even if the experiment would not normally accept this user."""
        experiment = experiment_manager.get_experiment(experiment_name)
        if experiment:
            self._set_enrollment(experiment, alternative)

    def goal(self, goal_name, count=1):
        """Record that this user has performed a particular goal

        This will update the goal stats for all experiments the user is enrolled in."""
        for enrollment in self._get_all_enrollments():
            if enrollment.experiment.is_displaying_alternatives():
                self._experiment_goal(enrollment.experiment, enrollment.alternative, goal_name, count)

    def confirm_human(self):
        """Mark that this is a real human being (not a bot) and thus results should be counted"""
        pass

    def incorporate(self, other_user):
        """Incorporate all enrollments and goals performed by the other user

        If this user is not enrolled in a given experiment, the results for the
        other user are incorporated. For experiments this user is already
        enrolled in the results of the other user are discarded.

        This takes a relatively large amount of time for each experiment the other
        user is enrolled in."""
        for enrollment in other_user._get_all_enrollments():
            if not self._get_enrollment(enrollment.experiment):
                self._set_enrollment(enrollment.experiment, enrollment.alternative, enrollment.enrollment_date, enrollment.last_seen)
                goals = self.experiment_counter.participant_goal_frequencies(enrollment.experiment, enrollment.alternative, other_user._participant_identifier())
                for goal_name, count in goals:
                    self.experiment_counter.increment_goal_count(enrollment.experiment, enrollment.alternative, goal_name, self._participant_identifier(), count)
            other_user._cancel_enrollment(enrollment.experiment)

    def visit(self):
        """Record that the user has visited the site for the purposes of retention tracking"""
        for enrollment in self._get_all_enrollments():
            if enrollment.experiment.is_displaying_alternatives():
                # We have two different goals, VISIT_NOT_PRESENT_COUNT_GOAL and VISIT_PRESENT_COUNT_GOAL.
                # VISIT_PRESENT_COUNT_GOAL will avoid firing on the first time we set last_seen as it is assumed that the user is
                # on the page and therefore it would automatically trigger and be valueless.
                # This should be used for experiments when we enroll the user as part of the pageview,
                # alternatively we can use the NOT_PRESENT GOAL which will increment on the first pageview,
                # this is mainly useful for notification actions when the users isn't initially present.
                if not enrollment.last_seen:
                    self._experiment_goal(enrollment.experiment, enrollment.alternative, conf.VISIT_NOT_PRESENT_COUNT_GOAL, 1)
                    self._set_last_seen(enrollment.experiment, now())
                elif now() - enrollment.last_seen >= timedelta(hours=conf.SESSION_LENGTH):
                    self._experiment_goal(enrollment.experiment, enrollment.alternative, conf.VISIT_NOT_PRESENT_COUNT_GOAL, 1)
                    self._experiment_goal(enrollment.experiment, enrollment.alternative, conf.VISIT_PRESENT_COUNT_GOAL, 1)
                    self._set_last_seen(enrollment.experiment, now())

    def _get_enrollment(self, experiment):
        """Get the name of the alternative this user is enrolled in for the specified experiment

        `experiment` is an instance of Experiment. If the user is not currently enrolled returns None."""
        raise NotImplementedError

    def _set_enrollment(self, experiment, alternative, enrollment_date=None, last_seen=None):
        """Explicitly set the alternative the user is enrolled in for the specified experiment.

        This allows you to change a user between alternatives. The user and goal counts for the new
        alternative will be increment, but those for the old one will not be decremented."""
        raise NotImplementedError

    def is_enrolled(self, experiment_name, alternative):
        """Test if the user is enrolled in the supplied alternative for the given experiment.

        The supplied alternative will be added to the list of possible alternatives for the
        experiment if it is not already there. If the user is not yet enrolled in the supplied
        experiment they will be enrolled, and an alternative chosen at random."""
        # (A stray duplicate docstring that previously shadowed this one has
        # been removed; it was a dead string statement.)
        chosen_alternative = self.enroll(experiment_name, [alternative])
        return alternative == chosen_alternative

    def _participant_identifier(self):
        "Unique identifier for this user in the counter store"
        raise NotImplementedError

    def _get_all_enrollments(self):
        "Return experiment, alternative tuples for all experiments the user is enrolled in"
        raise NotImplementedError

    def _cancel_enrollment(self, experiment):
        "Remove the enrollment and any goals the user has against this experiment"
        raise NotImplementedError

    def _experiment_goal(self, experiment, alternative, goal_name, count):
        "Record a goal against a particular experiment and alternative"
        raise NotImplementedError

    def _set_last_seen(self, experiment, last_seen):
        "Set the last time the user was seen associated with this experiment"
        raise NotImplementedError
class DummyUser(BaseUser):
    """Inert participant used for bots and unidentifiable visitors.

    Never enrolls, never records goals, and always reports the control group.
    """

    def _get_enrollment(self, experiment):
        # Dummy users are never enrolled in anything.
        return None

    def _set_enrollment(self, experiment, alternative, enrollment_date=None, last_seen=None):
        return None

    def is_enrolled(self, experiment_name, alternative):
        # Only the control group ever "matches" for a dummy user.
        return conf.CONTROL_GROUP == alternative

    def incorporate(self, other_user):
        # Throw the other user's data away rather than merging it.
        for enrollment in other_user._get_all_enrollments():
            other_user._cancel_enrollment(enrollment.experiment)

    def _participant_identifier(self):
        return ""

    def _get_all_enrollments(self):
        return list()

    def _is_enrolled_in_experiment(self, experiment):
        return False

    def _cancel_enrollment(self, experiment):
        return None

    def _get_goal_counts(self, experiment, alternative):
        return dict()

    def _experiment_goal(self, experiment, alternative, goal_name, count):
        return None

    def _set_last_seen(self, experiment, last_seen):
        return None
class WebUser(BaseUser):
    """Participant backed by Django ``Enrollment`` rows and Redis.

    Handles both authenticated users (``user``) and anonymous visitors
    (``session``). Goals hit before a visitor is confirmed human are
    buffered in Redis and replayed by ``confirm_human``.
    """
    def __init__(self, user=None, session=None, request=None):
        # Per-instance cache of experiment name -> alternative (or None).
        self._enrollment_cache = {}
        self.user = user
        self.session = session
        self.request = request
        # Redis list key buffering goals hit before the human check passes.
        self._redis_goals_key = UNCONFIRMED_HUMAN_GOALS_REDIS_KEY % self._participant_identifier()
        super(WebUser, self).__init__()
    @cached_property
    def _redis(self):
        # Created lazily so instances that never touch Redis pay nothing.
        return get_redis_client()
    @property
    def _qs_kwargs(self):
        # Queryset filter identifying this participant's Enrollment rows.
        if self.user:
            return {"user": self.user}
        else:
            return {"session_key": self._session_key}
    def _get_enrollment(self, experiment):
        # Memoized DB lookup of the participant's alternative for experiment.
        if experiment.name not in self._enrollment_cache:
            try:
                self._enrollment_cache[experiment.name] = Enrollment.objects.get(experiment=experiment, **self._qs_kwargs).alternative
            except Enrollment.DoesNotExist:
                self._enrollment_cache[experiment.name] = None
        return self._enrollment_cache[experiment.name]
    def _set_enrollment(self, experiment, alternative, enrollment_date=None, last_seen=None):
        # Invalidate the memoized value: it is about to change.
        if experiment.name in self._enrollment_cache:
            del self._enrollment_cache[experiment.name]
        try:
            enrollment, _ = Enrollment.objects.get_or_create(experiment=experiment, defaults={'alternative': alternative}, **self._qs_kwargs)
        except IntegrityError:
            # Already registered (db race condition under high load)
            return
        # Update alternative if it doesn't match
        enrollment_changed = False
        if enrollment.alternative != alternative:
            enrollment.alternative = alternative
            enrollment_changed = True
        if enrollment_date:
            enrollment.enrollment_date = enrollment_date
            enrollment_changed = True
        if last_seen:
            enrollment.last_seen = last_seen
            enrollment_changed = True
        if enrollment_changed:
            enrollment.save()
        # Only confirmed humans hit the participant counters; unconfirmed
        # participants are just logged for later reconciliation.
        if self._is_verified_human:
            self.experiment_counter.increment_participant_count(experiment, alternative, self._participant_identifier())
        else:
            logger.info(json.dumps({'type':'participant_unconfirmed', 'experiment': experiment.name, 'alternative': alternative, 'participant': self._participant_identifier()}))
        user_enrolled.send(self, experiment=experiment.name, alternative=alternative, user=self.user, session=self.session)
    def _participant_identifier(self):
        # Counter-store key: prefer the user pk, fall back to session key.
        if self.user:
            return 'user:%s' % self.user.pk
        else:
            return 'session:%s' % self._session_key
    def _get_all_enrollments(self):
        # Yields EnrollmentData for every experiment this participant joined.
        enrollments = Enrollment.objects.filter(**self._qs_kwargs).select_related("experiment")
        if enrollments:
            for enrollment in enrollments:
                yield EnrollmentData(enrollment.experiment, enrollment.alternative, enrollment.enrollment_date, enrollment.last_seen)
    def _cancel_enrollment(self, experiment):
        # Remove both the DB row and the participant count for experiment.
        try:
            enrollment = Enrollment.objects.get(experiment=experiment, **self._qs_kwargs)
        except Enrollment.DoesNotExist:
            pass
        else:
            self.experiment_counter.remove_participant(experiment, enrollment.alternative, self._participant_identifier())
            enrollment.delete()
    def _experiment_goal(self, experiment, alternative, goal_name, count):
        # Confirmed humans hit the counters directly; others are buffered in
        # Redis until confirm_human() replays them.
        if self._is_verified_human:
            self.experiment_counter.increment_goal_count(experiment, alternative, goal_name, self._participant_identifier(), count)
        else:
            try:
                self._redis.lpush(self._redis_goals_key, json.dumps((experiment.name, alternative, goal_name, count)))
                # Setting an expiry on this data otherwise it could linger for a while
                # and also fill up redis quickly if lots of bots begin to scrape the app.
                # Human confirmation processes are generally quick so this defaults to a
                # low value (but it can be configured via Django settings)
                self._redis.expire(self._redis_goals_key, conf.REDIS_GOALS_TTL)
            except (ConnectionError, ResponseError):
                # Handle Redis failures gracefully
                pass
            logger.info(json.dumps({'type': 'goal_hit_unconfirmed', 'goal': goal_name, 'goal_count': count, 'experiment': experiment.name, 'alternative': alternative, 'participant': self._participant_identifier()}))
    def confirm_human(self):
        # Authenticated users are implicitly human; nothing to replay.
        if self.user:
            return
        self.session[conf.CONFIRM_HUMAN_SESSION_KEY] = True
        logger.info(json.dumps({'type': 'confirm_human', 'participant': self._participant_identifier()}))
        # Replay enrollments
        for enrollment in self._get_all_enrollments():
            self.experiment_counter.increment_participant_count(enrollment.experiment, enrollment.alternative, self._participant_identifier())
        # Replay goals
        try:
            goals = self._redis.lrange(self._redis_goals_key, 0, -1)
            if goals:
                try:
                    for data in goals:
                        experiment_name, alternative, goal_name, count = json.loads(data)
                        experiment = experiment_manager.get_experiment(experiment_name)
                        if experiment:
                            self.experiment_counter.increment_goal_count(experiment, alternative, goal_name, self._participant_identifier(), count)
                except ValueError:
                    pass # Values from older version
                finally:
                    # Always clear the buffer, even on partially bad data.
                    self._redis.delete(self._redis_goals_key)
        except (ConnectionError, ResponseError):
            # Handle Redis failures gracefully
            pass
    def _set_last_seen(self, experiment, last_seen):
        # Bulk update avoids loading the row first.
        Enrollment.objects.filter(experiment=experiment, **self._qs_kwargs).update(last_seen=last_seen)
    @property
    def _is_verified_human(self):
        # With VERIFY_HUMAN on, anonymous sessions must opt in via
        # confirm_human(); authenticated users always count.
        if conf.VERIFY_HUMAN and not self.user:
            return self.session.get(conf.CONFIRM_HUMAN_SESSION_KEY, False)
        else:
            return True
    @property
    def _session_key(self):
        # Persist a stable copy of the session key; forces a save so an
        # unsaved session gets one.
        if not self.session:
            return None
        if 'experiments_session_key' not in self.session:
            if not self.session.session_key:
                self.session.save() # Force session key
            self.session['experiments_session_key'] = self.session.session_key
        return self.session['experiments_session_key']
def grouper(iterable, n, fillvalue=None):
    """Collect data into fixed-length chunks, padding the last with *fillvalue*.

    grouper('ABCDEFG', 3, 'x') --> ('A','B','C') ('D','E','F') ('G','x','x')
    Taken from the recipe at
    https://docs.python.org/2.7/library/itertools.html#itertools-recipes
    """
    chunks = [iter(iterable)] * n
    return izip_longest(*chunks, fillvalue=fillvalue)
__all__ = ['participant']
|
mixcloud/django-experiments
|
experiments/utils.py
|
Python
|
mit
| 18,840
|
[
"VisIt"
] |
d17dff353e22df0607921e5a89b38d22e7d6950e45ba0ff27d38152ac077db54
|
#!python
import random
import math
import pyglet
import pyglet.gl as gl
import vector as vec
import timevars as tv
from gameassets import GameAssets
from config import Config
#gAssets = None
class Swarm(object):
    ''' All the non-player objects (not sure if ShotSprites are included)'''

    def __init__(self, props):
        self.props = props
        self.meteors = []
        self.meteorBatch = pyglet.graphics.Batch()
        self.explosions = []
        self.gameTime = 0.0
        # Difficulty curves: target meteor count and base speed as a
        # function of elapsed game time (seconds).
        self.meteorCountCurve = tv.PLInterpolator([
            (  0, 12),
            ( 60, 25),
            (120, 30),
            (240, 45),
            (300, 55),
            (1000, 1000),
            ])
        self.meteorSpeedCurve = tv.PLInterpolator([
            (  0, 100),
            ( 90, 150),
            (180, 200),
            (220, 230),
            (1000, 500),
            ])
        # Weighted pool of asteroid image names; small ones are most common.
        self.meteorPool = (
            20 * ['asteroid-1'] +
            10 * ['asteroid-2'] +
             5 * ['asteroid-3']
        )

    def initialMeteors(self, n, shipPosition):
        """Scatter up to n meteors over the 3w x 3h field, avoiding the ship."""
        w = self.props.windowWidth
        h = self.props.windowHeight
        for _ in range(0, n):
            x = random.uniform(-w, 2*w)
            y = random.uniform(-h, 2*h)
            dx, dy = (x - shipPosition[0]), (y - shipPosition[1])
            if dx*dx + dy*dy < 150*150:
                # Don't start off right next to a meteor
                # And it's okay if we don't get exactly n created here.
                continue
            speed = random.gauss(100, 30)
            name = random.choice(self.meteorPool)
            th = 360*random.random()
            u, v = vec.uvec(th)
            m = MeteorSprite(name, (x, y), (speed*u, speed*v), self.meteorBatch, self.props)
            self.meteors.append(m)

    def initialMeteors2(self, n, shipPosition):
        """Alternative start: four marching lines of meteors converging on the ship."""
        # Marching lines of death
        w = self.props.windowWidth
        h = self.props.windowHeight
        shipX, shipY = shipPosition
        offset = 200
        baseSpeed = 200
        spacing = 80
        for x in range(shipX-w//2, shipX+w//2, spacing):
            speed = random.gauss(baseSpeed, 30)
            y = shipY - h//2 - offset
            name = random.choice(self.meteorPool)
            m = MeteorSprite(name, (x, y), (0, speed), self.meteorBatch, self.props)
            self.meteors.append(m)
            speed = random.gauss(baseSpeed, 30)
            y = shipY + h//2 + offset
            name = random.choice(self.meteorPool)
            m = MeteorSprite(name, (x, y), (0, -speed), self.meteorBatch, self.props)
            self.meteors.append(m)
        for y in range(shipY-h//2, shipY+h//2, spacing):
            speed = random.gauss(baseSpeed, 30)
            x = shipX - w//2 - offset
            name = random.choice(self.meteorPool)
            m = MeteorSprite(name, (x, y), (speed, 0), self.meteorBatch, self.props)
            self.meteors.append(m)
            speed = random.gauss(baseSpeed, 30)
            x = shipX + w//2 + offset
            name = random.choice(self.meteorPool)
            m = MeteorSprite(name, (x, y), (-speed, 0), self.meteorBatch, self.props)
            self.meteors.append(m)

    def nItems(self):
        return len(self.meteors)

    def objects(self):
        return self.meteors

    def explode(self, meteor):
        """Kill a meteor and spawn an explosion sprite at its position."""
        meteor.alive = False
        exp = ExplosionSprite(meteor.x, meteor.y)
        self.explosions.append(exp)

    def draw(self):
        self.meteorBatch.draw()
        for exp in self.explosions:
            exp.draw()

    def update(self, dt):
        """Advance meteors and explosions, culling (and deleting) dead ones."""
        self.gameTime += dt
        for m in self.meteors:
            m.update(dt)
            if not m.alive:
                m.delete()
        # Test truthiness directly instead of comparing "== True".
        self.meteors = [m for m in self.meteors if m.alive]
        for exp in self.explosions:
            exp.update(dt)
        self.explosions = [e for e in self.explosions if e.alive]

    def spawnNew(self, shipPosition, viewportOrigin):
        """Maybe create one new meteor just outside the viewport.

        This is very stochastic. It tries to create a new meteor, but if it
        doesn't on this go-around it just returns, as it will be called
        again fairly soon.
        """
        targetN = int(self.meteorCountCurve(self.gameTime))
        if len(self.meteors) >= targetN:
            return
        w = self.props.windowWidth
        h = self.props.windowHeight
        x = None
        y = None
        offset = 250
        side = random.randint(0, 3)
        # side selects (left, right, bottom, top)
        sides = ('left', 'right', 'bottom', 'top') # for debugging
        if side < 2:
            # left or right
            y = viewportOrigin[1] + random.randrange(h)
            if side == 0:
                x = viewportOrigin[0] - offset
            else:
                x = viewportOrigin[0] + w + offset
        else:
            # top or bottom
            x = viewportOrigin[0] + random.randrange(w)
            if side == 2:
                y = viewportOrigin[1] - offset
            else:
                y = viewportOrigin[1] + h + offset
        # Make sure it's within the meteor field
        if x < -w or y < -h or x > 2*w or y > 2*h:
            return
        speedBase = self.meteorSpeedCurve(self.gameTime)
        speed = random.gauss(speedBase, 30)
        name = random.choice(self.meteorPool)
        th = 360*random.random()
        u, v = vec.uvec(th)
        m = MeteorSprite(name, (x, y), (speed*u, speed*v), self.meteorBatch, self.props)
        self.meteors.append(m)

    # To Be Obsoleted
    def addMeteorsXXX(self, n, shipPosition):
        # Disabled: early torus-wrap spawner kept only for reference.
        return
        w = self.props.windowWidth
        h = self.props.windowHeight
        for _ in range(0, n):
            # Meteors bounce around in a 3w x 3h block
            # Spawn new meteor on "opposite side of the torus"
            # This is not good!!! XXX
            speed = random.gauss(100, 30)
            x = tv.wrap(shipPosition[0] + 1.5*w, -w, 2*w)
            y = tv.wrap(shipPosition[1] + 1.5*h, -h, 2*h)
            th = 360*random.random()
            u, v = vec.uvec(th)
            m = MeteorSprite("foo", (x, y), (speed*u, speed*v), self.meteorBatch, self.props)
            self.meteors.append(m)

    def findShotHit(self, shot, margin):
        """Return the nearest meteor intersected by the shot's ray, or None."""
        prevNearest = 1000000
        hitMeteor = None
        rayO, rayU = shot.get()
        for m in self.meteors:
            x, y = m.getCenter()
            across, along = vec.ray_x_pnt(rayO, rayU, vec.Vector(x, y))
            if (along > 0 and along < 1200 and
                across < m.getRadius() + margin and
                along < prevNearest):
                hitMeteor = m
                prevNearest = along
        return hitMeteor
class MeteorSprite(pyglet.sprite.Sprite):
    """A drifting, spinning meteor that bounces inside the 3w x 3h field."""

    def __init__(self, name, posn, vel, batch, props):
        """name -- asset name (e.g. 'asteroid-1'); posn -- (x, y) start;
        vel -- (vx, vy) velocity; batch -- pyglet draw batch; props -- object
        exposing windowWidth/windowHeight."""
        img = GameAssets.Instance().getImage(name)
        # Name the class explicitly: super(self.__class__, ...) would recurse
        # forever if this class were ever subclassed.
        super(MeteorSprite, self).__init__(img, posn[0], posn[1], batch=batch)
        self.props = props
        self.name = name
        self.motion = tv.LinearMotion2(posn[0], posn[1], vel[0], vel[1])
        # Constant spin, 90-180 degrees per second (spurious extra local
        # binding removed).
        self.angle = tv.LinearMotion(0, 90 + 90*random.random())
        self.alive = True
        self.timeAlive = 0.0
        # Approximate collision radius: mean of width and height, halved.
        self.radius = (self.width + self.height)/2/2
        self.update(0.0)

    def update(self, dt):
        """Advance position and rotation by dt, bouncing off the field edges."""
        self.motion.update(dt)
        self.angle.update(dt)
        w = self.props.windowWidth
        h = self.props.windowHeight
        # Meteors are confined to the region [-w, 2w] x [-h, 2h].
        self.motion.bounce(-w, 2.0 * w, -h, 2.0 * h)
        self.x, self.y = self.motion.getValue()
        self.rotation = self.angle.getValue()

    def getCenter(self):
        return self.x, self.y

    def getRadius(self):
        return self.radius

    def getValue(self):
        # NOTE(review): returns a fixed dummy list -- looks like a stub;
        # confirm what callers expect.
        return [1]*10

    def dump(self):
        return "(%d, %d)" % (self.x, self.y)
class ShotSprite(pyglet.sprite.Sprite):
    """Short-lived shot/beam graphic fired from a fixed position and angle."""
    # Seconds the shot graphic stays alive.
    lifeTime = 0.08
    def __init__(self, position, angle, batch):
        """position -- (x, y) origin of the shot; angle -- rotation in degrees."""
        ga = GameAssets.Instance()
        super(ShotSprite, self).__init__(ga.getImage('shot'), *position, batch=batch)
        self.position = position
        self.angle = angle
        self.alive = True
        self.timeAlive = 0.0
        self.update(0.0)
    def update(self, dt):
        # Pin the sprite to its fixed position/rotation; expire after lifeTime.
        self.x, self.y = self.position
        self.rotation = self.angle
        self.timeAlive += dt
        if self.timeAlive > self.__class__.lifeTime:
            self.alive = False
    def get(self):
        # Sigh. Gotta set some standards w.r.t. angles
        # Returns (origin vector, unit direction vector) for ray hit-testing.
        return vec.Vector(self.x, self.y), vec.Vector(*vec.uvec(self.rotation+90.))
class MultiExplosion(object):
    """A timed sequence of animated sprites and sounds"""
    def __init__(self, x, y, times):
        """x, y -- blast center; times -- seconds (from start()) at which each
        individual boom fires."""
        super(MultiExplosion, self).__init__()
        self.x = x
        self.y = y
        self.times = times
        self.alive = True
        self.timeAlive = 0.0
        self.running = False
        # Note: sounds are just started and continue on their own, sprites
        # need to be rememberd and drawn by us.
        self.sprites = []
        self.players = []
        self.nextTimeIdx = 0
    def start(self):
        # Arms the sequence; update() is a no-op until this is called.
        self.running = True
    def update(self, dt):
        if not self.running:
            return
        self.timeAlive += dt
        for s in self.sprites:
            s.update(dt)
        if self.nextTimeIdx >= len(self.times):
            # Bug: doesn't allow time for last boom to play.
            self.alive = False
            return
        if self.timeAlive < self.times[self.nextTimeIdx]:
            return
        # Time for next boom
        # Offset each boom from the center by a random polar jitter.
        # NOTE(review): th is in degrees but math.cos/sin take radians; the
        # jitter is still random, but confirm the intent.
        r = random.gauss(40, 20)
        th = random.randrange(360)
        s = ExplosionSprite(self.x + r*math.cos(th), self.y + r*math.sin(th))
        #s = ExplosionSprite(self.x + 0, self.y + 0)
        self.sprites.append(s)
        if Config.Instance().sound():
            player = pyglet.media.Player()
            player.queue(GameAssets.Instance().getSound('bomb-explosion-1'))
            player.play()
            self.players.append(player)
        #gAssets.getSound('boom2').play()
        self.nextTimeIdx += 1
    def done(self):
        return not self.alive
    def draw(self, window):
        # NOTE(review): the window parameter is unused here; confirm callers.
        for s in self.sprites:
            s.draw()
class ExplosionSprite(pyglet.sprite.Sprite):
    """Short-lived spinning explosion graphic."""

    lifeTime = 4.75 # Just a guess, doesn't matter as it's only used for clean-up

    def __init__(self, x, y):
        # Name the class explicitly: super(self.__class__, ...) recurses
        # under subclassing.
        super(ExplosionSprite, self).__init__(GameAssets.Instance().getImage('explosion'), x, y)
        # (Removed unused th/u/v random draws that served no purpose.)
        self.alive = True
        self.timeAlive = 0.0
        # Spin at a constant 120 degrees per second.
        self.angle = tv.LinearMotion(0, 120)

    def update(self, dt):
        """Advance the spin and expire after lifeTime seconds."""
        self.angle.update(dt)
        self.rotation = self.angle.getValue()
        self.timeAlive += dt
        if self.timeAlive > self.__class__.lifeTime:
            self.alive = False
# Not exactly sprites down here, just other things that decorate the screen
class ScoreBoard(pyglet.text.Label):
    """Score readout that animates: queued bumps are applied one at a time,
    briefly enlarging the text and coloring it red for losses."""
    flipRate = 0.05 # seconds between flips
    regularFontSize = 30
    bigFontSize = 36
    yellow = (255,255,0, 200)
    red = (255,0,0, 200)
    def __init__(self, props):
        super(ScoreBoard, self).__init__(
            text="0", font_name='Orbitron', bold=True, font_size=ScoreBoard.regularFontSize,
            anchor_x = "center", anchor_y="bottom",
            color=ScoreBoard.yellow,
            x= 25 + props.windowWidth//2,
            y=10)
        self.value = 0
        #self.outOf = 0
        # Score deltas waiting to be applied, one per flipRate interval.
        self.pendingBumps = []
        self.timeSinceBump = 10000.0 # infinity
        self.justBumped = False
    def addScore(self, bumps):
        """Queue a sequence of score deltas; they are merged element-wise
        into any bumps already pending."""
        #self.value += bump
        #self.text = "%d / %d" % (self.value, self.outOf)
        #self.text = "%d" % (self.value, )
        for i, b in enumerate(bumps):
            if i < len(self.pendingBumps):
                self.pendingBumps[i] += b
            else:
                self.pendingBumps.append(b)
    def update(self, dt):
        # Apply at most one pending bump per flipRate seconds.
        self.timeSinceBump += dt
        if self.pendingBumps and self.timeSinceBump > self.__class__.flipRate:
            bump = self.pendingBumps.pop(0)
            self.value += bump
            # Negative bumps flash red, positive stay yellow.
            if bump < 0:
                self.color = ScoreBoard.red
            else:
                self.color = ScoreBoard.yellow
            self.font_size = ScoreBoard.bigFontSize
            self.text = "%d" % (self.value, )
            self.timeSinceBump = 0.0
            self.justBumped = True
        elif not self.pendingBumps and self.justBumped :
            self.color = ScoreBoard.yellow
            # Back to normal size after bumping
            self.font_size = ScoreBoard.regularFontSize
            self.text = "%d" % (self.value, )
            self.justBumped = False
class MeteorRadar(object):
    """On-screen counter showing how many meteors are currently active."""
    def __init__(self, props):
        super(MeteorRadar, self).__init__()
        self.props = props
        commonOpts = {
            'font_name': 'Orbitron',
            'bold': True,
            'font_size': 24,
            'color': (255,255,0, 200),
            'text': ""}
        # Two labels: right-aligned number, left-aligned " meteors" caption.
        t = commonOpts.copy()
        t.update( {'anchor_x': "right", 'anchor_y': "bottom", 'x':75, 'y':10})
        self.number = pyglet.text.Label(**t)
        t = commonOpts.copy()
        t.update( {'anchor_x': "left", 'anchor_y': "bottom", 'x':80, 'y':10})
        self.text = pyglet.text.Label(**t)
        self.nItems = 0
    def draw(self):
        self.number.draw()
        self.text.draw()
    def setNumItems(self, n):
        # Only rebuild the label text when the count actually changed.
        if n == self.nItems:
            return
        self.nItems = n
        self.number.text = str(n)
        self.text.text = " meteors"
        #print "set radar to", n
class TimeDisplay(object):
    """Elapsed-time readout (m:ss, optionally with tenths of a second),
    built from one label per digit so only changed digits are re-set."""
    def __init__(self, props, displayTenths = False):
        super(TimeDisplay, self).__init__()
        self.props = props
        self.seconds = 0.0
        # either seconds or 1/10th of seconds, depending on flag
        self.currDisplayAmt = -1
        self.displayTenths = displayTenths
        xPos = props.windowWidth - 100
        self.batch = pyglet.graphics.Batch()
        commonOpts = {
            'font_name': 'Orbitron',
            'bold': True,
            'font_size': 16,
            'color': (255,255,0, 200),
            'text': "",
            'y': 10,
            'batch': self.batch
        }
        # Lay the digit labels out left to right: minutes, ':', tens of
        # seconds, ones of seconds[, '.', tenths].
        t = commonOpts.copy()
        t.update( {'x':xPos})
        self.min = pyglet.text.Label(**t)
        xPos += 22
        t.update( {'x':xPos, 'text': ":"})
        self.colon = pyglet.text.Label(**t)
        xPos += 10
        t.update( {'x':xPos, 'text': ""})
        self.sec10s = pyglet.text.Label(**t)
        xPos += 22
        t.update( {'x':xPos})
        self.sec1s = pyglet.text.Label(**t)
        if self.displayTenths:
            xPos += 20
            t.update( {'x':xPos, 'text': "."})
            self.decimal = pyglet.text.Label(**t)
            xPos += 8
            t.update( {'x':xPos, 'text': ""})
            self.secTenths = pyglet.text.Label(**t)
        self.nItems = 0
    #def update(self, dt):
    def setTime(self, seconds):
        """Set the displayed time; labels are only rebuilt when the visible
        amount (seconds, or tenths if enabled) actually changes."""
        #self.seconds += dt
        self.seconds = seconds
        #secondsDisp = int(10*self.seconds)/10.
        dispAmt = int( 10*self.seconds if self.displayTenths else self.seconds)
        if dispAmt != self.currDisplayAmt:
            t = int(dispAmt/10. if self.displayTenths else dispAmt)
            m,s = divmod(t, 60)
            s10, s1 = divmod(s, 10)
            self.min.text = str(m)
            self.sec10s.text = str(s10)
            self.sec1s.text = str(s1)
            if self.displayTenths:
                self.secTenths.text = str(int(dispAmt - 10*t))
    def draw(self):
        self.batch.draw()
        #self.min.draw()
        #self.colon.draw()
        #self.sec10s.draw()
        #self.sec1s.draw()
class GameOver(object):
    """Animated 'GAME OVER' banner that zooms in, bounces, then expires."""
    def __init__(self, props):
        super(GameOver, self).__init__()
        self.props = props
        self.display = "GAME OVER"
        self.text = pyglet.text.Label(
            text=self.display, font_name='Orbitron', bold=True, font_size=108,
            anchor_x = "center", anchor_y="bottom",
            color=(0,255,0, 200),
            #x=props.windowWidth//2,
            #y=200
            x=0,
            y=0
            )
        self.timeAlive = 0
        # Animation curves over the banner's 4-second lifetime: scale factor
        # and vertical position as functions of time.
        self.zoom = tv.PLInterpolator([(0,0.2), (1,0.3), (2,0.5), (3,1.0), (1000,1.0)])
        self.height = tv.PLInterpolator([(0,150), (2,450), (3,320), (1000,370)])
        self.height.shift(0,props.windowHeight-660)
        self.alive = True
    def update(self, dt):
        self.timeAlive += dt
        if self.timeAlive > 4.0:
            self.alive = False
    def draw(self):
        # Scale/translate via the GL matrix stack so the label itself can
        # stay anchored at the origin.
        a = self.zoom(self.timeAlive)
        h = self.height(self.timeAlive)
        gl.glPushMatrix()
        gl.glTranslatef(self.props.windowWidth//2, h, 0)
        gl.glScalef(a,a,a)
        self.text.draw()
        gl.glPopMatrix()
    def done(self):
        return not self.alive
class StarField(object):
    """Static background of randomly placed star sprites plus one galaxy,
    drawn from a single batch."""
    def __init__(self, w, h ):
        ga = GameAssets.Instance()
        self.stars = []
        self.batch = pyglet.graphics.Batch()
        # Weighted pool of star images: small stars most common, large rare.
        arr = (
            22 * [ga.getImage('star-sml-1')] +
            22 * [ga.getImage('star-sml-2')] +
            22 * [ga.getImage('star-sml-3')] +
            22 * [ga.getImage('star-sml-4')] +
            22 * [ga.getImage('star-sml-5')] +
            22 * [ga.getImage('star-sml-6')] +
            15 * [ga.getImage('star-med-1')] +
            15 * [ga.getImage('star-med-2')] +
             3 * [ga.getImage('star-lrg-1')] +
             3 * [ga.getImage('star-lrg-2')]
        )
        #arr = 50 * [ga.getImage('star-sml-2')]
        #arr = 50 * [ga.getImage('star-sml-5')]
        #arr = 50 * [ga.getImage('star-sml-6')]
        # Scatter seven copies of the pool with gaussian spread around the
        # window, with random opacity and rotation.
        for st in 7*arr:
            #x, y = random.randrange(-2*w,3*w), random.randrange(-2*h,3*h)
            x, y = random.gauss(0.5*w,2*w), random.gauss(0.5*h,2*h)
            sprite = pyglet.sprite.Sprite(st, x, y, batch=self.batch)
            #sprite.opacity = 128 + 128 * random.random()
            sprite.opacity = random.gauss(200, 30)
            sprite.rotation = 360 * random.random()
            self.stars.append(sprite)
        glxy = pyglet.sprite.Sprite(ga.getImage('galaxy'), 0.3*w, 0.3*h, batch=self.batch)
        self.stars.append(glxy)
    def update(self, dt):
        pass # For now. Later, twinkling!
    def draw(self):
        self.batch.draw()
# De-buggery
class DgbSquare(pyglet.sprite.Sprite):
    """Debug marker sprite that can be nudged around the screen."""

    def __init__(self, x, y):
        img = GameAssets.Instance().getImage('dbg1')
        super(DgbSquare, self).__init__(img, x, y)
        self.xPos = x
        self.yPos = y

    def shift(self, dx, dy):
        """Move the marker's tracked position by (dx, dy)."""
        self.xPos = self.xPos + dx
        self.yPos = self.yPos + dy

    def update(self, dt):
        # Snap the sprite to its tracked position each frame.
        self.x = self.xPos
        self.y = self.yPos
class DgbSquare2(pyglet.sprite.Sprite):
    """Debug marker that smoothly tracks a target position via TargetTracker."""

    def __init__(self, x, y):
        # Name the class explicitly: super(self.__class__, ...) recurses
        # under subclassing.
        super(DgbSquare2, self).__init__(GameAssets.Instance().getImage('dbg2'), x, y)
        d = 300
        p = 2000
        critP = d*d/4
        critD = math.sqrt(4.0 * p)
        d = 500
        #print "critP", critP, ", critD", critD
        #print "p", p, ", d", d
        # p 2000 , d 500 works nice
        self.xPos = tv.TargetTracker(p, d)
        self.xPos.initVals(0,0)
        self.yPos = tv.TargetTracker(p, d)
        self.yPos.initVals(0,0)

    def shift(self, dx, dy):
        # NOTE(review): xPos/yPos are TargetTracker objects here, so +=
        # only works if TargetTracker supports addition -- this looks
        # copy-pasted from DgbSquare; confirm.
        self.xPos += dx
        self.yPos += dy

    def update(self, dt):
        self.x = self.xPos.update(dt)
        self.y = self.yPos.update(dt)
|
sergio-py2/meteor
|
sprites.py
|
Python
|
mit
| 20,802
|
[
"Galaxy"
] |
04c18390b6dabd4cc2f4ca931728dc18242d5d1d29b0a9c044b71d79b134a57e
|
from __future__ import print_function, division
import random
from sympy.core.basic import Basic
from sympy.core.compatibility import is_sequence, as_int, range
from sympy.core.function import count_ops
from sympy.core.decorators import call_highest_priority
from sympy.core.singleton import S
from sympy.core.symbol import Symbol
from sympy.core.sympify import sympify
from sympy.functions.elementary.trigonometric import cos, sin
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.simplify import simplify as _simplify
from sympy.utilities.misc import filldedent
from sympy.utilities.decorator import doctest_depends_on
from sympy.matrices.matrices import (MatrixBase,
ShapeError, a2idx, classof)
def _iszero(x):
"""Returns True if x is zero."""
return x.is_zero
class DenseMatrix(MatrixBase):
    """Dense matrix backed by a flat, row-major Python list (``self._mat``).

    This level is effectively read-only: ``__setitem__`` raises
    ``NotImplementedError``; in-place mutation lives in
    ``MutableDenseMatrix``.
    """

    is_MatrixExpr = False

    # Arithmetic dispatch priorities consulted by ``call_highest_priority``
    # when operands of mixed types meet.
    _op_priority = 10.01
    _class_priority = 4

    def __getitem__(self, key):
        """Return portion of self defined by key. If the key involves a slice
        then a list will be returned (if key is a single slice) or a matrix
        (if key was a tuple involving a slice).

        Examples
        ========
        >>> from sympy import Matrix, I
        >>> m = Matrix([
        ... [1, 2 + I],
        ... [3, 4    ]])

        If the key is a tuple that doesn't involve a slice then that element
        is returned:

        >>> m[1, 0]
        3

        When a tuple key involves a slice, a matrix is returned. Here, the
        first column is selected (all rows, column 0):

        >>> m[:, 0]
        Matrix([
        [1],
        [3]])

        If the slice is not a tuple then it selects from the underlying
        list of elements that are arranged in row order and a list is
        returned if a slice is involved:

        >>> m[0]
        1
        >>> m[::2]
        [1, 3]
        """
        if isinstance(key, tuple):
            i, j = key
            try:
                # Fast path: both indices resolve to plain integers.
                i, j = self.key2ij(key)
                return self._mat[i*self.cols + j]
            except (TypeError, IndexError):
                # Slow path: at least one index is a slice/sequence; expand
                # each to an explicit index list and delegate to extract().
                if isinstance(i, slice):
                    # XXX remove list() when PY2 support is dropped
                    i = list(range(self.rows))[i]
                elif is_sequence(i):
                    pass
                else:
                    i = [i]
                if isinstance(j, slice):
                    # XXX remove list() when PY2 support is dropped
                    j = list(range(self.cols))[j]
                elif is_sequence(j):
                    pass
                else:
                    j = [j]
                return self.extract(i, j)
        else:
            # row-wise decomposition of matrix
            if isinstance(key, slice):
                return self._mat[key]
            return self._mat[a2idx(key)]

    def __setitem__(self, key, value):
        # Immutable at this level; see MutableDenseMatrix.__setitem__.
        raise NotImplementedError()

    @property
    def is_Identity(self):
        # True iff square with unit diagonal and zero everywhere else.
        if not self.is_square:
            return False
        if not all(self[i, i] == 1 for i in range(self.rows)):
            return False
        for i in range(self.rows):
            for j in range(i + 1, self.cols):
                if self[i, j] or self[j, i]:
                    return False
        return True

    def tolist(self):
        """Return the Matrix as a nested Python list.

        Examples
        ========
        >>> from sympy import Matrix, ones
        >>> m = Matrix(3, 3, range(9))
        >>> m
        Matrix([
        [0, 1, 2],
        [3, 4, 5],
        [6, 7, 8]])
        >>> m.tolist()
        [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        >>> ones(3, 0).tolist()
        [[], [], []]

        When there are no rows then it will not be possible to tell how
        many columns were in the original matrix:

        >>> ones(0, 3).tolist()
        []
        """
        if not self.rows:
            return []
        if not self.cols:
            return [[] for i in range(self.rows)]
        return [self._mat[i: i + self.cols]
                for i in range(0, len(self), self.cols)]

    def row(self, i):
        """Elementary row selector.

        Examples
        ========
        >>> from sympy import eye
        >>> eye(2).row(0)
        Matrix([[1, 0]])

        See Also
        ========
        col
        row_op
        row_swap
        row_del
        row_join
        row_insert
        """
        return self[i, :]

    def col(self, j):
        """Elementary column selector.

        Examples
        ========
        >>> from sympy import eye
        >>> eye(2).col(0)
        Matrix([
        [1],
        [0]])

        See Also
        ========
        row
        col_op
        col_swap
        col_del
        col_join
        col_insert
        """
        return self[:, j]

    def _eval_trace(self):
        """Calculate the trace of a square matrix.

        Examples
        ========
        >>> from sympy.matrices import eye
        >>> eye(3).trace()
        3
        """
        trace = 0
        for i in range(self.cols):
            trace += self._mat[i*self.cols + i]
        return trace

    def _eval_determinant(self):
        return self.det()

    def _eval_transpose(self):
        """Matrix transposition.

        Examples
        ========
        >>> from sympy import Matrix, I
        >>> m=Matrix(((1, 2+I), (3, 4)))
        >>> m
        Matrix([
        [1, 2 + I],
        [3,     4]])
        >>> m.transpose()
        Matrix([
        [    1, 3],
        [2 + I, 4]])
        >>> m.T == m.transpose()
        True

        See Also
        ========
        conjugate: By-element conjugation
        """
        # Each column of self, read with stride self.cols, becomes a row.
        a = []
        for i in range(self.cols):
            a.extend(self._mat[i::self.cols])
        return self._new(self.cols, self.rows, a)

    def _eval_conjugate(self):
        """By-element conjugation.

        See Also
        ========
        transpose: Matrix transposition
        H: Hermite conjugation
        D: Dirac conjugation
        """
        out = self._new(self.rows, self.cols,
                        lambda i, j: self[i, j].conjugate())
        return out

    def _eval_adjoint(self):
        return self.T.C

    def _eval_inverse(self, **kwargs):
        """Return the matrix inverse using the method indicated (default
        is Gauss elimination).

        kwargs
        ======

        method : ('GE', 'LU', or 'ADJ')
        iszerofunc
        try_block_diag

        Notes
        =====

        According to the ``method`` keyword, it calls the appropriate method:

          GE .... inverse_GE(); default
          LU .... inverse_LU()
          ADJ ... inverse_ADJ()

        According to the ``try_block_diag`` keyword, it will try to form block
        diagonal matrices using the method get_diag_blocks(), invert these
        individually, and then reconstruct the full inverse matrix.

        Note, the GE and LU methods may require the matrix to be simplified
        before it is inverted in order to properly detect zeros during
        pivoting. In difficult cases a custom zero detection function can
        be provided by setting the ``iszerosfunc`` argument to a function that
        should return True if its argument is zero. The ADJ routine computes
        the determinant and uses that to detect singular matrices in addition
        to testing for zeros on the diagonal.

        See Also
        ========
        inverse_LU
        inverse_GE
        inverse_ADJ
        """
        from sympy.matrices import diag

        method = kwargs.get('method', 'GE')
        iszerofunc = kwargs.get('iszerofunc', _iszero)
        if kwargs.get('try_block_diag', False):
            # Invert each diagonal block independently, then reassemble.
            blocks = self.get_diag_blocks()
            r = []
            for block in blocks:
                r.append(block.inv(method=method, iszerofunc=iszerofunc))
            return diag(*r)

        M = self.as_mutable()
        if method == "GE":
            rv = M.inverse_GE(iszerofunc=iszerofunc)
        elif method == "LU":
            rv = M.inverse_LU(iszerofunc=iszerofunc)
        elif method == "ADJ":
            rv = M.inverse_ADJ(iszerofunc=iszerofunc)
        else:
            # make sure to add an invertibility check (as in inverse_LU)
            # if a new method is added.
            raise ValueError("Inversion method unrecognized")
        return self._new(rv)

    def equals(self, other, failing_expression=False):
        """Applies ``equals`` to corresponding elements of the matrices,
        trying to prove that the elements are equivalent, returning True
        if they are, False if any pair is not, and None (or the first
        failing expression if failing_expression is True) if it cannot
        be decided if the expressions are equivalent or not. This is, in
        general, an expensive operation.

        Examples
        ========
        >>> from sympy.matrices import Matrix
        >>> from sympy.abc import x
        >>> from sympy import cos
        >>> A = Matrix([x*(x - 1), 0])
        >>> B = Matrix([x**2 - x, 0])
        >>> A == B
        False
        >>> A.simplify() == B.simplify()
        True
        >>> A.equals(B)
        True
        >>> A.equals(2)
        False

        See Also
        ========
        sympy.core.expr.equals
        """
        try:
            if self.shape != other.shape:
                return False
            rv = True
            for i in range(self.rows):
                for j in range(self.cols):
                    ans = self[i, j].equals(other[i, j], failing_expression)
                    if ans is False:
                        return False
                    elif ans is not True and rv is True:
                        # Remember the first undecided comparison but keep
                        # scanning: a later pair may still prove inequality.
                        rv = ans
            return rv
        except AttributeError:
            return False

    def __eq__(self, other):
        try:
            if self.shape != other.shape:
                return False
            if isinstance(other, Matrix):
                return self._mat == other._mat
            elif isinstance(other, MatrixBase):
                return self._mat == Matrix(other)._mat
        except AttributeError:
            return False
        # NOTE(review): if ``other`` has a matching shape but is neither a
        # Matrix nor a MatrixBase, control falls through and returns None
        # (falsy); returning NotImplemented would be the conventional choice
        # -- confirm before changing.

    def __ne__(self, other):
        return not self == other

    def _cholesky(self):
        """Helper function of cholesky.
        Without the error checks.
        To be used privately. """
        L = zeros(self.rows, self.rows)
        for i in range(self.rows):
            for j in range(i):
                L[i, j] = (1 / L[j, j])*(self[i, j] -
                                         sum(L[i, k]*L[j, k] for k in range(j)))
            L[i, i] = sqrt(self[i, i] -
                           sum(L[i, k]**2 for k in range(i)))
        return self._new(L)

    def _LDLdecomposition(self):
        """Helper function of LDLdecomposition.
        Without the error checks.
        To be used privately.
        """
        D = zeros(self.rows, self.rows)
        L = eye(self.rows)
        for i in range(self.rows):
            for j in range(i):
                L[i, j] = (1 / D[j, j])*(self[i, j] - sum(
                    L[i, k]*L[j, k]*D[k, k] for k in range(j)))
            D[i, i] = self[i, i] - sum(L[i, k]**2*D[k, k]
                                       for k in range(i))
        return self._new(L), self._new(D)

    def _lower_triangular_solve(self, rhs):
        """Helper function of function lower_triangular_solve.
        Without the error checks.
        To be used privately.
        """
        # Forward substitution, one rhs column at a time.
        X = zeros(self.rows, rhs.cols)
        for j in range(rhs.cols):
            for i in range(self.rows):
                if self[i, i] == 0:
                    # NOTE(review): raises TypeError here but the upper
                    # triangular solver raises ValueError for the same
                    # condition -- inconsistent; confirm intent.
                    raise TypeError("Matrix must be non-singular.")
                X[i, j] = (rhs[i, j] - sum(self[i, k]*X[k, j]
                                           for k in range(i))) / self[i, i]
        return self._new(X)

    def _upper_triangular_solve(self, rhs):
        """Helper function of function upper_triangular_solve.
        Without the error checks, to be used privately. """
        # Backward substitution, one rhs column at a time.
        X = zeros(self.rows, rhs.cols)
        for j in range(rhs.cols):
            for i in reversed(range(self.rows)):
                if self[i, i] == 0:
                    raise ValueError("Matrix must be non-singular.")
                X[i, j] = (rhs[i, j] - sum(self[i, k]*X[k, j]
                                           for k in range(i + 1, self.rows))) / self[i, i]
        return self._new(X)

    def _diagonal_solve(self, rhs):
        """Helper function of function diagonal_solve,
        without the error checks, to be used privately.
        """
        return self._new(rhs.rows, rhs.cols, lambda i, j: rhs[i, j] / self[i, i])

    def applyfunc(self, f):
        """Apply a function to each element of the matrix.

        Examples
        ========
        >>> from sympy import Matrix
        >>> m = Matrix(2, 2, lambda i, j: i*2+j)
        >>> m
        Matrix([
        [0, 1],
        [2, 3]])
        >>> m.applyfunc(lambda i: 2*i)
        Matrix([
        [0, 2],
        [4, 6]])
        """
        if not callable(f):
            raise TypeError("`f` must be callable.")

        out = self._new(self.rows, self.cols, list(map(f, self._mat)))
        return out

    def reshape(self, rows, cols):
        """Reshape the matrix. Total number of elements must remain the same.

        Examples
        ========
        >>> from sympy import Matrix
        >>> m = Matrix(2, 3, lambda i, j: 1)
        >>> m
        Matrix([
        [1, 1, 1],
        [1, 1, 1]])
        >>> m.reshape(1, 6)
        Matrix([[1, 1, 1, 1, 1, 1]])
        >>> m.reshape(3, 2)
        Matrix([
        [1, 1],
        [1, 1],
        [1, 1]])
        """
        if len(self) != rows*cols:
            raise ValueError("Invalid reshape parameters %d %d" % (rows, cols))
        return self._new(rows, cols, lambda i, j: self._mat[i*cols + j])

    def as_mutable(self):
        """Returns a mutable version of this matrix

        Examples
        ========
        >>> from sympy import ImmutableMatrix
        >>> X = ImmutableMatrix([[1, 2], [3, 4]])
        >>> Y = X.as_mutable()
        >>> Y[1, 1] = 5 # Can set values in Y
        >>> Y
        Matrix([
        [1, 2],
        [3, 5]])
        """
        return Matrix(self)

    def as_immutable(self):
        """Returns an Immutable version of this Matrix
        """
        from .immutable import ImmutableMatrix as cls
        if self.rows and self.cols:
            return cls._new(self.tolist())
        return cls._new(self.rows, self.cols, [])

    @classmethod
    def zeros(cls, r, c=None):
        """Return an r x c matrix of zeros, square if c is omitted."""
        c = r if c is None else c
        r = as_int(r)
        c = as_int(c)
        return cls._new(r, c, [cls._sympify(0)]*r*c)

    @classmethod
    def eye(cls, n):
        """Return an n x n identity matrix."""
        n = as_int(n)
        # Fill with zeros, then set every (n+1)-th flat entry -- i.e. the
        # diagonal -- to one.
        mat = [cls._sympify(0)]*n*n
        mat[::n + 1] = [cls._sympify(1)]*n
        return cls._new(n, n, mat)

    ############################
    # Mutable matrix operators #
    ############################

    # Each operator coerces the other operand to a mutable Matrix first so
    # that mixed-type arithmetic lands in MatrixBase's implementations.

    @call_highest_priority('__radd__')
    def __add__(self, other):
        return super(DenseMatrix, self).__add__(_force_mutable(other))

    @call_highest_priority('__add__')
    def __radd__(self, other):
        return super(DenseMatrix, self).__radd__(_force_mutable(other))

    @call_highest_priority('__rsub__')
    def __sub__(self, other):
        return super(DenseMatrix, self).__sub__(_force_mutable(other))

    @call_highest_priority('__sub__')
    def __rsub__(self, other):
        return super(DenseMatrix, self).__rsub__(_force_mutable(other))

    @call_highest_priority('__rmul__')
    def __mul__(self, other):
        return super(DenseMatrix, self).__mul__(_force_mutable(other))

    @call_highest_priority('__mul__')
    def __rmul__(self, other):
        return super(DenseMatrix, self).__rmul__(_force_mutable(other))

    @call_highest_priority('__div__')
    def __div__(self, other):
        return super(DenseMatrix, self).__div__(_force_mutable(other))

    @call_highest_priority('__truediv__')
    def __truediv__(self, other):
        return super(DenseMatrix, self).__truediv__(_force_mutable(other))

    @call_highest_priority('__rpow__')
    def __pow__(self, other):
        return super(DenseMatrix, self).__pow__(other)

    @call_highest_priority('__pow__')
    def __rpow__(self, other):
        raise NotImplementedError("Matrix Power not defined")
def _force_mutable(x):
"""Return a matrix as a Matrix, otherwise return x."""
if getattr(x, 'is_Matrix', False):
return x.as_mutable()
elif isinstance(x, Basic):
return x
elif hasattr(x, '__array__'):
a = x.__array__()
if len(a.shape) == 0:
return sympify(a)
return Matrix(x)
return x
class MutableDenseMatrix(DenseMatrix, MatrixBase):
    """Mutable dense matrix: adds in-place element, row and column
    operations on top of ``DenseMatrix``."""

    @classmethod
    def _new(cls, *args, **kwargs):
        # Construct directly via object.__new__, bypassing argument
        # re-processing; _handle_creation_inputs normalizes all the
        # accepted constructor forms.
        rows, cols, flat_list = cls._handle_creation_inputs(*args, **kwargs)
        self = object.__new__(cls)
        self.rows = rows
        self.cols = cols
        self._mat = list(flat_list)  # create a shallow copy
        return self

    def __new__(cls, *args, **kwargs):
        return cls._new(*args, **kwargs)

    def as_mutable(self):
        return self.copy()

    def __setitem__(self, key, value):
        """

        Examples
        ========

        >>> from sympy import Matrix, I, zeros, ones
        >>> m = Matrix(((1, 2+I), (3, 4)))
        >>> m
        Matrix([
        [1, 2 + I],
        [3,     4]])
        >>> m[1, 0] = 9
        >>> m
        Matrix([
        [1, 2 + I],
        [9,     4]])
        >>> m[1, 0] = [[0, 1]]

        To replace row r you assign to position r*m where m
        is the number of columns:

        >>> M = zeros(4)
        >>> m = M.cols
        >>> M[3*m] = ones(1, m)*2; M
        Matrix([
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [2, 2, 2, 2]])

        And to replace column c you can assign to position c:

        >>> M[2] = ones(m, 1)*4; M
        Matrix([
        [0, 0, 4, 0],
        [0, 0, 4, 0],
        [0, 0, 4, 0],
        [2, 2, 4, 2]])
        """
        # _setitem handles slice/sequence targets itself and returns None;
        # it returns (i, j, value) only for a plain scalar assignment.
        rv = self._setitem(key, value)
        if rv is not None:
            i, j, value = rv
            self._mat[i*self.cols + j] = value

    def copyin_matrix(self, key, value):
        """Copy in values from a matrix into the given bounds.

        Parameters
        ==========

        key : slice
            The section of this matrix to replace.
        value : Matrix
            The matrix to copy values from.

        Examples
        ========

        >>> from sympy.matrices import Matrix, eye
        >>> M = Matrix([[0, 1], [2, 3], [4, 5]])
        >>> I = eye(3)
        >>> I[:3, :2] = M
        >>> I
        Matrix([
        [0, 1, 0],
        [2, 3, 0],
        [4, 5, 1]])
        >>> I[0, 1] = M
        >>> I
        Matrix([
        [0, 0, 1],
        [2, 2, 3],
        [4, 4, 5]])

        See Also
        ========

        copyin_list
        """
        rlo, rhi, clo, chi = self.key2bounds(key)
        shape = value.shape
        dr, dc = rhi - rlo, chi - clo
        if shape != (dr, dc):
            raise ShapeError(filldedent("The Matrix `value` doesn't have the "
                "same dimensions "
                "as the in sub-Matrix given by `key`."))

        for i in range(value.rows):
            for j in range(value.cols):
                self[i + rlo, j + clo] = value[i, j]

    def copyin_list(self, key, value):
        """Copy in elements from a list.

        Parameters
        ==========

        key : slice
            The section of this matrix to replace.
        value : iterable
            The iterable to copy values from.

        Examples
        ========

        >>> from sympy.matrices import eye
        >>> I = eye(3)
        >>> I[:2, 0] = [1, 2] # col
        >>> I
        Matrix([
        [1, 0, 0],
        [2, 1, 0],
        [0, 0, 1]])
        >>> I[1, :2] = [[3, 4]]
        >>> I
        Matrix([
        [1, 0, 0],
        [3, 4, 0],
        [0, 0, 1]])

        See Also
        ========

        copyin_matrix
        """
        if not is_sequence(value):
            raise TypeError("`value` must be an ordered iterable, not %s." % type(value))
        return self.copyin_matrix(key, Matrix(value))

    def zip_row_op(self, i, k, f):
        """In-place operation on row ``i`` using two-arg functor whose args are
        interpreted as ``(self[i, j], self[k, j])``.

        Examples
        ========

        >>> from sympy.matrices import eye
        >>> M = eye(3)
        >>> M.zip_row_op(1, 0, lambda v, u: v + 2*u); M
        Matrix([
        [1, 0, 0],
        [2, 1, 0],
        [0, 0, 1]])

        See Also
        ========
        row
        row_op
        col_op

        """
        # Operate on whole-row slices of the flat storage for speed.
        i0 = i*self.cols
        k0 = k*self.cols

        ri = self._mat[i0: i0 + self.cols]
        rk = self._mat[k0: k0 + self.cols]

        self._mat[i0: i0 + self.cols] = [ f(x, y) for x, y in zip(ri, rk) ]

    def row_op(self, i, f):
        """In-place operation on row ``i`` using two-arg functor whose args are
        interpreted as ``(self[i, j], j)``.

        Examples
        ========

        >>> from sympy.matrices import eye
        >>> M = eye(3)
        >>> M.row_op(1, lambda v, j: v + 2*M[0, j]); M
        Matrix([
        [1, 0, 0],
        [2, 1, 0],
        [0, 0, 1]])

        See Also
        ========
        row
        zip_row_op
        col_op

        """
        i0 = i*self.cols
        ri = self._mat[i0: i0 + self.cols]
        self._mat[i0: i0 + self.cols] = [ f(x, j) for x, j in zip(ri, list(range(self.cols))) ]

    def col_op(self, j, f):
        """In-place operation on col j using two-arg functor whose args are
        interpreted as (self[i, j], i).

        Examples
        ========

        >>> from sympy.matrices import eye
        >>> M = eye(3)
        >>> M.col_op(1, lambda v, i: v + 2*M[i, 0]); M
        Matrix([
        [1, 2, 0],
        [0, 1, 0],
        [0, 0, 1]])

        See Also
        ========
        col
        row_op
        """
        # Column j lives at stride self.cols in the flat storage.
        self._mat[j::self.cols] = [f(*t) for t in list(zip(self._mat[j::self.cols], list(range(self.rows))))]

    def row_swap(self, i, j):
        """Swap the two given rows of the matrix in-place.

        Examples
        ========

        >>> from sympy.matrices import Matrix
        >>> M = Matrix([[0, 1], [1, 0]])
        >>> M
        Matrix([
        [0, 1],
        [1, 0]])
        >>> M.row_swap(0, 1)
        >>> M
        Matrix([
        [1, 0],
        [0, 1]])

        See Also
        ========

        row
        col_swap
        """
        for k in range(0, self.cols):
            self[i, k], self[j, k] = self[j, k], self[i, k]

    def col_swap(self, i, j):
        """Swap the two given columns of the matrix in-place.

        Examples
        ========

        >>> from sympy.matrices import Matrix
        >>> M = Matrix([[1, 0], [1, 0]])
        >>> M
        Matrix([
        [1, 0],
        [1, 0]])
        >>> M.col_swap(0, 1)
        >>> M
        Matrix([
        [0, 1],
        [0, 1]])

        See Also
        ========

        col
        row_swap
        """
        for k in range(0, self.rows):
            self[k, i], self[k, j] = self[k, j], self[k, i]

    def row_del(self, i):
        """Delete the given row.

        Examples
        ========

        >>> from sympy.matrices import eye
        >>> M = eye(3)
        >>> M.row_del(1)
        >>> M
        Matrix([
        [1, 0, 0],
        [0, 0, 1]])

        See Also
        ========

        row
        col_del
        """
        # A row is a contiguous run in the flat storage; splice it out.
        self._mat = self._mat[:i*self.cols] + self._mat[(i + 1)*self.cols:]
        self.rows -= 1

    def col_del(self, i):
        """Delete the given column.

        Examples
        ========

        >>> from sympy.matrices import eye
        >>> M = eye(3)
        >>> M.col_del(1)
        >>> M
        Matrix([
        [1, 0],
        [0, 0],
        [0, 1]])

        See Also
        ========

        col
        row_del
        """
        # Delete from the back so earlier flat indices stay valid.
        for j in range(self.rows - 1, -1, -1):
            del self._mat[i + j*self.cols]
        self.cols -= 1

    # Utility functions
    def simplify(self, ratio=1.7, measure=count_ops):
        """Applies simplify to the elements of a matrix in place.

        This is a shortcut for M.applyfunc(lambda x: simplify(x, ratio, measure))

        See Also
        ========

        sympy.simplify.simplify.simplify
        """
        for i in range(len(self._mat)):
            self._mat[i] = _simplify(self._mat[i], ratio=ratio,
                                     measure=measure)

    def fill(self, value):
        """Fill the matrix with the scalar value.

        See Also
        ========

        zeros
        ones
        """
        self._mat = [value]*len(self)
# Public aliases: ``Matrix`` is the default (mutable, dense) matrix type.
MutableMatrix = Matrix = MutableDenseMatrix

###########
# Numpy Utility Functions:
# list2numpy, matrix2numpy, symmarray, rot_axis[123]
###########

def list2numpy(l, dtype=object):  # pragma: no cover
    """Converts python list of SymPy expressions to a NumPy array.

    See Also
    ========

    matrix2numpy
    """
    from numpy import empty
    out = empty(len(l), dtype)
    # Assign element-wise so SymPy objects are stored unconverted.
    for idx, expr in enumerate(l):
        out[idx] = expr
    return out
def matrix2numpy(m, dtype=object):  # pragma: no cover
    """Converts SymPy's matrix to a NumPy array.

    See Also
    ========

    list2numpy
    """
    from numpy import empty
    out = empty(m.shape, dtype)
    # Copy entry by entry so SymPy objects are stored unconverted.
    for r in range(m.rows):
        for c in range(m.cols):
            out[r, c] = m[r, c]
    return out
@doctest_depends_on(modules=('numpy',))
def symarray(prefix, shape):  # pragma: no cover
    """Create a numpy ndarray of symbols (as an object array).

    The created symbols are named ``prefix_i1_i2_``... You should thus provide a
    non-empty prefix if you want your symbols to be unique for different output
    arrays, as SymPy symbols with identical names are the same object.

    Parameters
    ----------
    prefix : string
      A prefix prepended to the name of every symbol.

    shape : int or tuple
      Shape of the created array. If an int, the array is one-dimensional; for
      more than one dimension the shape must be a tuple.

    Examples
    --------
    These doctests require numpy.

    >>> from sympy import symarray
    >>> symarray('', 3)
    [_0 _1 _2]

    If you want multiple symarrays to contain distinct symbols, you *must*
    provide unique prefixes:

    >>> a = symarray('', 3)
    >>> b = symarray('', 3)
    >>> a[0] == b[0]
    True
    >>> a = symarray('a', 3)
    >>> b = symarray('b', 3)
    >>> a[0] == b[0]
    False

    Creating symarrays with a prefix:

    >>> symarray('a', 3)
    [a_0 a_1 a_2]

    For more than one dimension, the shape must be given as a tuple:

    >>> symarray('a', (2, 3))
    [[a_0_0 a_0_1 a_0_2]
     [a_1_0 a_1_1 a_1_2]]
    >>> symarray('a', (2, 3, 2))
    [[[a_0_0_0 a_0_0_1]
      [a_0_1_0 a_0_1_1]
      [a_0_2_0 a_0_2_1]]
    <BLANKLINE>
     [[a_1_0_0 a_1_0_1]
      [a_1_1_0 a_1_1_1]
      [a_1_2_0 a_1_2_1]]]

    """
    from numpy import empty, ndindex
    out = empty(shape, dtype=object)
    # One Symbol per cell, named prefix_<underscore-joined index tuple>.
    for idx in ndindex(shape):
        name = '_'.join([prefix] + [str(k) for k in idx])
        out[idx] = Symbol(name)
    return out
def rot_axis3(theta):
    """Returns a rotation matrix for a rotation of theta (in radians) about
    the 3-axis.

    Examples
    ========

    >>> from sympy import pi
    >>> from sympy.matrices import rot_axis3

    A rotation of pi/3 (60 degrees):

    >>> theta = pi/3
    >>> rot_axis3(theta)
    Matrix([
    [       1/2, sqrt(3)/2, 0],
    [-sqrt(3)/2,       1/2, 0],
    [         0,         0, 1]])

    If we rotate by pi/2 (90 degrees):

    >>> rot_axis3(pi/2)
    Matrix([
    [ 0, 1, 0],
    [-1, 0, 0],
    [ 0, 0, 1]])

    See Also
    ========

    rot_axis1: Returns a rotation matrix for a rotation of theta (in radians)
        about the 1-axis
    rot_axis2: Returns a rotation matrix for a rotation of theta (in radians)
        about the 2-axis
    """
    c, s = cos(theta), sin(theta)
    return Matrix((
        (c, s, 0),
        (-s, c, 0),
        (0, 0, 1)))
def rot_axis2(theta):
    """Returns a rotation matrix for a rotation of theta (in radians) about
    the 2-axis.

    Examples
    ========

    >>> from sympy import pi
    >>> from sympy.matrices import rot_axis2

    A rotation of pi/3 (60 degrees):

    >>> theta = pi/3
    >>> rot_axis2(theta)
    Matrix([
    [      1/2, 0, -sqrt(3)/2],
    [        0, 1,          0],
    [sqrt(3)/2, 0,        1/2]])

    If we rotate by pi/2 (90 degrees):

    >>> rot_axis2(pi/2)
    Matrix([
    [0, 0, -1],
    [0, 1,  0],
    [1, 0,  0]])

    See Also
    ========

    rot_axis1: Returns a rotation matrix for a rotation of theta (in radians)
        about the 1-axis
    rot_axis3: Returns a rotation matrix for a rotation of theta (in radians)
        about the 3-axis
    """
    c, s = cos(theta), sin(theta)
    return Matrix((
        (c, 0, -s),
        (0, 1, 0),
        (s, 0, c)))
def rot_axis1(theta):
    """Returns a rotation matrix for a rotation of theta (in radians) about
    the 1-axis.

    Examples
    ========

    >>> from sympy import pi
    >>> from sympy.matrices import rot_axis1

    A rotation of pi/3 (60 degrees):

    >>> theta = pi/3
    >>> rot_axis1(theta)
    Matrix([
    [1,          0,         0],
    [0,        1/2, sqrt(3)/2],
    [0, -sqrt(3)/2,       1/2]])

    If we rotate by pi/2 (90 degrees):

    >>> rot_axis1(pi/2)
    Matrix([
    [1,  0, 0],
    [0,  0, 1],
    [0, -1, 0]])

    See Also
    ========

    rot_axis2: Returns a rotation matrix for a rotation of theta (in radians)
        about the 2-axis
    rot_axis3: Returns a rotation matrix for a rotation of theta (in radians)
        about the 3-axis
    """
    c, s = cos(theta), sin(theta)
    return Matrix((
        (1, 0, 0),
        (0, c, s),
        (0, -s, c)))
###############
# Functions
###############
def matrix_multiply_elementwise(A, B):
    """Return the Hadamard product (elementwise product) of A and B

    >>> from sympy.matrices import matrix_multiply_elementwise
    >>> from sympy.matrices import Matrix
    >>> A = Matrix([[0, 1, 2], [3, 4, 5]])
    >>> B = Matrix([[1, 10, 100], [100, 10, 1]])
    >>> matrix_multiply_elementwise(A, B)
    Matrix([
    [  0, 10, 200],
    [300, 40,   5]])

    See Also
    ========

    __mul__
    """
    if A.shape != B.shape:
        raise ShapeError()
    rows, cols = A.shape
    # classof picks the common result class for the two operand types.
    return classof(A, B)._new(rows, cols, lambda i, j: A[i, j]*B[i, j])
def ones(r, c=None, cls=None):
    """Returns a matrix of ones with ``r`` rows and ``c`` columns;
    if ``c`` is omitted a square matrix will be returned.

    The matrix class may be selected with the ``cls`` keyword, defaulting
    to the dense mutable ``Matrix``.

    See Also
    ========

    zeros
    eye
    diag
    """
    # Accept ``cls`` for consistency with the sibling ``zeros`` and ``eye``
    # factories (backward compatible: the default is the old behavior).
    if cls is None:
        from .dense import Matrix as cls
    c = r if c is None else c
    r = as_int(r)
    c = as_int(c)
    return cls._new(r, c, [S.One]*r*c)
def zeros(r, c=None, cls=None):
    """Returns a matrix of zeros with ``r`` rows and ``c`` columns;
    if ``c`` is omitted a square matrix will be returned.

    See Also
    ========

    ones
    eye
    diag
    """
    if cls is None:
        # Imported lazily to avoid a circular import at module load.
        from .dense import Matrix as default_cls
        cls = default_cls
    return cls.zeros(r, c)
def eye(n, cls=None):
    """Create square identity matrix n x n

    See Also
    ========

    diag
    zeros
    ones
    """
    if cls is None:
        # Imported lazily to avoid a circular import at module load.
        from sympy.matrices import Matrix as default_cls
        cls = default_cls
    return cls.eye(n)
def diag(*values, **kwargs):
    """Create a sparse, diagonal matrix from a list of diagonal values.

    Notes
    =====

    When arguments are matrices they are fitted in resultant matrix.

    The returned matrix is a mutable, dense matrix. To make it a different
    type, send the desired class for keyword ``cls``.

    Examples
    ========

    >>> from sympy.matrices import diag, Matrix, ones
    >>> diag(1, 2, 3)
    Matrix([
    [1, 0, 0],
    [0, 2, 0],
    [0, 0, 3]])
    >>> diag(*[1, 2, 3])
    Matrix([
    [1, 0, 0],
    [0, 2, 0],
    [0, 0, 3]])

    The diagonal elements can be matrices; diagonal filling will
    continue on the diagonal from the last element of the matrix:

    >>> from sympy.abc import x, y, z
    >>> a = Matrix([x, y, z])
    >>> b = Matrix([[1, 2], [3, 4]])
    >>> c = Matrix([[5, 6]])
    >>> diag(a, 7, b, c)
    Matrix([
    [x, 0, 0, 0, 0, 0],
    [y, 0, 0, 0, 0, 0],
    [z, 0, 0, 0, 0, 0],
    [0, 7, 0, 0, 0, 0],
    [0, 0, 1, 2, 0, 0],
    [0, 0, 3, 4, 0, 0],
    [0, 0, 0, 0, 5, 6]])

    When diagonal elements are lists, they will be treated as arguments
    to Matrix:

    >>> diag([1, 2, 3], 4)
    Matrix([
    [1, 0],
    [2, 0],
    [3, 0],
    [0, 4]])
    >>> diag([[1, 2, 3]], 4)
    Matrix([
    [1, 2, 3, 0],
    [0, 0, 0, 4]])

    A given band off the diagonal can be made by padding with a
    vertical or horizontal "kerning" vector:

    >>> hpad = ones(0, 2)
    >>> vpad = ones(2, 0)
    >>> diag(vpad, 1, 2, 3, hpad) + diag(hpad, 4, 5, 6, vpad)
    Matrix([
    [0, 0, 4, 0, 0],
    [0, 0, 0, 5, 0],
    [1, 0, 0, 0, 6],
    [0, 2, 0, 0, 0],
    [0, 0, 3, 0, 0]])

    The type is mutable by default but can be made immutable by setting
    the ``mutable`` flag to False:

    >>> type(diag(1))
    <class 'sympy.matrices.dense.MutableDenseMatrix'>
    >>> from sympy.matrices import ImmutableMatrix
    >>> type(diag(1, cls=ImmutableMatrix))
    <class 'sympy.matrices.immutable.ImmutableMatrix'>

    See Also
    ========

    eye
    """
    from .sparse import MutableSparseMatrix

    cls = kwargs.pop('cls', None)
    if cls is None:
        from .dense import Matrix as cls

    if kwargs:
        # ``cls`` is the only recognized keyword; anything left is an error.
        raise ValueError('unrecognized keyword%s: %s' % (
            's' if len(kwargs) > 1 else '',
            ', '.join(kwargs.keys())))
    # First pass: normalize sequence arguments to matrices and accumulate
    # the total size of the result.
    rows = 0
    cols = 0
    values = list(values)
    for i in range(len(values)):
        m = values[i]
        if isinstance(m, MatrixBase):
            rows += m.rows
            cols += m.cols
        elif is_sequence(m):
            m = values[i] = Matrix(m)
            rows += m.rows
            cols += m.cols
        else:
            # Scalar: occupies a single diagonal cell.
            rows += 1
            cols += 1
    # Second pass: place each value at the running diagonal cursor; built
    # sparsely since most entries are zero.
    res = MutableSparseMatrix.zeros(rows, cols)
    i_row = 0
    i_col = 0
    for m in values:
        if isinstance(m, MatrixBase):
            res[i_row:i_row + m.rows, i_col:i_col + m.cols] = m
            i_row += m.rows
            i_col += m.cols
        else:
            res[i_row, i_col] = m
            i_row += 1
            i_col += 1
    return cls._new(res)
def jordan_cell(eigenval, n):
    """
    Create matrix of Jordan cell kind:

    Examples
    ========

    >>> from sympy.matrices import jordan_cell
    >>> from sympy.abc import x
    >>> jordan_cell(x, 4)
    Matrix([
    [x, 1, 0, 0],
    [0, x, 1, 0],
    [0, 0, x, 1],
    [0, 0, 0, x]])
    """
    n = as_int(n)
    cell = zeros(n)
    # Eigenvalue on the diagonal, ones on the first superdiagonal.
    for i in range(n):
        cell[i, i] = eigenval
        if i + 1 < n:
            cell[i, i + 1] = S.One
    return cell
def hessian(f, varlist, constraints=()):
    """Compute Hessian matrix for a function f wrt parameters in varlist
    which may be given as a sequence or a row/column vector. A list of
    constraints may optionally be given.

    Examples
    ========

    >>> from sympy import Function, hessian, pprint
    >>> from sympy.abc import x, y
    >>> f = Function('f')(x, y)
    >>> g1 = Function('g')(x, y)
    >>> g2 = x**2 + 3*y
    >>> pprint(hessian(f, (x, y), [g1, g2]))
    [                   d               d            ]
    [     0        0   --(g(x, y))     --(g(x, y))  ]
    [                  dx              dy           ]
    [                                               ]
    [     0        0      2*x              3        ]
    [                                               ]
    [                   2               2           ]
    [d                 d               d            ]
    [--(g(x, y))  2*x  ---(f(x, y))    -----(f(x, y))]
    [dx                  2             dy dx         ]
    [                  dx                            ]
    [                                                ]
    [                   2               2            ]
    [d                 d               d             ]
    [--(g(x, y))   3   -----(f(x, y))  ---(f(x, y))  ]
    [dy                dy dx             2           ]
    [                                  dy            ]

    References
    ==========

    http://en.wikipedia.org/wiki/Hessian_matrix

    See Also
    ========

    sympy.matrices.mutable.Matrix.jacobian
    wronskian
    """
    # ``constraints`` defaults to an (immutable) empty tuple rather than a
    # mutable list; it is only read here, so behavior is unchanged.
    # f is the expression representing a function f, return regular matrix
    if isinstance(varlist, MatrixBase):
        if 1 not in varlist.shape:
            raise ShapeError("`varlist` must be a column or row vector.")
        if varlist.cols == 1:
            varlist = varlist.T
        varlist = varlist.tolist()[0]
    if is_sequence(varlist):
        n = len(varlist)
        if not n:
            raise ShapeError("`len(varlist)` must not be zero.")
    else:
        raise ValueError("Improper variable list in hessian function")
    # Use a getattr default so a non-differentiable object raises the
    # intended ValueError instead of an AttributeError.
    if not getattr(f, 'diff', None):
        # check differentiability
        raise ValueError("Function `f` (%s) is not differentiable" % f)
    m = len(constraints)
    N = m + n
    out = zeros(N)
    for k, g in enumerate(constraints):
        if not getattr(g, 'diff', None):
            # check differentiability; report the offending constraint g
            # (the original message incorrectly blamed f).
            raise ValueError("Function `g` (%s) is not differentiable" % g)
        for i in range(n):
            out[k, i + m] = g.diff(varlist[i])
    for i in range(n):
        for j in range(i, n):
            out[i + m, j + m] = f.diff(varlist[i]).diff(varlist[j])
    # Mirror the upper triangle: the bordered Hessian is symmetric.
    for i in range(N):
        for j in range(i + 1, N):
            out[j, i] = out[i, j]
    return out
def GramSchmidt(vlist, orthog=False):
    """
    Apply the Gram-Schmidt process to a set of vectors.

    see: http://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process
    """
    out = []
    for vec in vlist:
        # Subtract the projection of the *original* vector onto each
        # previously orthogonalized vector (classical Gram-Schmidt).
        tmp = vec
        for prev in out:
            tmp -= vec.project(prev)
        if not tmp.values():
            raise ValueError(
                "GramSchmidt: vector set not linearly independent")
        out.append(tmp)
    if orthog:
        out = [v.normalized() for v in out]
    return out
def wronskian(functions, var, method='bareis'):
    """
    Compute Wronskian for [] of functions

    ::

                         | f1       f2        ...   fn      |
                         | f1'      f2'       ...   fn'     |
                         |  .        .        .      .      |
        W(f1, ..., fn) = |  .        .         .     .      |
                         |  .        .          .    .      |
                         |  (n)      (n)            (n)     |
                         | D   (f1) D   (f2)  ...  D   (fn) |

    see: http://en.wikipedia.org/wiki/Wronskian

    See Also
    ========

    sympy.matrices.mutable.Matrix.jacobian
    hessian
    """
    from .dense import Matrix

    # Sympify in place, mirroring the original behavior of normalizing the
    # caller's list as a side effect.
    for k in range(len(functions)):
        functions[k] = sympify(functions[k])
    order = len(functions)
    if not order:
        return 1
    W = Matrix(order, order, lambda i, j: functions[i].diff(var, j))
    return W.det(method)
def casoratian(seqs, n, zero=True):
    """Given linear difference operator L of order 'k' and homogeneous
    equation Ly = 0 we want to compute kernel of L, which is a set
    of 'k' sequences: a(n), b(n), ... z(n).

    Solutions of L are linearly independent iff their Casoratian,
    denoted as C(a, b, ..., z), do not vanish for n = 0.

    Casoratian is defined by k x k determinant::

        +  a(n)     b(n)     . . . z(n)     +
        |  a(n+1)   b(n+1)   . . . z(n+1)   |
        |    .         .     .        .     |
        |    .         .       .      .     |
        |    .         .         .    .     |
        +  a(n+k-1) b(n+k-1) . . . z(n+k-1) +

    It proves very useful in rsolve_hyper() where it is applied
    to a generating set of a recurrence to factor out linearly
    dependent solutions and return a basis:

    >>> from sympy import Symbol, casoratian, factorial
    >>> n = Symbol('n', integer=True)

    Exponential and factorial are linearly independent:

    >>> casoratian([2**n, factorial(n)], n) != 0
    True

    """
    from .dense import Matrix

    seqs = [sympify(q) for q in seqs]
    # With zero=True the Casoratian is evaluated at n = 0 (entry i), else
    # symbolically at n + i.
    if zero:
        entry = lambda i, j: seqs[j].subs(n, i)
    else:
        entry = lambda i, j: seqs[j].subs(n, n + i)
    k = len(seqs)
    return Matrix(k, k, entry).det()
def randMatrix(r, c=None, min=0, max=99, seed=None, symmetric=False, percent=100):
    """Create random matrix with dimensions ``r`` x ``c``. If ``c`` is omitted
    the matrix will be square. If ``symmetric`` is True the matrix must be
    square. If ``percent`` is less than 100 then only approximately the given
    percentage of elements will be non-zero.

    Examples
    ========

    >>> from sympy.matrices import randMatrix
    >>> randMatrix(3) # doctest:+SKIP
    [25, 45, 27]
    [44, 54,  9]
    [23, 96, 46]
    >>> randMatrix(3, 2) # doctest:+SKIP
    [87, 29]
    [23, 37]
    [90, 26]
    >>> randMatrix(3, 3, 0, 2) # doctest:+SKIP
    [0, 2, 0]
    [2, 0, 1]
    [0, 0, 1]
    >>> randMatrix(3, symmetric=True) # doctest:+SKIP
    [85, 26, 29]
    [26, 71, 43]
    [29, 43, 57]
    >>> A = randMatrix(3, seed=1)
    >>> B = randMatrix(3, seed=2)
    >>> A == B # doctest:+SKIP
    False
    >>> A == randMatrix(3, seed=1)
    True
    >>> randMatrix(3, symmetric=True, percent=50) # doctest:+SKIP
    [0, 68, 43]
    [0, 68,  0]
    [0, 91, 34]
    """
    if c is None:
        c = r
    # A seeded generator gives reproducible matrices; otherwise seed from
    # system entropy/time.
    if seed is None:
        prng = random.Random()  # use system time
    else:
        prng = random.Random(seed)
    if symmetric and r != c:
        raise ValueError(
            'For symmetric matrices, r must equal c, but %i != %i' % (r, c))
    if not symmetric:
        m = Matrix._new(r, c, lambda i, j: prng.randint(min, max))
    else:
        # Fill the upper triangle, then mirror it into the lower triangle.
        m = zeros(r)
        for i in range(r):
            for j in range(i, r):
                m[i, j] = prng.randint(min, max)
        for i in range(r):
            for j in range(i):
                m[i, j] = m[j, i]
    if percent == 100:
        return m
    # Zero out the *complement* of ``percent`` so that approximately
    # ``percent`` % of entries remain non-zero, as documented. The previous
    # code used ``r*c*percent // 100`` and thus zeroed ``percent`` % instead.
    z = int(r*c*(100 - percent) // 100)
    m._mat[:z] = [S.Zero]*z
    prng.shuffle(m._mat)
    # NOTE(review): shuffling the flat storage does not preserve symmetry
    # when ``symmetric=True`` and ``percent < 100`` — confirm intended.
    return m
|
vipulroxx/sympy
|
sympy/matrices/dense.py
|
Python
|
bsd-3-clause
| 42,837
|
[
"DIRAC"
] |
d266a5a63800a51b706b1f725794846db715db8c9a1e406cde7d0abd6151670d
|
#!/usr/bin/env python3
import logging
import os
import pathlib
import numpy as np
import pytest
from pysisyphus.calculators.ORCA import ORCA
from pysisyphus.constants import ANG2BOHR
from pysisyphus.Geometry import Geometry
from pysisyphus.irc.GonzalesSchlegel import GonzalesSchlegel
from pysisyphus.irc.DampedVelocityVerlet import DampedVelocityVerlet
from pysisyphus.irc.Euler import Euler
from pysisyphus.irc.ParamPlot import ParamPlot
from qchelper.geometry import parse_xyz_file
def prepare_geometry(keywords=None, xyz_fn=None):
    """Set up a Geometry with an ORCA calculator for the IRC tests.

    Parameters
    ----------
    keywords : str, optional
        ORCA keyword line; defaults to "HF STO-3G TightSCF".
    xyz_fn : str, optional
        XYZ file name relative to this test directory; defaults to the
        STO-3G HF-abstraction transition-state geometry.

    Returns
    -------
    tuple
        (geometry, this_dir); the geometry's Hessian has been computed.
    """
    this_dir = pathlib.Path(os.path.dirname(os.path.realpath(__file__)))
    if not keywords:
        keywords = "HF STO-3G TightSCF"
    if not xyz_fn:
        xyz_fn = "hfabstraction_sto3g_ts.xyz"
    #blocks = "%pal nprocs 3 end"
    blocks = ""
    atoms, coords = parse_xyz_file(this_dir / xyz_fn)
    # Input XYZ coordinates are in Angstrom; convert to Bohr.
    coords *= ANG2BOHR
    geometry = Geometry(atoms, coords.flatten())
    geometry.set_calculator(ORCA(keywords, charge=0, mult=1, blocks=blocks))
    # Touch the property so the Hessian is computed up front; the previous
    # code bound it to an unused local variable.
    geometry.hessian
    return geometry, this_dir
@pytest.mark.orca_irc
def test_hfabstraction_iso_gs_hfsto3g():
    """Short Gonzales-Schlegel IRC from the STO-3G TS."""
    geom, out_dir = prepare_geometry()
    irc_run = GonzalesSchlegel(geom, max_steps=5, step_length=0.3)
    irc_run.run()
    irc_run.write_trj(out_dir)
@pytest.mark.orca_irc
def test_hfabstraction_iso_dvv_hfsto3g():
    """Short damped-velocity-Verlet IRC from the STO-3G TS."""
    geom, out_dir = prepare_geometry()
    irc_run = DampedVelocityVerlet(geom, max_steps=5, v0=0.2)
    irc_run.run()
    irc_run.write_trj(out_dir)
@pytest.mark.orca_irc
def test_hfabstraction_iso_euler_hfsto3g():
    """Mass-weighted Euler IRC from the STO-3G TS."""
    geom, out_dir = prepare_geometry()
    irc_run = Euler(geom, max_steps=100, step_length=0.025)
    irc_run.run()
    irc_run.write_trj(out_dir, "hf_sto3g_mw")
@pytest.mark.orca_irc
def test_hfabstraction_iso_euler_hfsto3g_no_mw():
    """Non-mass-weighted Euler IRC from the STO-3G TS."""
    geom, out_dir = prepare_geometry()
    irc_run = Euler(geom, max_steps=100, mass_weight=False, step_length=0.025)
    irc_run.run()
    irc_run.write_trj(out_dir, "hf_sto3g_nomw")
@pytest.mark.orca_irc
def test_hfabstraction_iso_euler_hf422gsp():
    """Mass-weighted Euler IRC at the HF/4-22GSP level."""
    geom, out_dir = prepare_geometry("HF 4-22GSP TightSCF",
                                     "07_hfabstraction_hf422gsp_ts.xyz")
    irc_run = Euler(geom, max_steps=175, step_length=0.05)
    irc_run.run()
    irc_run.write_trj(out_dir, "hf_422gsp_mw")
@pytest.mark.orca_irc
def test_hfabstraction_iso_euler_hf422gsp_no_mw():
    """Non-mass-weighted Euler IRC at HF/4-22GSP, plus a parameter plot."""
    prefix = "hf_422gsp_nomw"
    geom, out_dir = prepare_geometry("HF 4-22GSP TightSCF",
                                     "07_hfabstraction_hf422gsp_ts.xyz")
    irc_run = Euler(geom, max_steps=125, mass_weight=False, step_length=0.025)
    irc_run.run()
    irc_run.write_trj(out_dir, prefix)
    # Plot internal parameters along the path: atom pair (3, 7) and atom
    # triple (0, 3, 7) — see ParamPlot for their interpretation.
    plot = ParamPlot(irc_run.all_coords, (3, 7), (0, 3, 7))
    plot.plot()
    plot.show()
    plot.save(out_dir, prefix)
@pytest.mark.orca_irc
def tmp():
    """Scratch run: a single forward Euler step at HF/4-22GSP.

    NOTE(review): pytest will not collect this function (its name does not
    start with ``test_``) despite the marker; it only runs via __main__.
    """
    geom, out_dir = prepare_geometry("HF 4-22GSP TightSCF",
                                     "07_hfabstraction_hf422gsp_ts.xyz")
    irc_run = Euler(geom, max_steps=1, step_length=0.025, forward=True,
                    mass_weight=False)
    irc_run.run()
    irc_run.write_trj(out_dir, "hf_422gsp_mw")
if __name__ == "__main__":
    # Run selected IRC integration tests directly (without pytest); the
    # commented-out calls are alternative runs kept for manual use.
    #test_hfabstraction_iso_gs_hfsto3g()
    #test_hfabstraction_iso_dvv_hfsto3g()
    #test_hfabstraction_iso_euler_hfsto3g()
    #test_hfabstraction_iso_euler_hfsto3g_no_mw()
    test_hfabstraction_iso_euler_hf422gsp()
    test_hfabstraction_iso_euler_hf422gsp_no_mw()
    #tmp()
|
eljost/pysisyphus
|
tests_staging/irc_hfabstraction/test_hfabstraction_irc.py
|
Python
|
gpl-3.0
| 3,585
|
[
"ORCA"
] |
0adaa99829bf234d6564ffde70865b6cf4990c2c077ecac337429e2873bcfe13
|
# coding=utf-8
# Copyright 2022 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wide ResNet 28-10 with SNGP on CIFAR-10.
Spectral-normalized neural GP (SNGP) [1] is a simple method to improve
a deterministic neural network's uncertainty by applying spectral
normalization to the hidden layers, and then replace the dense output layer
with a Gaussian process layer.
## Combining with MC Dropout:
As a single-model method, SNGP can be combined with other classic
uncertainty techniques (e.g., Monte Carlo dropout, deep ensemble) to further
improve performance.
This script supports adding Monte Carlo dropout to
SNGP by setting `use_mc_dropout=True`, setting `num_dropout_samples=10`
(or any integer larger than 1). Additionally we recommend adjusting
`gp_mean_field_factor` slightly, since averaging already calibrated
individual models (in this case single SNGPs) can sometimes lead to
under-confidence [3].
## References:
[1]: Jeremiah Liu et al. Simple and Principled Uncertainty Estimation with
Deterministic Deep Learning via Distance Awareness.
_arXiv preprint arXiv:2006.10108_, 2020.
https://arxiv.org/abs/2006.10108
[2]: Zhiyun Lu, Eugene Ie, Fei Sha. Uncertainty Estimation with Infinitesimal
Jackknife. _arXiv preprint arXiv:2006.07584_, 2020.
https://arxiv.org/abs/2006.07584
[3]: Rahul Rahaman, Alexandre H. Thiery. Uncertainty Quantification and Deep
Ensembles. _arXiv preprint arXiv:2007.08792_, 2020.
https://arxiv.org/abs/2007.08792
[4]: Hendrycks, Dan et al. AugMix: A Simple Data Processing Method to Improve
Robustness and Uncertainty. In _International Conference on Learning
Representations_, 2020.
https://arxiv.org/abs/1912.02781
[5]: Zhang, Hongyi et al. mixup: Beyond Empirical Risk Minimization. In
_International Conference on Learning Representations_, 2018.
https://arxiv.org/abs/1710.09412
"""
import functools
import os
import time
from absl import app
from absl import flags
from absl import logging
import edward2 as ed
from experimental.marginalization_mixup import data_utils # local file import
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import uncertainty_baselines as ub
from uncertainty_baselines import schedules
from uncertainty_baselines.baselines.cifar import utils
import uncertainty_metrics as um
# Command-line flags. Help-string fixes: several implicitly-concatenated
# string literals were missing separating spaces, plus typos ('simgle',
# 'diable', 'forget_mix').
flags.DEFINE_integer('seed', 42, 'Random seed.')
flags.DEFINE_integer('per_core_batch_size', 64, 'Batch size per TPU core/GPU.')
flags.DEFINE_float('train_proportion', default=1.0,
                   help='only use a proportion of training set.')
flags.DEFINE_float('base_learning_rate', 0.04,
                   'Base learning rate when total batch size is 128. It is '
                   'scaled by the ratio of the total batch size to 128.')
flags.DEFINE_integer('lr_warmup_epochs', 1,
                     'Number of epochs for a linear warmup to the initial '
                     'learning rate. Use 0 to do no warmup.')
flags.DEFINE_float('lr_decay_ratio', 0.2, 'Amount to decay learning rate.')
flags.DEFINE_list('lr_decay_epochs', ['60', '120', '160'],
                  'Epochs to decay learning rate by.')
flags.DEFINE_float('l2', 3e-4, 'L2 regularization coefficient.')
flags.DEFINE_enum('dataset', 'cifar10',
                  enum_values=['cifar10', 'cifar100'],
                  help='Dataset.')
flags.DEFINE_string('cifar100_c_path', None,
                    'Path to the TFRecords files for CIFAR-100-C. Only valid '
                    '(and required) if dataset is cifar100 and corruptions.')
flags.DEFINE_integer('corruptions_interval', 250,
                     'Number of epochs between evaluating on the corrupted '
                     'test data. Use -1 to never evaluate.')
flags.DEFINE_integer('checkpoint_interval', 250,
                     'Number of epochs between saving checkpoints. Use -1 to '
                     'never save checkpoints.')
flags.DEFINE_integer('num_bins', 15, 'Number of bins for ECE.')
flags.DEFINE_string('output_dir', '/tmp/cifar', 'Output directory.')
flags.DEFINE_integer('train_epochs', 250, 'Number of training epochs.')

# Data Augmentation flags.
flags.DEFINE_bool('augmix', False,
                  'Whether to perform AugMix [4] on the input data.')
flags.DEFINE_integer('aug_count', 1,
                     'Number of augmentation operations in AugMix to perform '
                     'on the input image. In the single model context, it '
                     'should be 1. In the ensembles context, it should be '
                     'ensemble_size if we perform random_augment only; it '
                     'should be (ensemble_size - 1) if we perform augmix.')
flags.DEFINE_float('augmix_prob_coeff', 0.5, 'Augmix probability coefficient.')
flags.DEFINE_integer('augmix_depth', -1,
                     'Augmix depth, -1 meaning sampled depth. This '
                     'corresponds to line 7 in the Algorithm box in [4].')
flags.DEFINE_integer('augmix_width', 3,
                     'Augmix width. This corresponds to the k in line 5 in '
                     'the Algorithm box in [4].')
flags.DEFINE_bool('random_augment', False, 'Whether to use random augment.')
flags.DEFINE_float('mixup_alpha', 0., 'Mixup hyperparameter, 0. to disable.')
flags.DEFINE_bool('adaptive_mixup', False, 'Whether to use adaptive mixup.')
flags.DEFINE_bool('validation', False, 'Whether to use validation set.')
flags.DEFINE_bool('forget_mixup', False,
                  'Whether to mixup data based on forgetting counts. Only one '
                  'of the forget_mixup or adaptive_mixup can be True.')
flags.DEFINE_integer('forget_threshold', 2, '1 / forget_threshold of training '
                     'examples will have mixup applied.')

# Dropout flags
flags.DEFINE_bool('use_mc_dropout', False,
                  'Whether to use Monte Carlo dropout for the hidden layers.')
flags.DEFINE_float('dropout_rate', 0.1, 'Dropout rate.')
flags.DEFINE_integer('num_dropout_samples', 1,
                     'Number of dropout samples to use for prediction.')
flags.DEFINE_integer('num_dropout_samples_training', 1,
                     'Number of dropout samples for training.')

# SNGP flags.
flags.DEFINE_bool('use_spec_norm', True,
                  'Whether to apply spectral normalization.')
flags.DEFINE_integer(
    'spec_norm_iteration', 1,
    'Number of power iterations to perform for estimating '
    'the spectral norm of weight matrices.')
flags.DEFINE_float('spec_norm_bound', 6.,
                   'Upper bound to spectral norm of weight matrices.')

# Gaussian process flags.
flags.DEFINE_bool('use_gp_layer', True,
                  'Whether to use Gaussian process as the output layer.')
flags.DEFINE_float('gp_bias', 0., 'The bias term for GP layer.')
flags.DEFINE_float(
    'gp_scale', 2.,
    'The length-scale parameter for the RBF kernel of the GP layer.')
flags.DEFINE_integer(
    'gp_input_dim', 128,
    'The dimension to reduce the neural network input for the GP layer '
    '(via random Gaussian projection which preserves distance by the '
    'Johnson-Lindenstrauss lemma). If -1, no dimension reduction.')
flags.DEFINE_integer(
    'gp_hidden_dim', 1024,
    'The hidden dimension of the GP layer, which corresponds to the number of '
    'random features used for the approximation.')
flags.DEFINE_bool(
    'gp_input_normalization', True,
    'Whether to normalize the input using LayerNorm for GP layer. '
    'This is similar to automatic relevance determination (ARD) in the classic '
    'GP learning.')
flags.DEFINE_float('gp_cov_ridge_penalty', 1e-3,
                   'Ridge penalty parameter for GP posterior covariance.')
flags.DEFINE_float(
    'gp_cov_discount_factor', 0.999,
    'The discount factor to compute the moving average of precision matrix.')
flags.DEFINE_float(
    'gp_mean_field_factor', 0.001,
    'The tunable multiplicative factor used in the mean-field approximation '
    'for the posterior mean of softmax Gaussian process. If -1 then use '
    'posterior mode instead of posterior mean. See [2] for detail.')

# Accelerator flags.
flags.DEFINE_bool('use_gpu', False, 'Whether to run on GPU or otherwise TPU.')
flags.DEFINE_bool('use_bfloat16', False, 'Whether to use mixed precision.')
flags.DEFINE_integer('num_cores', 8, 'Number of TPU cores or number of GPUs.')
flags.DEFINE_string('tpu', None,
                    'Name of the TPU. Only used if use_gpu is False.')
FLAGS = flags.FLAGS
def main(argv):
  """Train and evaluate a Wide ResNet 28-10 SNGP model on CIFAR-10/100."""
  del argv  # unused arg
  tf.io.gfile.makedirs(FLAGS.output_dir)
  logging.info('Saving checkpoints at %s', FLAGS.output_dir)
  tf.random.set_seed(FLAGS.seed)

  # Pick the distribution strategy: mirrored GPUs or a TPU pod/slice.
  if FLAGS.use_gpu:
    logging.info('Use GPU')
    strategy = tf.distribute.MirroredStrategy()
  else:
    logging.info('Use TPU at %s',
                 FLAGS.tpu if FLAGS.tpu is not None else 'local')
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)

  # Augmentation configuration forwarded to the input pipeline.
  aug_params = {
      'augmix': FLAGS.augmix,
      'aug_count': FLAGS.aug_count,
      'augmix_depth': FLAGS.augmix_depth,
      'augmix_prob_coeff': FLAGS.augmix_prob_coeff,
      'augmix_width': FLAGS.augmix_width,
      'ensemble_size': 1,
      'mixup_alpha': FLAGS.mixup_alpha,
      'adaptive_mixup': FLAGS.adaptive_mixup,
      'random_augment': FLAGS.random_augment,
      'forget_mixup': FLAGS.forget_mixup,
      'num_cores': FLAGS.num_cores,
      'threshold': FLAGS.forget_threshold,
  }
  # Train batch is shrunk so that tiling by num_dropout_samples_training
  # keeps the per-step memory constant.
  batch_size = (FLAGS.per_core_batch_size * FLAGS.num_cores
                // FLAGS.num_dropout_samples_training)
  train_input_fn = data_utils.load_input_fn(
      split=tfds.Split.TRAIN,
      name=FLAGS.dataset,
      batch_size=batch_size,
      use_bfloat16=FLAGS.use_bfloat16,
      proportion=FLAGS.train_proportion,
      validation_set=FLAGS.validation,
      aug_params=aug_params)
  if FLAGS.validation:
    validation_input_fn = data_utils.load_input_fn(
        split=tfds.Split.VALIDATION,
        name=FLAGS.dataset,
        batch_size=FLAGS.per_core_batch_size,
        use_bfloat16=FLAGS.use_bfloat16,
        validation_set=True)
    val_dataset = strategy.experimental_distribute_datasets_from_function(
        validation_input_fn)
  clean_test_dataset = utils.load_dataset(
      split=tfds.Split.TEST,
      name=FLAGS.dataset,
      batch_size=FLAGS.per_core_batch_size * FLAGS.num_cores,
      use_bfloat16=FLAGS.use_bfloat16)
  train_dataset = strategy.experimental_distribute_dataset(
      train_input_fn())
  test_datasets = {
      'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
  }
  # Optionally add one distributed test dataset per (corruption, intensity).
  if FLAGS.corruptions_interval > 0:
    if FLAGS.dataset == 'cifar10':
      load_c_dataset = utils.load_cifar10_c
    else:
      load_c_dataset = functools.partial(utils.load_cifar100_c,
                                         path=FLAGS.cifar100_c_path)
    corruption_types, max_intensity = utils.load_corrupted_test_info(
        FLAGS.dataset)
    for corruption in corruption_types:
      for intensity in range(1, max_intensity + 1):
        dataset = load_c_dataset(
            corruption_name=corruption,
            corruption_intensity=intensity,
            batch_size=FLAGS.per_core_batch_size * FLAGS.num_cores,
            use_bfloat16=FLAGS.use_bfloat16)
        test_datasets['{0}_{1}'.format(corruption, intensity)] = (
            strategy.experimental_distribute_dataset(dataset))

  ds_info = tfds.builder(FLAGS.dataset).info
  # NOTE(review): identical to the batch_size computed above.
  batch_size = (FLAGS.per_core_batch_size * FLAGS.num_cores
                // FLAGS.num_dropout_samples_training)
  num_train_examples = ds_info.splits['train'].num_examples
  # Train_proportion is a float so need to convert steps_per_epoch to int.
  if FLAGS.validation:
    # TODO(ywenxu): Remove hard-coding validation images.
    steps_per_epoch = int((num_train_examples *
                           FLAGS.train_proportion - 2500) // batch_size)
    steps_per_val = 2500 // (FLAGS.per_core_batch_size * FLAGS.num_cores)
  else:
    # NOTE(review): steps_per_val is only bound in the branch above, but it
    # is read below when FLAGS.adaptive_mixup is set; adaptive_mixup without
    # validation would raise NameError — confirm intended coupling.
    steps_per_epoch = int(
        num_train_examples * FLAGS.train_proportion) // batch_size
  # NOTE(review): eval steps use the dropout-scaled *train* batch_size while
  # test datasets are batched with per_core_batch_size * num_cores; these
  # differ when num_dropout_samples_training > 1 — confirm.
  steps_per_eval = ds_info.splits['test'].num_examples // batch_size
  num_classes = ds_info.features['label'].num_classes

  if FLAGS.use_bfloat16:
    tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')

  summary_writer = tf.summary.create_file_writer(
      os.path.join(FLAGS.output_dir, 'summaries'))

  # Model, optimizer, metrics and checkpoint restore all live inside the
  # strategy scope so variables are created on the right devices.
  with strategy.scope():
    logging.info('Building ResNet model')
    if FLAGS.use_spec_norm:
      logging.info('Use Spectral Normalization with norm bound %.2f',
                   FLAGS.spec_norm_bound)
    if FLAGS.use_gp_layer:
      logging.info('Use GP layer with hidden units %d', FLAGS.gp_hidden_dim)
    model = ub.models.wide_resnet_sngp(
        input_shape=ds_info.features['image'].shape,
        batch_size=batch_size,
        depth=28,
        width_multiplier=10,
        num_classes=num_classes,
        l2=FLAGS.l2,
        use_mc_dropout=FLAGS.use_mc_dropout,
        dropout_rate=FLAGS.dropout_rate,
        use_gp_layer=FLAGS.use_gp_layer,
        gp_input_dim=FLAGS.gp_input_dim,
        gp_hidden_dim=FLAGS.gp_hidden_dim,
        gp_scale=FLAGS.gp_scale,
        gp_bias=FLAGS.gp_bias,
        gp_input_normalization=FLAGS.gp_input_normalization,
        gp_cov_discount_factor=FLAGS.gp_cov_discount_factor,
        gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty,
        use_spec_norm=FLAGS.use_spec_norm,
        spec_norm_iteration=FLAGS.spec_norm_iteration,
        spec_norm_bound=FLAGS.spec_norm_bound)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())
    # Linearly scale learning rate and the decay epochs by vanilla settings.
    base_lr = FLAGS.base_learning_rate * batch_size / 128
    lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                       for start_epoch_str in FLAGS.lr_decay_epochs]
    lr_schedule = schedules.WarmUpPiecewiseConstantSchedule(
        steps_per_epoch,
        base_lr,
        decay_ratio=FLAGS.lr_decay_ratio,
        decay_epochs=lr_decay_epochs,
        warmup_epochs=FLAGS.lr_warmup_epochs)
    optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                        momentum=0.9,
                                        nesterov=True)
    metrics = {
        'train/negative_log_likelihood': tf.keras.metrics.Mean(),
        'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'train/loss': tf.keras.metrics.Mean(),
        'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        'test/stddev': tf.keras.metrics.Mean(),
    }
    if FLAGS.corruptions_interval > 0:
      corrupt_metrics = {}
      for intensity in range(1, max_intensity + 1):
        for corruption in corruption_types:
          dataset_name = '{0}_{1}'.format(corruption, intensity)
          corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
              tf.keras.metrics.Mean())
          corrupt_metrics['test/accuracy_{}'.format(dataset_name)] = (
              tf.keras.metrics.SparseCategoricalAccuracy())
          corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
              um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
          corrupt_metrics['test/stddev_{}'.format(dataset_name)] = (
              tf.keras.metrics.Mean())

    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
    latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
    initial_epoch = 0
    if latest_checkpoint:
      # checkpoint.restore must be within a strategy.scope() so that optimizer
      # slot variables are mirrored.
      checkpoint.restore(latest_checkpoint)
      logging.info('Loaded checkpoint %s', latest_checkpoint)
      initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

  @tf.function
  def train_step(iterator):
    """Training StepFn."""
    def step_fn(inputs):
      """Per-Replica StepFn."""
      if FLAGS.forget_mixup:
        images, labels, idx = inputs
      else:
        images, labels = inputs
      if FLAGS.augmix and FLAGS.aug_count >= 1:
        # Index 0 at augmix preprocessing is the unperturbed image.
        images = images[:, 1, ...]
        # This is for the case of combining AugMix and Mixup.
        if FLAGS.mixup_alpha > 0:
          labels = tf.split(labels, FLAGS.aug_count + 1, axis=0)[1]
      # Replicate the batch for each training-time dropout sample.
      images = tf.tile(images, [FLAGS.num_dropout_samples_training, 1, 1, 1])
      if FLAGS.mixup_alpha > 0:
        labels = tf.tile(labels, [FLAGS.num_dropout_samples_training, 1])
      else:
        labels = tf.tile(labels, [FLAGS.num_dropout_samples_training])

      with tf.GradientTape() as tape:
        logits, _ = model(images, training=True)
        if FLAGS.use_bfloat16:
          logits = tf.cast(logits, tf.float32)
        # Mixup produces soft (one-hot-mixed) labels; otherwise sparse ints.
        if FLAGS.mixup_alpha > 0:
          negative_log_likelihood = tf.reduce_mean(
              tf.keras.losses.categorical_crossentropy(labels,
                                                       logits,
                                                       from_logits=True))
        else:
          negative_log_likelihood = tf.reduce_mean(
              tf.keras.losses.sparse_categorical_crossentropy(labels,
                                                              logits,
                                                              from_logits=True))
        l2_loss = sum(model.losses)
        loss = negative_log_likelihood + l2_loss
        # Scale the loss given the TPUStrategy will reduce sum all gradients.
        scaled_loss = loss / strategy.num_replicas_in_sync

      grads = tape.gradient(scaled_loss, model.trainable_variables)
      optimizer.apply_gradients(zip(grads, model.trainable_variables))

      probs = tf.nn.softmax(logits)
      if FLAGS.mixup_alpha > 0:
        labels = tf.argmax(labels, axis=-1)
      metrics['train/ece'].update_state(labels, probs)
      metrics['train/loss'].update_state(loss)
      metrics['train/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['train/accuracy'].update_state(labels, logits)
      if FLAGS.forget_mixup:
        train_predictions = tf.argmax(probs, -1)
        labels = tf.cast(labels, train_predictions.dtype)
        # For each ensemble member (1 here), we accumulate the accuracy counts.
        accuracy_counts = tf.cast(tf.reshape(
            (train_predictions == labels), [1, -1]),
                                  tf.float32)
        return accuracy_counts, idx

    if FLAGS.forget_mixup:
      return strategy.run(step_fn, args=(next(iterator),))
    else:
      strategy.run(step_fn, args=(next(iterator),))

  @tf.function
  def test_step(iterator, dataset_name):
    """Evaluation StepFn."""
    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs
      logits_list = []
      stddev_list = []
      # Monte Carlo dropout: average predictions over several passes.
      for _ in range(FLAGS.num_dropout_samples):
        logits, covmat = model(images, training=False)
        if FLAGS.use_bfloat16:
          logits = tf.cast(logits, tf.float32)
        logits = ed.layers.utils.mean_field_logits(
            logits, covmat, mean_field_factor=FLAGS.gp_mean_field_factor)
        stddev = tf.sqrt(tf.linalg.diag_part(covmat))

        stddev_list.append(stddev)
        logits_list.append(logits)

      # Logits dimension is (num_samples, batch_size, num_classes).
      logits_list = tf.stack(logits_list, axis=0)
      stddev_list = tf.stack(stddev_list, axis=0)

      stddev = tf.reduce_mean(stddev_list, axis=0)
      probs_list = tf.nn.softmax(logits_list)
      probs = tf.reduce_mean(probs_list, axis=0)

      labels_broadcasted = tf.broadcast_to(
          labels, [FLAGS.num_dropout_samples, labels.shape[0]])
      log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
          labels_broadcasted, logits_list, from_logits=True)
      negative_log_likelihood = tf.reduce_mean(
          -tf.reduce_logsumexp(log_likelihoods, axis=[0]) +
          tf.math.log(float(FLAGS.num_dropout_samples)))

      if dataset_name == 'clean':
        metrics['test/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['test/accuracy'].update_state(labels, probs)
        metrics['test/ece'].update_state(labels, probs)
        metrics['test/stddev'].update_state(stddev)
      elif dataset_name != 'validation':
        corrupt_metrics['test/nll_{}'.format(dataset_name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(dataset_name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(dataset_name)].update_state(
            labels, probs)
        corrupt_metrics['test/stddev_{}'.format(dataset_name)].update_state(
            stddev)

      if dataset_name == 'validation':
        return tf.reshape(probs, [1, -1, num_classes]), labels

    if dataset_name == 'validation':
      return strategy.run(step_fn, args=(next(iterator),))
    else:
      strategy.run(step_fn, args=(next(iterator),))

  metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

  train_iterator = iter(train_dataset)
  forget_counts_history = []
  start_time = time.time()
  for epoch in range(initial_epoch, FLAGS.train_epochs):
    logging.info('Starting to run epoch: %s', epoch)
    acc_counts_list = []
    idx_list = []
    for step in range(steps_per_epoch):
      if FLAGS.forget_mixup:
        temp_accuracy_counts, temp_idx = train_step(train_iterator)
        acc_counts_list.append(temp_accuracy_counts)
        idx_list.append(temp_idx)
      else:
        train_step(train_iterator)

      current_step = epoch * steps_per_epoch + (step + 1)
      max_steps = steps_per_epoch * FLAGS.train_epochs
      time_elapsed = time.time() - start_time
      steps_per_sec = float(current_step) / time_elapsed
      eta_seconds = (max_steps - current_step) / steps_per_sec
      message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                 'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                     current_step / max_steps,
                     epoch + 1,
                     FLAGS.train_epochs,
                     steps_per_sec,
                     eta_seconds / 60,
                     time_elapsed / 60))
      if step % 20 == 0:
        logging.info(message)

    # Only one of the forget_mixup and adaptive_mixup can be true.
    if FLAGS.forget_mixup:
      # Aggregate per-replica accuracy counts and example indices, update
      # forgetting statistics on disk, and rebuild the input pipeline so the
      # next epoch mixes up frequently-forgotten examples.
      current_acc = [tf.concat(list(acc_counts_list[i].values), axis=1)
                     for i in range(len(acc_counts_list))]
      total_idx = [tf.concat(list(idx_list[i].values), axis=0)
                   for i in range(len(idx_list))]
      current_acc = tf.cast(tf.concat(current_acc, axis=1), tf.int32)
      total_idx = tf.concat(total_idx, axis=0)

      current_forget_path = os.path.join(FLAGS.output_dir,
                                         'forget_counts.npy')
      last_acc_path = os.path.join(FLAGS.output_dir, 'last_acc.npy')
      if epoch == 0:
        forget_counts = tf.zeros(
            [1, num_train_examples], dtype=tf.int32)
        last_acc = tf.zeros(
            [1, num_train_examples], dtype=tf.int32)
      else:
        # Reload state from disk after a restart (locals() check means the
        # tensors survive across epochs within one process).
        if 'last_acc' not in locals():
          with tf.io.gfile.GFile(last_acc_path, 'rb') as f:
            last_acc = np.load(f)
          last_acc = tf.cast(tf.convert_to_tensor(last_acc), tf.int32)
        if 'forget_counts' not in locals():
          with tf.io.gfile.GFile(current_forget_path, 'rb') as f:
            forget_counts = np.load(f)
          forget_counts = tf.cast(tf.convert_to_tensor(forget_counts), tf.int32)

      selected_last_acc = tf.gather(last_acc, total_idx, axis=1)
      # An example is "forgotten" if it was right last epoch and wrong now.
      forget_this_epoch = tf.cast(current_acc < selected_last_acc, tf.int32)
      forget_this_epoch = tf.transpose(forget_this_epoch)
      target_shape = tf.constant([num_train_examples, 1])
      current_forget_counts = tf.scatter_nd(tf.reshape(total_idx, [-1, 1]),
                                            forget_this_epoch, target_shape)
      current_forget_counts = tf.transpose(current_forget_counts)
      acc_this_epoch = tf.transpose(current_acc)
      last_acc = tf.scatter_nd(tf.reshape(total_idx, [-1, 1]),
                               acc_this_epoch, target_shape)
      # This is lower bound of true acc.
      last_acc = tf.transpose(last_acc)

      # TODO(ywenxu): We count the dropped examples as forget. Fix this later.
      forget_counts += current_forget_counts
      forget_counts_history.append(forget_counts)
      logging.info('forgetting counts')
      logging.info(tf.stack(forget_counts_history, 0))
      with tf.io.gfile.GFile(os.path.join(
          FLAGS.output_dir, 'forget_counts_history.npy'), 'wb') as f:
        np.save(f, tf.stack(forget_counts_history, 0).numpy())
      with tf.io.gfile.GFile(current_forget_path, 'wb') as f:
        np.save(f, forget_counts.numpy())
      with tf.io.gfile.GFile(last_acc_path, 'wb') as f:
        np.save(f, last_acc.numpy())
      aug_params['forget_counts_dir'] = current_forget_path

      train_input_fn = data_utils.load_input_fn(
          split=tfds.Split.TRAIN,
          name=FLAGS.dataset,
          batch_size=batch_size,
          use_bfloat16=FLAGS.use_bfloat16,
          validation_set=FLAGS.validation,
          aug_params=aug_params)
      train_dataset = strategy.experimental_distribute_dataset(
          train_input_fn())
      train_iterator = iter(train_dataset)

    elif FLAGS.adaptive_mixup:
      # Estimate per-class calibration on the validation split and use it to
      # set per-class mixup coefficients for the next epoch.
      val_iterator = iter(val_dataset)
      logging.info('Testing on validation dataset')
      predictions_list = []
      labels_list = []
      for step in range(steps_per_val):
        temp_predictions, temp_labels = test_step(val_iterator, 'validation')
        predictions_list.append(temp_predictions)
        labels_list.append(temp_labels)
      predictions = [tf.concat(list(predictions_list[i].values), axis=1)
                     for i in range(len(predictions_list))]
      labels = [tf.concat(list(labels_list[i].values), axis=0)
                for i in range(len(labels_list))]
      predictions = tf.concat(predictions, axis=1)
      labels = tf.cast(tf.concat(labels, axis=0), tf.int64)

      def compute_acc_conf(preds, label, focus_class):
        # Accuracy minus mean confidence for one class (negative means
        # over-confident on that class).
        class_preds = tf.boolean_mask(preds, label == focus_class, axis=1)
        class_pred_labels = tf.argmax(class_preds, axis=-1)
        confidence = tf.reduce_mean(tf.reduce_max(class_preds, axis=-1), -1)
        accuracy = tf.reduce_mean(tf.cast(
            class_pred_labels == focus_class, tf.float32), axis=-1)
        return accuracy - confidence

      calibration_per_class = [compute_acc_conf(
          predictions, labels, i) for i in range(num_classes)]
      calibration_per_class = tf.stack(calibration_per_class, axis=1)
      logging.info('calibration per class')
      logging.info(calibration_per_class)
      mixup_coeff = tf.where(calibration_per_class > 0, 1.0, FLAGS.mixup_alpha)
      mixup_coeff = tf.clip_by_value(mixup_coeff, 0, 1)
      logging.info('mixup coeff')
      logging.info(mixup_coeff)
      aug_params['mixup_coeff'] = mixup_coeff
      train_input_fn = data_utils.load_input_fn(
          split=tfds.Split.TRAIN,
          name=FLAGS.dataset,
          batch_size=batch_size,
          use_bfloat16=FLAGS.use_bfloat16,
          validation_set=True,
          aug_params=aug_params)
      train_dataset = strategy.experimental_distribute_dataset(
          train_input_fn())
      train_iterator = iter(train_dataset)

    # Evaluate: always the clean test set; corrupted sets on the interval.
    datasets_to_evaluate = {'clean': test_datasets['clean']}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      datasets_to_evaluate = test_datasets
    for dataset_name, test_dataset in datasets_to_evaluate.items():
      test_iterator = iter(test_dataset)
      logging.info('Testing on dataset %s', dataset_name)
      for step in range(steps_per_eval):
        if step % 20 == 0:
          logging.info('Starting to run eval step %s of epoch: %s', step,
                       epoch)
        test_start_time = time.time()
        test_step(test_iterator, dataset_name)
        ms_per_example = (time.time() - test_start_time) * 1e6 / batch_size
        metrics['test/ms_per_example'].update_state(ms_per_example)
      logging.info('Done with testing on %s', dataset_name)

    corrupt_results = {}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                        corruption_types,
                                                        max_intensity)

    logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                 metrics['train/loss'].result(),
                 metrics['train/accuracy'].result() * 100)
    logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                 metrics['test/negative_log_likelihood'].result(),
                 metrics['test/accuracy'].result() * 100)
    total_results = {name: metric.result() for name, metric in metrics.items()}
    total_results.update(corrupt_results)
    with summary_writer.as_default():
      for name, result in total_results.items():
        tf.summary.scalar(name, result, step=epoch + 1)

    for metric in metrics.values():
      metric.reset_states()

    if (FLAGS.checkpoint_interval > 0 and
        (epoch + 1) % FLAGS.checkpoint_interval == 0):
      checkpoint_name = checkpoint.save(
          os.path.join(FLAGS.output_dir, 'checkpoint'))
      logging.info('Saved checkpoint to %s', checkpoint_name)

  final_checkpoint_name = checkpoint.save(
      os.path.join(FLAGS.output_dir, 'checkpoint'))
  logging.info('Saved last checkpoint to %s', final_checkpoint_name)
if __name__ == '__main__':
  app.run(main)  # absl entry point: parses FLAGS, then calls main().
|
google/edward2
|
experimental/marginalization_mixup/sngp.py
|
Python
|
apache-2.0
| 30,800
|
[
"Gaussian"
] |
2c76a0b825a85532722c238eea32e1e9669b73d52efc3a9e38c16123fc7ed465
|
"""
This is a financial market model written in indra.
It is intended to demonstrate how the interaction of value
investors and trend followers can produce cyclical price
changes.
"""
from math import isclose
from indra.agent import Agent
from indra.composite import Composite
from indra.display_methods import BLUE, RED
from indra.env import Env, UNLIMITED
from registry.registry import get_env, get_prop
from registry.registry import run_notice, user_log_notif
from indra.utils import gaussian
from indra.utils import init_props
# Model name and tunable defaults (overridable via get_prop() at run time).
MODEL_NAME = "fmarket"
DEF_NUM_TREND_FOLLOWER = 10
DEF_NUM_VALUE_INVESTOR = 10
DEF_CAPITAL = 1000  # starting capital for every investor agent
DEF_PRICE = 8 # a starting point
DEF_PERIODS = 3  # default look-back period for trend followers
DEF_NUM_ASSET = 10  # shares exchanged per buy/sell transaction
DEF_MIN_PRICE_MOVE = .2  # base per-period price step
DEF_MAX_PRICE_MOVE = .4  # ceiling on any per-period price step
INF = 1000000000 # just some very big num!
DEF_REAL_VALUE = 10  # the asset's underlying "true" value
DEF_DISCOUNT = .002  # mean discount used to draw investor thresholds
DEF_SIGMA = .8  # std dev for gaussian draws of agent parameters
MARKET_MAKER = "market_maker"  # key of the market-maker agent in the env
ASSET_PRICE = "Asset Price"  # series label in the population history
def trend_direction(agent, cur_price, price_hist):
    """
    Return 1 if the price trend over this agent's look-back period is
    upward, else 0.
    """
    lookback = round(len(price_hist) - agent["change_period"])
    if lookback >= 0:
        reference_price = price_hist[lookback]
    else:
        # Not enough history yet: a huge sentinel means "no uptrend seen".
        reference_price = INF
    return 1 if cur_price > reference_price else 0
def buy(agent):
    """Buy one block of DEF_NUM_ASSET shares if the agent can afford it."""
    market_maker = get_env()[MARKET_MAKER]
    cost = market_maker["asset_price"] * DEF_NUM_ASSET
    if agent["capital"] >= cost:
        agent["capital"] -= cost
        agent["num_stock"] += DEF_NUM_ASSET
        market_maker["buy"] += 1
def sell(agent):
    """Sell one block of DEF_NUM_ASSET shares if the agent holds enough."""
    market_maker = get_env()[MARKET_MAKER]
    proceeds = market_maker["asset_price"] * DEF_NUM_ASSET
    if agent["num_stock"] >= DEF_NUM_ASSET:
        market_maker["sell"] += 1
        agent["capital"] += proceeds
        agent["num_stock"] -= DEF_NUM_ASSET
def market_report(env):
    """Return a one-line census string showing the current asset price."""
    cur_price = get_env()[MARKET_MAKER]["asset_price"]
    return "Asset price on the market: " + str(round(cur_price, 4)) + "\n"
def calc_price_change(ratio, min_price_move=DEF_MIN_PRICE_MOVE,
                      max_price_move=DEF_MAX_PRICE_MOVE):
    """
    Make the price move in proportion to the buy/sell ratio, capped at
    max_price_move.  A ratio below 1 produces a downward (negative) move.
    """
    if isclose(ratio, 1.0):
        return 0
    if ratio >= 1:
        return min(max_price_move, min_price_move * ratio)
    # Downward move: invert the ratio (0 maps to a huge value) and negate.
    inverted = INF if ratio == 0 else 1 / ratio
    return -min(max_price_move, min_price_move * inverted)
def create_market_maker(name):
    """
    Create the single market-maker agent that sets the asset price.
    """
    maker = Agent(name, action=market_maker_action)
    for attr, val in (("buy", 0), ("sell", 0),
                      ("asset_price", DEF_PRICE),
                      ("prev_asset_price", DEF_PRICE)):
        maker[attr] = val
    maker["price_hist"] = [DEF_PRICE]
    return maker
def create_trend_follower(name, i):
    """
    Create a trend follower with a gaussian-drawn look-back period.
    """
    follower = Agent(name + str(i), action=trend_follower_action)
    mean_period = get_prop("average_period", DEF_PERIODS)
    spread = get_prop("deviation_follower", DEF_SIGMA)
    follower["change_period"] = gaussian(mean_period, spread)
    follower["capital"] = DEF_CAPITAL
    follower["num_stock"] = 0
    return follower
def create_value_investor(name, i):
    """
    Create a value investor with gaussian-drawn buy/sell price thresholds
    around the asset's real value.
    """
    investor = Agent(name + str(i), action=value_investor_action)
    mean_discount = get_prop("discount", DEF_DISCOUNT)
    spread = get_prop("deviation_investor", DEF_SIGMA)
    buy_discount = gaussian(mean_discount, spread)
    sell_premium = gaussian(mean_discount, spread)
    investor["low_price"] = DEF_REAL_VALUE * (1 - buy_discount)
    investor["high_price"] = DEF_REAL_VALUE * (1 + sell_premium)
    investor["capital"] = DEF_CAPITAL
    investor["num_stock"] = 0
    return investor
def market_maker_action(agent):
    """
    Update the asset price from this period's buy/sell ratio, append it
    to the price history, and reset the order counters.
    """
    market_maker = get_env()[MARKET_MAKER]
    market_maker["prev_asset_price"] = market_maker["asset_price"]
    if agent["sell"] != 0:
        ratio = agent["buy"] / agent["sell"]
    elif agent["buy"] != 0:
        ratio = INF  # all buyers, no sellers
    else:
        ratio = 1  # no trades at all: price holds
    agent["asset_price"] += calc_price_change(ratio)
    agent["price_hist"].append(round(agent["asset_price"], 4))
    agent["buy"] = 0
    agent["sell"] = 0
    return True
def trend_follower_action(agent):
    """Buy when the trend is upward, otherwise sell."""
    market_maker = get_env()[MARKET_MAKER]
    upward = trend_direction(agent, market_maker["asset_price"],
                             market_maker["price_hist"]) == 1
    if upward:
        buy(agent)
    else:
        sell(agent)
    return True
def value_investor_action(agent):
    """Sell above the agent's high threshold, buy below its low one."""
    cur_price = get_env()[MARKET_MAKER]["asset_price"]
    if cur_price >= agent["high_price"]:
        sell(agent)
    elif cur_price <= agent["low_price"]:
        buy(agent)
    return True
def initial_price(pop_hist):
    """
    Set up our pop hist object to record exchanges per period.
    """
    # Seed the series with the model's starting price.
    pop_hist.record_pop(ASSET_PRICE, DEF_PRICE)
def record_price(pop_hist):
    """
    Env hook: record the market maker's current asset price each period.
    """
    current_price = get_env()[MARKET_MAKER]["asset_price"]
    pop_hist.record_pop(ASSET_PRICE, current_price)
def set_env_attrs():
    # Register this model's price-recording and census hooks on the env.
    user_log_notif("Setting env attrs for " + MODEL_NAME)
    env = get_env()
    env.set_attr("pop_hist_func", record_price)
    env.set_attr("census_func", market_report)
def set_up(props=None):
    """
    A func to set up run that can also be used by test code.
    """
    init_props(MODEL_NAME, props)
    groups = []
    # Group sizes come from props when present, else the model defaults.
    groups.append(Composite("value_investors", {"color": BLUE},
                            member_creator=create_value_investor,
                            num_members=get_prop("value_investors",
                                                 DEF_NUM_VALUE_INVESTOR)))
    groups.append(Composite("trend_followers", {"color": RED},
                            member_creator=create_trend_follower,
                            num_members=get_prop("trend_followers",
                                                 DEF_NUM_TREND_FOLLOWER)))
    # The market maker is a lone agent, not a populated group.
    groups.append(create_market_maker(MARKET_MAKER))
    # Constructing the Env registers it; it is retrieved later via get_env().
    Env(MODEL_NAME,
        members=groups,
        width=UNLIMITED,
        height=UNLIMITED,
        pop_hist_setup=initial_price)
    get_env().exclude_menu_item("scatter_plot")
    set_env_attrs()
def main():
    # Build the model, announce the run, then execute the env (it is callable).
    set_up()
    run_notice(MODEL_NAME)
    get_env()()
    return 0
# Run the model when this file is executed as a script.
if __name__ == "__main__":
    main()
|
gcallah/Indra
|
models/fmarket.py
|
Python
|
gpl-3.0
| 6,817
|
[
"Gaussian"
] |
f7b16c71bb7afc035f9db7e6361759d694284d9b4e0d1c45bb97dc9bfeb9117e
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
**************************************
**PLogger** - Object
**************************************
This module defines the parallel logger PLogger.
It can be used to switch on logging on all CPUs.
espressopp.PLogger.set(LoggerName, LoggerLevel)
LoggerName : the name of the logger, if LoggerName='' than all loggers are set
LoggerLevel : possible values are 'FATAL', 'ERROR', 'WARN', 'INFO', 'TRACE', 'DEBUG'
'DEBUG' produces most output
'FATAL' produces least output
Example:
>>> espressopp.PLogger.set('LennardJonesGeneric', 'INFO')
>>> pot = espressopp.interaction.LennardJonesGeneric(1.0, 1.0, 12, 6, 1.12246)
>>> print pot.computeEnergy(1.0)
>>> espressopp.PLogger.set('LennardJonesGeneric', 'ERROR')
.. function:: espressopp.set(thelogger, level)
:param thelogger:
:param level:
:type thelogger:
:type level:
"""
import espressopp
def set(thelogger, level):
    """Set *level* on *thelogger* on every CPU via PMI.

    An empty logger name addresses all loggers; see the module docstring
    for the accepted level strings.
    """
    logger_proxy = espressopp.pmi.create('logging.getLogger', thelogger)
    espressopp.pmi.call(logger_proxy, 'setLevel', level)
|
junghans/espressopp
|
src/PLogger.py
|
Python
|
gpl-3.0
| 1,899
|
[
"ESPResSo"
] |
944b3c255128510ecf1cf8bbca86f6e9b44e24b76d9508a95502bdcafc52fd37
|
from __future__ import division
import numpy as np
from scipy.ndimage import gaussian_filter, gaussian_laplace
import math
from math import sqrt, log
from scipy import spatial
from ..util import img_as_float
from .peak import peak_local_max
from ._hessian_det_appx import _hessian_matrix_det
from ..transform import integral_image
from .._shared.utils import assert_nD
# This basic blob detection algorithm is based on:
# http://www.cs.utah.edu/~jfishbau/advimproc/project1/ (04.04.2013)
# Theory behind: http://en.wikipedia.org/wiki/Blob_detection (04.04.2013)
def _compute_disk_overlap(d, r1, r2):
    """
    Compute the overlap of two disks of radii ``r1`` and ``r2`` whose
    centers are a distance ``d`` apart, as a fraction of the smaller
    disk's area.

    Parameters
    ----------
    d : float
        Distance between centers.
    r1 : float
        Radius of the first disk.
    r2 : float
        Radius of the second disk.

    Returns
    -------
    fraction : float
        Lens area divided by the smaller disk's area.
    """
    # Half-angles subtended by the lens in each disk (clipped for safety).
    cos1 = np.clip((d ** 2 + r1 ** 2 - r2 ** 2) / (2 * d * r1), -1, 1)
    cos2 = np.clip((d ** 2 + r2 ** 2 - r1 ** 2) / (2 * d * r2), -1, 1)
    # Heron-style product over the triangle sides built from d, r1, r2.
    sides = ((-d + r2 + r1) * (d - r2 + r1) *
             (d + r2 - r1) * (d + r2 + r1))
    lens_area = (r1 ** 2 * math.acos(cos1) + r2 ** 2 * math.acos(cos2) -
                 0.5 * sqrt(abs(sides)))
    return lens_area / (math.pi * (min(r1, r2) ** 2))
def _compute_sphere_overlap(d, r1, r2):
    """
    Compute the overlap of two spheres of radii ``r1`` and ``r2`` whose
    centers are a distance ``d`` apart, as a fraction of the smaller
    sphere's volume.

    See http://mathworld.wolfram.com/Sphere-SphereIntersection.html for
    the lens-volume formula used here.
    """
    lens_vol = (math.pi / (12 * d) * (r1 + r2 - d) ** 2 *
                (d ** 2 + 2 * d * (r1 + r2) -
                 3 * (r1 ** 2 + r2 ** 2) + 6 * r1 * r2))
    smaller_vol = 4. / 3 * math.pi * min(r1, r2) ** 3
    return lens_vol / smaller_vol
def _blob_overlap(blob1, blob2):
    """Return the fraction of area (2-D) or volume (3-D) shared by two blobs.

    Each blob is a sequence ``(row, col, sigma)`` or
    ``(pln, row, col, sigma)``; sigma is the detecting Gaussian's
    standard deviation.
    """
    n_dim = len(blob1) - 1
    # A blob's extent is sqrt(n_dim) * sigma.
    r1 = blob1[-1] * sqrt(n_dim)
    r2 = blob2[-1] * sqrt(n_dim)
    d = sqrt(np.sum((blob1[:-1] - blob2[:-1]) ** 2))
    if d > r1 + r2:
        # Disjoint: no overlap at all.
        return 0
    if d <= abs(r1 - r2):
        # One blob entirely inside the other.
        return 1
    if n_dim == 2:
        return _compute_disk_overlap(d, r1, r2)
    # 3-D case: sphere-sphere intersection.
    return _compute_sphere_overlap(d, r1, r2)
def _prune_blobs(blobs_array, overlap):
    """Remove the smaller of any two blobs overlapping more than ``overlap``.

    Parameters
    ----------
    blobs_array : ndarray
        Rows of ``(row, col, sigma)`` (or ``(pln, row, col, sigma)`` in
        3-D); must not have a dimension of size 0.
    overlap : float
        Overlap fraction in (0, 1) above which the smaller blob dies.

    Returns
    -------
    ndarray
        ``blobs_array`` with the losing blobs removed.
    """
    biggest_sigma = blobs_array[:, -1].max()
    # Only blobs closer than this can possibly overlap.
    reach = 2 * biggest_sigma * sqrt(blobs_array.shape[1] - 1)
    kdtree = spatial.cKDTree(blobs_array[:, :-1])
    candidate_pairs = np.array(list(kdtree.query_pairs(reach)))
    if len(candidate_pairs) == 0:
        return blobs_array
    for (i, j) in candidate_pairs:
        first, second = blobs_array[i], blobs_array[j]
        if _blob_overlap(first, second) > overlap:
            # Zero out the smaller blob's sigma to mark it for deletion.
            if first[-1] > second[-1]:
                second[-1] = 0
            else:
                first[-1] = 0
    return np.array([blob for blob in blobs_array if blob[-1] > 0])
def blob_dog(image, min_sigma=1, max_sigma=50, sigma_ratio=1.6, threshold=2.0,
             overlap=.5,):
    r"""Find blobs in a grayscale image with the Difference of Gaussian method.

    Blobs are assumed to be light on dark.  Returns an
    ``(n, image.ndim + 1)`` array whose rows hold the blob coordinates
    followed by the standard deviation of the Gaussian that detected the
    blob; the blob radius is roughly :math:`\sqrt{2}\sigma` in 2-D and
    :math:`\sqrt{3}\sigma` in 3-D.

    Parameters
    ----------
    image : 2D or 3D ndarray
        Input grayscale image.
    min_sigma, max_sigma : float, optional
        Bounds on the Gaussian kernel widths (small sigmas find small
        blobs, large sigmas large ones).
    sigma_ratio : float, optional
        Geometric step between successive Gaussians.
    threshold : float, optional
        Absolute lower bound for scale-space maxima.
    overlap : float, optional
        When two blobs overlap by a larger fraction, the smaller one is
        eliminated.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Blob_detection#The_difference_of_Gaussians_approach
    """
    image = img_as_float(image)
    # Smallest k such that min_sigma * sigma_ratio**k > max_sigma.
    k = int(log(float(max_sigma) / min_sigma, sigma_ratio)) + 1
    # Geometric progression of kernel widths.
    sigma_list = np.array([min_sigma * (sigma_ratio ** power)
                           for power in range(k + 1)])
    blurred = [gaussian_filter(image, sigma) for sigma in sigma_list]
    # Successive differences; the sigma factor gives scale invariance.
    dog_images = [(blurred[idx] - blurred[idx + 1]) * sigma_list[idx]
                  for idx in range(k)]
    image_cube = np.stack(dog_images, axis=-1)
    local_maxima = peak_local_max(image_cube, threshold_abs=threshold,
                                  footprint=np.ones((3,) * (image.ndim + 1)),
                                  threshold_rel=0.0,
                                  exclude_border=False)
    if local_maxima.size == 0:
        # No peaks found anywhere in scale space.
        return np.empty((0, 3))
    blobs = local_maxima.astype(np.float64)
    # Replace the scale index by the sigma it stands for.
    blobs[:, -1] = sigma_list[local_maxima[:, -1]]
    return _prune_blobs(blobs, overlap)
def blob_log(image, min_sigma=1, max_sigma=50, num_sigma=10, threshold=.2,
             overlap=.5, log_scale=False):
    r"""Find blobs in a grayscale image with the Laplacian of Gaussian method.

    Blobs are assumed to be light on dark.  Returns an
    ``(n, image.ndim + 1)`` array whose rows hold the blob coordinates
    followed by the standard deviation of the Gaussian that detected the
    blob; the blob radius is roughly :math:`\sqrt{2}\sigma` in 2-D and
    :math:`\sqrt{3}\sigma` in 3-D.

    Parameters
    ----------
    image : 2D or 3D ndarray
        Input grayscale image.
    min_sigma, max_sigma : float, optional
        Bounds on the Gaussian kernel widths.
    num_sigma : int, optional
        Number of scales taken between the bounds.
    threshold : float, optional
        Absolute lower bound for scale-space maxima.
    overlap : float, optional
        When two blobs overlap by a larger fraction, the smaller one is
        eliminated.
    log_scale : bool, optional
        Space the scales logarithmically (base 10) instead of linearly.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Blob_detection#The_Laplacian_of_Gaussian
    """
    image = img_as_float(image)
    if log_scale:
        sigma_list = np.logspace(log(min_sigma, 10), log(max_sigma, 10),
                                 num_sigma)
    else:
        sigma_list = np.linspace(min_sigma, max_sigma, num_sigma)
    # Negated LoG responses; the s**2 factor makes them scale-invariant.
    log_images = [-gaussian_laplace(image, s) * s ** 2 for s in sigma_list]
    image_cube = np.stack(log_images, axis=-1)
    local_maxima = peak_local_max(image_cube, threshold_abs=threshold,
                                  footprint=np.ones((3,) * (image.ndim + 1)),
                                  threshold_rel=0.0,
                                  exclude_border=False)
    if local_maxima.size == 0:
        # No peaks found anywhere in scale space.
        return np.empty((0, 3))
    blobs = local_maxima.astype(np.float64)
    # Replace the scale index by the sigma it stands for.
    blobs[:, -1] = sigma_list[local_maxima[:, -1]]
    return _prune_blobs(blobs, overlap)
def blob_doh(image, min_sigma=1, max_sigma=30, num_sigma=10, threshold=0.01,
             overlap=.5, log_scale=False):
    """Find blobs in a grayscale image with the Determinant of Hessian method.

    Works on 2-D images only; blobs may be light-on-dark or vice versa.
    Returns an ``(n, 3)`` array of rows ``(y, x, sigma)`` where ``sigma``
    is the width of the Gaussian whose Hessian determinant found the blob;
    the blob radius is approximately ``sigma``.  The determinant is
    approximated on an integral image with box filters [2]_, so detection
    cost does not grow with sigma — but blobs of radius under about 3 px
    cannot be detected.

    Parameters
    ----------
    image : 2D ndarray
        Input grayscale image.
    min_sigma, max_sigma : float, optional
        Bounds on the Gaussian widths used for the Hessian matrix.
    num_sigma : int, optional
        Number of scales taken between the bounds.
    threshold : float, optional
        Absolute lower bound for scale-space maxima.
    overlap : float, optional
        When two blobs overlap by a larger fraction, the smaller one is
        eliminated.
    log_scale : bool, optional
        Space the scales logarithmically (base 10) instead of linearly.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Blob_detection#The_determinant_of_the_Hessian
    .. [2] Herbert Bay, Andreas Ess, Tinne Tuytelaars, Luc Van Gool,
           "SURF: Speeded Up Robust Features"
           ftp://ftp.vision.ee.ethz.ch/publications/articles/eth_biwi_00517.pdf
    """
    assert_nD(image, 2)
    image = integral_image(img_as_float(image))
    if log_scale:
        sigma_list = np.logspace(log(min_sigma, 10), log(max_sigma, 10),
                                 num_sigma)
    else:
        sigma_list = np.linspace(min_sigma, max_sigma, num_sigma)
    det_images = [_hessian_matrix_det(image, s) for s in sigma_list]
    image_cube = np.dstack(det_images)
    local_maxima = peak_local_max(image_cube, threshold_abs=threshold,
                                  footprint=np.ones((3,) * image_cube.ndim),
                                  threshold_rel=0.0,
                                  exclude_border=False)
    if local_maxima.size == 0:
        # No peaks found anywhere in scale space.
        return np.empty((0, 3))
    blobs = local_maxima.astype(np.float64)
    # Replace the scale index by the sigma it stands for.
    blobs[:, -1] = sigma_list[local_maxima[:, -1]]
    return _prune_blobs(blobs, overlap)
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/skimage/feature/blob.py
|
Python
|
gpl-3.0
| 19,138
|
[
"Gaussian"
] |
6ae17cbf9e56d985ba239101f40084cb65823215733dd1c0bf91536948a92b99
|
#! /usr/bin/env python
import re
import math
import collections
import numpy as np
import time
import operator
from scipy.io import mmread, mmwrite
from random import randint
from sklearn import cross_validation
from sklearn import linear_model
from sklearn.grid_search import GridSearchCV
from sklearn import preprocessing as pp
from sklearn.svm import SVR
from sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.decomposition import ProbabilisticPCA, KernelPCA
from sklearn.decomposition import NMF
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression, Ridge, Lasso, ElasticNet
import scipy.stats as stats
from sklearn import tree
from sklearn.feature_selection import f_regression
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import auc, f1_score
from sklearn.gaussian_process import GaussianProcess
import features
# working directory
dir = '.'  # NOTE(review): shadows the `dir` builtin; renaming would touch every function below
# Column layout: col 0 is the row id, cols 1..label_index-1 are features,
# the last column of the training file is the loss label.
label_index = 770
# load train data
def load_train_fs():
    """Load the training matrix and impute bad values.

    NaNs are replaced column-wise by the column mean; +/-inf entries are
    zeroed.  Returns the raw (id, features..., loss) matrix as a 2-D
    numpy array.
    """
    # In the validation process, the training data was randomly shuffled firstly.
    # For the prediction process, there is no need to shuffle the dataset.
    # Owing to an out-of-memory problem, the Gaussian process only uses part of
    # the training data, so its predictions may differ slightly from the model
    # in which the training data was shuffled.
    train_fs = np.genfromtxt(open(dir + '/train_v2_1000.csv', 'rb'),
                             delimiter=',', skip_header=1)
    # np.nanmean replaces scipy.stats.nanmean, which was removed in scipy 0.18.
    col_mean = np.nanmean(train_fs, axis=0)
    inds = np.where(np.isnan(train_fs))
    train_fs[inds] = np.take(col_mean, inds[1])
    train_fs[np.isinf(train_fs)] = 0
    return train_fs
# load test data
def load_test_fs():
    """Load the test matrix and impute bad values.

    Same imputation as the training loader: NaNs become column means,
    +/-inf entries become 0.
    """
    test_fs = np.genfromtxt(open(dir + '/test_v2.csv', 'rb'),
                            delimiter=',', skip_header=1)
    # np.nanmean replaces scipy.stats.nanmean, which was removed in scipy 0.18.
    col_mean = np.nanmean(test_fs, axis=0)
    inds = np.where(np.isnan(test_fs))
    test_fs[inds] = np.take(col_mean, inds[1])
    test_fs[np.isinf(test_fs)] = 0
    return test_fs
# extract features from test data
def test_type(test_fs):
    """Return a copy of the feature columns (1 .. label_index-1)."""
    feature_cols = range(1, label_index)
    return test_fs[:, feature_cols]
# extract features from train data
def train_type(train_fs):
    """Split the training matrix into feature columns and the loss column."""
    features_x = train_fs[:, range(1, label_index)]
    loss_y = train_fs[:, -1]
    return features_x, loss_y
# transform the loss to the binary form
def toLabels(train_y):
    """Binarize the loss: 1.0 where a default occurred (loss > 0), else 0.0."""
    return (train_y > 0).astype(float)
# generate the output file based on the predictions
def output_preds(preds):
    """Write the submission CSV, clipping predictions into [0, 100].

    Note: clipping mutates ``preds`` in place, matching the original
    behavior callers may rely on.
    """
    out_file = dir + '/output_1000.csv'
    # Context manager guarantees the file is closed even on error.
    with open(out_file, 'w') as fs:
        fs.write('id,loss\n')
        for i in range(len(preds)):
            # Losses are percentages: clamp into the valid range.
            if preds[i] > 100:
                preds[i] = 100
            elif preds[i] < 0:
                preds[i] = 0
            # float() replaces np.float, an alias removed in numpy 1.24.
            fs.write(str(i + 105472) + ',' + str(float(preds[i])) + '\n')
# get the top feature indexes by invoking f_regression
def getTopFeatures(train_x, train_y, n_features=100):
    """Return the indexes of the ``n_features`` columns with highest F-scores.

    Uses sklearn's univariate ``f_regression`` test; NaN scores are
    treated as 0 so they sort last.
    """
    f_val, p_val = f_regression(train_x, train_y)
    f_val_dict = {}
    for i in range(len(f_val)):
        if math.isnan(f_val[i]):
            f_val[i] = 0.0
        f_val_dict[i] = f_val[i]
    # .items() replaces the Python-2-only .iteritems(); the parallel
    # p-value ranking computed previously was never used, so it is dropped.
    sorted_f = sorted(f_val_dict.items(), key=operator.itemgetter(1),
                      reverse=True)
    return [sorted_f[i][0] for i in range(n_features)]
# generate the new data, based on which features are generated, and used
def get_data(train_x, feature_indexs, feature_minus_pair_list=[], feature_plus_pair_list=[],
             feature_mul_pair_list=[], feature_divide_pair_list=[], feature_pair_sub_mul_list=[],
             feature_pair_plus_mul_list=[], feature_pair_sub_divide_list=[],
             feature_minus2_pair_list=[], feature_mul2_pair_list=[],
             feature_sub_square_pair_list=[], feature_square_sub_pair_list=[],
             feature_square_plus_pair_list=[]):
    """Assemble a feature matrix: selected raw columns plus derived columns.

    Starts from ``train_x[:, feature_indexs]`` and appends one column per
    pair in the minus/plus/mul/divide lists and one per (i, j, k) triple
    in ``feature_pair_sub_mul_list`` ((col_i - col_j) * col_k).  The
    remaining list parameters are accepted for interface compatibility
    but are currently unused.
    """
    result = train_x[:, feature_indexs]
    for pair in feature_minus_pair_list:
        result = np.column_stack((result, train_x[:, pair[0]] - train_x[:, pair[1]]))
    for pair in feature_plus_pair_list:
        result = np.column_stack((result, train_x[:, pair[0]] + train_x[:, pair[1]]))
    for pair in feature_mul_pair_list:
        result = np.column_stack((result, train_x[:, pair[0]] * train_x[:, pair[1]]))
    for pair in feature_divide_pair_list:
        result = np.column_stack((result, train_x[:, pair[0]] / train_x[:, pair[1]]))
    for triple in feature_pair_sub_mul_list:
        derived = (train_x[:, triple[0]] - train_x[:, triple[1]]) * train_x[:, triple[2]]
        result = np.column_stack((result, derived))
    return result
# use gbm classifier to predict whether the loan defaults or not
def gbc_classify(train_x, train_y):
    """Fit and return a GBM classifier for "does this loan default?"."""
    top_features = getTopFeatures(train_x, train_y)
    sub_x_Train = get_data(train_x, top_features[:16],
                           features.feature_pair_sub_list,
                           features.feature_pair_plus_list,
                           features.feature_pair_mul_list,
                           features.feature_pair_divide_list[:20],
                           features.feature_pair_sub_mul_list[:20])
    gbc = GradientBoostingClassifier(n_estimators=3000, max_depth=8)
    gbc.fit(sub_x_Train, toLabels(train_y))
    return gbc
# use svm to predict the loss, based on the result of gbm classifier
def gbc_svr_predict_part(gbc, train_x, train_y, test_x, feature_pair_sub_list,
                    feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list,
                    feature_pair_sub_mul_list, feature_pair_sub_list_sf, feature_pair_plus_list2):
    """Two-stage loss prediction: ``gbc`` flags likely defaults, SVR sizes them.

    Rows whose predicted default probability exceeds 0.55 get an SVR
    estimate of log-loss (exponentiated on return); rows where column 16
    is >= 1 or where the classifier says "no default" stay at 0.
    Returns an array of losses aligned with ``test_x``.
    """
    feature_indexs = getTopFeatures(train_x, train_y)
    # Stage-1 features (top 16 columns + derived pairs), used only for gbc.
    sub_x_Train = get_data(train_x, feature_indexs[:16], feature_pair_sub_list
                ,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list[:20], feature_pair_sub_mul_list[:20])
    sub_x_Test = get_data(test_x, feature_indexs[:16], feature_pair_sub_list
                ,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list[:20], feature_pair_sub_mul_list[:20])
    pred_labels = gbc.predict(sub_x_Test)
    pred_probs = gbc.predict_proba(sub_x_Test)[:,1]
    # Only rows with P(default) > 0.55 receive a regression estimate.
    ind_test = np.where(pred_probs>0.55)[0]
    ind_train = np.where(train_y > 0)[0]
    ind_train0 = np.where(train_y == 0)[0]
    preds_all = np.zeros([len(sub_x_Test)])
    # NOTE(review): column 16 appears to be probability-like (log(1-x) below
    # needs x < 1); rows with value >= 1 are excluded — confirm against data.
    flag = (sub_x_Test[:,16] >= 1)
    ind_tmp0 = np.where(flag)[0]
    ind_tmp = np.where(~flag)[0]
    # Stage-2 features: a wider set (top 100 columns + different pair lists).
    sub_x_Train = get_data(train_x, feature_indexs[:100], feature_pair_sub_list_sf
                ,feature_pair_plus_list2[:100], feature_pair_mul_list[:40], feature_pair_divide_list, feature_pair_sub_mul_list)
    sub_x_Test = get_data(test_x, feature_indexs[:100], feature_pair_sub_list_sf
                ,feature_pair_plus_list2[:100], feature_pair_mul_list[:40], feature_pair_divide_list, feature_pair_sub_mul_list)
    # Log-transform column 101 before standardization.
    sub_x_Train[:,101] = np.log(1-sub_x_Train[:,101])
    sub_x_Test[ind_tmp,101] = np.log(1-sub_x_Test[ind_tmp,101])
    scaler = pp.StandardScaler()
    scaler.fit(sub_x_Train)
    sub_x_Train = scaler.transform(sub_x_Train)
    sub_x_Test[ind_tmp] = scaler.transform(sub_x_Test[ind_tmp])
    svr = SVR(C=16, kernel='rbf', gamma = 0.000122)
    # Train on defaulted rows only, regressing the log of the loss.
    svr.fit(sub_x_Train[ind_train], np.log(train_y[ind_train]))
    preds = svr.predict(sub_x_Test[ind_test])
    preds_all[ind_test] = np.power(np.e, preds)
    preds_all[ind_tmp0] = 0
    return preds_all
# use gbm regression to predict the loss, based on the result of gbm classifier
def gbc_gbr_predict_part(gbc, train_x, train_y, test_x, feature_pair_sub_list,
                    feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list,
                    feature_pair_sub_mul_list, feature_pair_sub_list2):
    """Two-stage loss prediction: ``gbc`` flags likely defaults, GBR sizes them.

    Mirrors ``gbc_svr_predict_part`` but with a GradientBoostingRegressor
    as the second stage.  Returns an array of losses aligned with
    ``test_x``; rows not flagged by the classifier, or with column 16
    >= 1, stay at 0.
    """
    feature_indexs = getTopFeatures(train_x, train_y)
    # Stage-1 features (top 16 columns + derived pairs), used only for gbc.
    sub_x_Train = get_data(train_x, feature_indexs[:16], feature_pair_sub_list
                ,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list[:20],feature_pair_sub_mul_list[:20])
    sub_x_Test = get_data(test_x, feature_indexs[:16], feature_pair_sub_list
                ,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list[:20], feature_pair_sub_mul_list[:20])
    pred_labels = gbc.predict(sub_x_Test)
    pred_probs = gbc.predict_proba(sub_x_Test)[:,1]
    # Only rows with P(default) > 0.55 receive a regression estimate.
    ind_test = np.where(pred_probs>0.55)[0]
    ind_train = np.where(train_y > 0)[0]
    ind_train0 = np.where(train_y == 0)[0]
    preds_all = np.zeros([len(sub_x_Test)])
    # NOTE(review): rows with column 16 >= 1 are forced to zero loss —
    # confirm this matches the meaning of that feature.
    flag = (sub_x_Test[:,16] >= 1)
    ind_tmp0 = np.where(flag)[0]
    ind_tmp = np.where(~flag)[0]
    # Stage-2 features: a different subtraction pair list.
    sub_x_Train = get_data(train_x, feature_indexs[:16], feature_pair_sub_list2[:70]
                ,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list, feature_pair_sub_mul_list)
    sub_x_Test = get_data(test_x, feature_indexs[:16], feature_pair_sub_list2[:70]
                ,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list, feature_pair_sub_mul_list)
    scaler = pp.StandardScaler()
    scaler.fit(sub_x_Train)
    sub_x_Train = scaler.transform(sub_x_Train)
    sub_x_Test[ind_tmp] = scaler.transform(sub_x_Test[ind_tmp])
    gbr1000 = GradientBoostingRegressor(n_estimators=1300, max_depth=4, subsample=0.5, learning_rate=0.05)
    # Train on defaulted rows only, regressing the log of the loss.
    gbr1000.fit(sub_x_Train[ind_train], np.log(train_y[ind_train]))
    preds = gbr1000.predict(sub_x_Test[ind_test])
    preds_all[ind_test] = np.power(np.e, preds)
    preds_all[ind_tmp0] = 0
    return preds_all
# predict the loss based on the Gaussian process regressor, which has been trained
def gp_predict(clf, x_Test):
    """Predict with ``clf`` in chunks of 3000 rows and return exp(preds).

    Chunking keeps the Gaussian-process prediction within memory limits.
    The regressor was trained on log-losses, so predictions are
    exponentiated before being returned as a numpy array.
    """
    size = len(x_Test)
    part_size = 3000
    # Floor division: plain `/` yields a float under Python 3, which
    # would make range() below raise TypeError.
    cnt = (size - 1) // part_size + 1
    preds = []
    for i in range(cnt):
        # The final chunk may be shorter than part_size.
        stop = min((i + 1) * part_size, size)
        preds.extend(clf.predict(x_Test[i * part_size: stop]))
    return np.power(np.e, preds)
# train the gaussian process regressor
def gbc_gp_predict_part(sub_x_Train, train_y, sub_x_Test_part):
    """Fit a Gaussian-process regressor and predict loss for one test part.

    WARNING: mutates both `sub_x_Train` and `sub_x_Test_part` in place
    (log transform of column 16, then standard scaling) — callers must not
    reuse these arrays afterwards.
    """
    # Owing to out of memory, the model was trained on only part of the
    # training data.  Attention: this part was trained on a machine with
    # more than 96G of RAM.
    # Column 16 is remapped to log(1 - x); assumes column 16 < 1 for
    # training rows — TODO(review): confirm it is a fraction-like feature.
    sub_x_Train[:,16] = np.log(1-sub_x_Train[:,16])
    scaler = pp.StandardScaler()
    scaler.fit(sub_x_Train)
    sub_x_Train = scaler.transform(sub_x_Train)
    # Train only on rows with positive loss, and only the first 70% of
    # them (memory limit).
    ind_train = np.where(train_y>0)[0]
    part_size= int(0.7 * len(ind_train))
    gp = GaussianProcess(theta0=1e-3, thetaL=1e-5, thetaU=10, corr= 'absolute_exponential')
    # Regress on log(loss); gp_predict undoes the log transform.
    gp.fit(sub_x_Train[ind_train[:part_size]], np.log(train_y[ind_train[:part_size]]))
    # Test rows with column 16 >= 1 would make log(1 - x) undefined; they
    # are skipped and keep a zero prediction.
    flag = (sub_x_Test_part[:,16] >= 1)
    ind_tmp0 = np.where(flag)[0]  # NOTE(review): unused — kept for parity with the sibling functions
    ind_tmp = np.where(~flag)[0]
    sub_x_Test_part[ind_tmp,16] = np.log(1-sub_x_Test_part[ind_tmp,16])
    sub_x_Test_part[ind_tmp] = scaler.transform(sub_x_Test_part[ind_tmp])
    gp_preds_tmp = gp_predict(gp, sub_x_Test_part[ind_tmp])
    gp_preds = np.zeros(len(sub_x_Test_part))
    gp_preds[ind_tmp] = gp_preds_tmp
    return gp_preds
# use gbm classifier to predict whether the loan defaults or not, then invoke the function gbc_gp_predict_part
def gbc_gp_predict(train_x, train_y, test_x):
    """Gate a Gaussian-process loss prediction behind a GBM default classifier.

    A GradientBoostingClassifier is trained to spot likely defaults; only
    test rows with default probability > 0.55 are handed to the GP
    regressor, every other row keeps a zero prediction.
    """
    top_features = getTopFeatures(train_x, train_y)
    design_train = get_data(train_x, top_features[:16], features.feature_pair_sub_list,
                            features.feature_pair_plus_list, features.feature_pair_mul_list,
                            features.feature_pair_divide_list[:20])
    design_test = get_data(test_x, top_features[:16], features.feature_pair_sub_list,
                           features.feature_pair_plus_list, features.feature_pair_mul_list,
                           features.feature_pair_divide_list[:20])
    default_labels = toLabels(train_y)
    classifier = GradientBoostingClassifier(n_estimators=3000, max_depth=9)
    classifier.fit(design_train, default_labels)
    default_probs = classifier.predict_proba(design_test)[:, 1]
    likely_default = np.where(default_probs > 0.55)[0]
    part_preds = gbc_gp_predict_part(design_train, train_y, design_test[likely_default])
    gp_preds = np.zeros(len(test_x))
    gp_preds[likely_default] = part_preds
    return gp_preds
# invoke the function gbc_svr_predict_part
def gbc_svr_predict(gbc, train_x, train_y, test_x):
    """Thin wrapper: run the SVR pipeline with the module-level feature pair lists."""
    return gbc_svr_predict_part(
        gbc, train_x, train_y, test_x,
        features.feature_pair_sub_list,
        features.feature_pair_plus_list,
        features.feature_pair_mul_list,
        features.feature_pair_divide_list,
        features.feature_pair_sub_mul_list,
        features.feature_pair_sub_list_sf,
        features.feature_pair_plus_list2,
    )
# invoke the function gbc_gbr_predict_part
def gbc_gbr_predict(gbc, train_x, train_y, test_x):
    """Thin wrapper: run the GBR pipeline with the module-level feature pair lists."""
    return gbc_gbr_predict_part(
        gbc, train_x, train_y, test_x,
        features.feature_pair_sub_list,
        features.feature_pair_plus_list,
        features.feature_pair_mul_list,
        features.feature_pair_divide_list,
        features.feature_pair_sub_mul_list,
        features.feature_pair_sub_list2,
    )
# the main function
if __name__ == '__main__':
    # Load the featurized train/test sets and convert them to model-ready arrays.
    train_fs = load_train_fs()
    test_fs = load_test_fs()
    train_x, train_y = train_type(train_fs)
    test_x = test_type(test_fs)
    # One shared GBM default classifier gates the SVR and GBR pipelines;
    # the GP pipeline trains its own classifier internally.
    gbc = gbc_classify(train_x, train_y)
    svr_preds = gbc_svr_predict(gbc, train_x, train_y, test_x)
    gbr_preds = gbc_gbr_predict(gbc, train_x, train_y, test_x)
    gp_preds = gbc_gp_predict(train_x, train_y, test_x)
    # Fixed-weight blend of the three model families (weights sum to 1).
    preds_all = svr_preds * 0.4 + gp_preds * 0.25 + gbr_preds * 0.35
    output_preds(preds_all)
|
Goodideax/CS249
|
predict_combine_1000.py
|
Python
|
bsd-3-clause
| 14,591
|
[
"Gaussian"
] |
140da85ae98fe09c5f7c5af892b6864db26ffd00a3adb67d8e81923e869b6d46
|
"""
Noise covariance function
-------------------------
NoiseCFISO
NoiseCFReplicates
"""
import sys
sys.path.append("../")
# import python / numpy:
import scipy as SP
from pygp.covar import CovarianceFunction
class NoiseCFISO(CovarianceFunction):
    """
    Covariance function modelling i.i.d. Gaussian observation noise
    shared by all datapoints.
    """

    def __init__(self, *args, **kw_args):
        CovarianceFunction.__init__(self, *args, **kw_args)
        # A single hyperparameter: the log noise level Sigma.
        self.n_hyperparameters = 1

    def get_hyperparameter_names(self):
        """Return the names of hyperparameters to make identification easier."""
        return ['Sigma']

    def K(self, theta, x1, x2=None):
        """
        Covariance matrix K for hyperparameters `theta` and inputs *args* = X[, X'].
        Note that this covariance function only takes the noise level as
        hyperparameter: noise contributes only to the auto-covariance
        (single-input case).

        **Parameters:**
        See :py:class:`pygp.covar.CovarianceFunction`
        """
        if x2 is not None:
            # Cross-covariance: noise is independent between inputs.
            return 0
        return SP.exp(2 * theta[0]) * SP.eye(x1.shape[0])

    def Kgrad_theta(self, theta, x1, i):
        """
        Derivative of the covariance matrix with respect to the i-th
        hyperparameter.

        **Parameters:**
        See :py:class:`pygp.covar.CovarianceFunction`
        """
        K = self.K(theta, x1)
        assert i == 0, 'unknown hyperparameter'
        # d/dtheta exp(2*theta) = 2*exp(2*theta)
        return 2 * K

    def Kgrad_x(self, theta, x1, x2, d):
        # Noise does not depend on the inputs: zero gradient.
        return SP.zeros([x1.shape[0], x2.shape[0]])

    def Kgrad_xdiag(self, theta, x1, d):
        # Noise does not depend on the inputs: zero gradient.
        return SP.zeros([x1.shape[0]])
class NoiseCFReplicates(CovarianceFunction):
    """Covariance function for replicate-wise Gaussian observation noise."""

    def __init__(self, replicate_indices, *args, **kw_args):
        CovarianceFunction.__init__(self, *args, **kw_args)
        self.replicate_indices = replicate_indices
        # One noise hyperparameter per distinct replicate index.
        self.n_hyperparameters = len(SP.unique(replicate_indices))

    def get_hyperparameter_names(self):
        """Return the names of hyperparameters to make identification easier."""
        return ["Sigma %i" % (i) for i in range(self.n_hyperparameters)]

    def K(self, theta, x1, x2=None):
        """
        Covariance matrix K for hyperparameters `theta` and inputs *args* = X[, X'].
        Noise contributes only to the auto-covariance (single-input case);
        the rows belonging to replicate r are scaled by exp(2*theta[r]).

        **Parameters:**
        See :py:class:`pygp.covar.CovarianceFunction`
        """
        assert len(theta) == self.n_hyperparameters, 'Too many hyperparameters'
        if x2 is not None:
            # Cross-covariance: noise is independent between inputs.
            return 0
        noise = SP.eye(x1.shape[0])
        for rep, log_sigma in enumerate(theta):
            noise[self.replicate_indices == rep] *= SP.exp(2 * log_sigma)
        return noise

    def Kgrad_theta(self, theta, x1, i):
        '''
        Derivative of the covariance matrix with respect to the i-th
        hyperparameter: non-zero only on the diagonal entries of
        replicate i.

        **Parameters:**
        See :py:class:`pygp.covar.CovarianceFunction`
        '''
        assert i < self.n_hyperparameters, 'unknown hyperparameters'
        mask = (self.replicate_indices == i)
        K = SP.eye(x1.shape[0])
        K[mask] *= SP.exp(2 * theta[i])
        K[~mask] *= 0
        return 2 * K

    def Kgrad_x(self, theta, x1, x2, d):
        # Noise does not depend on the inputs: zero gradient.
        return SP.zeros([x1.shape[0], x2.shape[0]])

    def Kgrad_xdiag(self, theta, x1, d):
        # Noise does not depend on the inputs: zero gradient.
        return SP.zeros([x1.shape[0]])
|
PMBio/pygp
|
pygp/covar/noise.py
|
Python
|
gpl-2.0
| 3,746
|
[
"Gaussian"
] |
e3c192e6c50ba81cc6adf5987ffd67b185a964565979ed74092965397bd2a0e3
|
"""
Perform Levenberg-Marquardt least-squares minimization, based on MINPACK-1.
AUTHORS
The original version of this software, called LMFIT, was written in FORTRAN
as part of the MINPACK-1 package by XXX.
Craig Markwardt converted the FORTRAN code to IDL. The information for the
IDL version is:
Craig B. Markwardt, NASA/GSFC Code 662, Greenbelt, MD 20770
craigm@lheamail.gsfc.nasa.gov
UPDATED VERSIONs can be found on my WEB PAGE:
http://cow.physics.wisc.edu/~craigm/idl/idl.html
Mark Rivers created this Python version from Craig's IDL version.
Mark Rivers, University of Chicago
Building 434A, Argonne National Laboratory
9700 South Cass Avenue, Argonne, IL 60439
rivers@cars.uchicago.edu
Updated versions can be found at http://cars.uchicago.edu/software
Sergey Koposov converted the Mark's Python version from Numeric to numpy
Sergey Koposov, University of Cambridge, Institute of Astronomy,
Madingley road, CB3 0HA, Cambridge, UK
koposov@ast.cam.ac.uk
Updated versions can be found at http://code.google.com/p/astrolibpy/source/browse/trunk/
DESCRIPTION
MPFIT uses the Levenberg-Marquardt technique to solve the
least-squares problem. In its typical use, MPFIT will be used to
fit a user-supplied function (the "model") to user-supplied data
points (the "data") by adjusting a set of parameters. MPFIT is
based upon MINPACK-1 (LMDIF.F) by More' and collaborators.
For example, a researcher may think that a set of observed data
points is best modelled with a Gaussian curve. A Gaussian curve is
parameterized by its mean, standard deviation and normalization.
MPFIT will, within certain constraints, find the set of parameters
which best fits the data. The fit is "best" in the least-squares
sense; that is, the sum of the weighted squared differences between
the model and data is minimized.
The Levenberg-Marquardt technique is a particular strategy for
iteratively searching for the best fit. This particular
implementation is drawn from MINPACK-1 (see NETLIB), and is much faster
and more accurate than the version provided in the Scientific Python package
in Scientific.Functions.LeastSquares.
This version allows upper and lower bounding constraints to be placed on each
parameter, or the parameter can be held fixed.
The user-supplied Python function should return an array of weighted
deviations between model and data. In a typical scientific problem
the residuals should be weighted so that each deviate has a
gaussian sigma of 1.0. If X represents values of the independent
variable, Y represents a measurement for each value of X, and ERR
represents the error in the measurements, then the deviates could
be calculated as follows:
DEVIATES = (Y - F(X)) / ERR
where F is the analytical function representing the model. You are
recommended to use the convenience functions MPFITFUN and
MPFITEXPR, which are driver functions that calculate the deviates
for you. If ERR are the 1-sigma uncertainties in Y, then
TOTAL( DEVIATES^2 )
will be the total chi-squared value. MPFIT will minimize the
chi-square value. The values of X, Y and ERR are passed through
MPFIT to the user-supplied function via the FUNCTKW keyword.
Simple constraints can be placed on parameter values by using the
PARINFO keyword to MPFIT. See below for a description of this
keyword.
MPFIT does not perform more general optimization tasks. See TNMIN
instead. MPFIT is customized, based on MINPACK-1, to the
least-squares minimization problem.
USER FUNCTION
The user must define a function which returns the appropriate
values as specified above. The function should return the weighted
deviations between the model and the data. It should also return a status
flag and an optional partial derivative array. For applications which
use finite-difference derivatives -- the default -- the user
function should be declared in the following way:
def myfunct(p, fjac=None, x=None, y=None, err=None)
# Parameter values are passed in "p"
# If fjac==None then partial derivatives should not be
# computed. It will always be None if MPFIT is called with default
# flag.
model = F(x, p)
# Non-negative status value means MPFIT should continue, negative means
# stop the calculation.
status = 0
return [status, (y-model)/err]
See below for applications with analytical derivatives.
The keyword parameters X, Y, and ERR in the example above are
suggestive but not required. Any parameters can be passed to
MYFUNCT by using the functkw keyword to MPFIT. Use MPFITFUN and
MPFITEXPR if you need ideas on how to do that. The function *must*
accept a parameter list, P.
In general there are no restrictions on the number of dimensions in
X, Y or ERR. However the deviates *must* be returned in a
one-dimensional Numeric array of type Float.
User functions may also indicate a fatal error condition using the
status return described above. If status is set to a number between
-15 and -1 then MPFIT will stop the calculation and return to the caller.
ANALYTIC DERIVATIVES
In the search for the best-fit solution, MPFIT by default
calculates derivatives numerically via a finite difference
approximation. The user-supplied function need not calculate the
derivatives explicitly. However, if you desire to compute them
analytically, then the AUTODERIVATIVE=0 keyword must be passed to MPFIT.
As a practical matter, it is often sufficient and even faster to allow
MPFIT to calculate the derivatives numerically, and so
AUTODERIVATIVE=0 is not necessary.
If AUTODERIVATIVE=0 is used then the user function must check the parameter
FJAC, and if FJAC!=None then return the partial derivative array in the
return list.
def myfunct(p, fjac=None, x=None, y=None, err=None)
# Parameter values are passed in "p"
# If FJAC!=None then partial derivatives must be computed.
# FJAC contains an array of len(p), where each entry
# is 1 if that parameter is free and 0 if it is fixed.
model = F(x, p)
# Non-negative status value means MPFIT should continue, negative means
# stop the calculation.
status = 0
if (dojac):
pderiv = zeros([len(x), len(p)], Float)
for j in range(len(p)):
pderiv[:,j] = FGRAD(x, p, j)
else:
pderiv = None
return [status, (y-model)/err, pderiv]
where FGRAD(x, p, i) is a user function which must compute the
derivative of the model with respect to parameter P[i] at X. When
finite differencing is used for computing derivatives (ie, when
AUTODERIVATIVE=1), or when MPFIT needs only the errors but not the
derivatives the parameter FJAC=None.
Derivatives should be returned in the PDERIV array. PDERIV should be an m x
n array, where m is the number of data points and n is the number
of parameters. dp[i,j] is the derivative at the ith point with
respect to the jth parameter.
The derivatives with respect to fixed parameters are ignored; zero
is an appropriate value to insert for those derivatives. Upon
input to the user function, FJAC is set to a vector with the same
length as P, with a value of 1 for a parameter which is free, and a
value of zero for a parameter which is fixed (and hence no
derivative needs to be calculated).
If the data is higher than one dimensional, then the *last*
dimension should be the parameter dimension. Example: fitting a
50x50 image, "dp" should be 50x50xNPAR.
CONSTRAINING PARAMETER VALUES WITH THE PARINFO KEYWORD
The behavior of MPFIT can be modified with respect to each
parameter to be fitted. A parameter value can be fixed; simple
boundary constraints can be imposed; limitations on the parameter
changes can be imposed; properties of the automatic derivative can
be modified; and parameters can be tied to one another.
These properties are governed by the PARINFO structure, which is
passed as a keyword parameter to MPFIT.
PARINFO should be a list of dictionaries, one list entry for each parameter.
Each parameter is associated with one element of the array, in
numerical order. The dictionary can have the following keys
(none are required, keys are case insensitive):
'value' - the starting parameter value (but see the START_PARAMS
parameter for more information).
'fixed' - a boolean value, whether the parameter is to be held
fixed or not. Fixed parameters are not varied by
MPFIT, but are passed on to MYFUNCT for evaluation.
'limited' - a two-element boolean array. If the first/second
element is set, then the parameter is bounded on the
lower/upper side. A parameter can be bounded on both
sides. Both LIMITED and LIMITS must be given
together.
'limits' - a two-element float array. Gives the
parameter limits on the lower and upper sides,
respectively. Zero, one or two of these values can be
set, depending on the values of LIMITED. Both LIMITED
and LIMITS must be given together.
'parname' - a string, giving the name of the parameter. The
fitting code of MPFIT does not use this tag in any
way. However, the default iterfunct will print the
parameter name if available.
'step' - the step size to be used in calculating the numerical
derivatives. If set to zero, then the step size is
computed automatically. Ignored when AUTODERIVATIVE=0.
'mpside' - the sidedness of the finite difference when computing
numerical derivatives. This field can take four
values:
0 - one-sided derivative computed automatically
1 - one-sided derivative (f(x+h) - f(x) )/h
-1 - one-sided derivative (f(x) - f(x-h))/h
2 - two-sided derivative (f(x+h) - f(x-h))/(2*h)
Where H is the STEP parameter described above. The
"automatic" one-sided derivative method will chose a
direction for the finite difference which does not
violate any constraints. The other methods do not
perform this check. The two-sided method is in
principle more precise, but requires twice as many
function evaluations. Default: 0.
'mpmaxstep' - the maximum change to be made in the parameter
value. During the fitting process, the parameter
will never be changed by more than this value in
one iteration.
A value of 0 indicates no maximum. Default: 0.
'tied' - a string expression which "ties" the parameter to other
free or fixed parameters. Any expression involving
constants and the parameter array P are permitted.
Example: if parameter 2 is always to be twice parameter
1 then use the following: parinfo(2).tied = '2 * p(1)'.
Since they are totally constrained, tied parameters are
considered to be fixed; no errors are computed for them.
[ NOTE: the PARNAME can't be used in expressions. ]
'mpprint' - if set to 1, then the default iterfunct will print the
parameter value. If set to 0, the parameter value
will not be printed. This tag can be used to
selectively print only a few parameter values out of
many. Default: 1 (all parameters printed)
Future modifications to the PARINFO structure, if any, will involve
adding dictionary tags beginning with the two letters "MP".
Therefore programmers are urged to avoid using tags starting with
the same letters; otherwise they are free to include their own
fields within the PARINFO structure, and they will be ignored.
PARINFO Example:
parinfo = [{'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}
for i in range(5)]
parinfo[0]['fixed'] = 1
parinfo[4]['limited'][0] = 1
parinfo[4]['limits'][0] = 50.
values = [5.7, 2.2, 500., 1.5, 2000.]
for i in range(5): parinfo[i]['value']=values[i]
A total of 5 parameters, with starting values of 5.7,
2.2, 500, 1.5, and 2000 are given. The first parameter
is fixed at a value of 5.7, and the last parameter is
constrained to be above 50.
EXAMPLE
import mpfit
import numpy.oldnumeric as Numeric
x = arange(100, float)
p0 = [5.7, 2.2, 500., 1.5, 2000.]
y = ( p[0] + p[1]*[x] + p[2]*[x**2] + p[3]*sqrt(x) +
p[4]*log(x))
fa = {'x':x, 'y':y, 'err':err}
m = mpfit('myfunct', p0, functkw=fa)
print 'status = ', m.status
if (m.status <= 0): print 'error message = ', m.errmsg
print 'parameters = ', m.params
Minimizes sum of squares of MYFUNCT. MYFUNCT is called with the X,
Y, and ERR keyword parameters that are given by FUNCTKW. The
results can be obtained from the returned object m.
THEORY OF OPERATION
There are many specific strategies for function minimization. One
very popular technique is to use function gradient information to
realize the local structure of the function. Near a local minimum
the function value can be taylor expanded about x0 as follows:
f(x) = f(x0) + f'(x0) . (x-x0) + (1/2) (x-x0) . f''(x0) . (x-x0)
----- --------------- ------------------------------- (1)
Order 0th 1st 2nd
Here f'(x) is the gradient vector of f at x, and f''(x) is the
Hessian matrix of second derivatives of f at x. The vector x is
the set of function parameters, not the measured data vector. One
can find the minimum of f, f(xm) using Newton's method, and
arrives at the following linear equation:
f''(x0) . (xm-x0) = - f'(x0) (2)
If an inverse can be found for f''(x0) then one can solve for
(xm-x0), the step vector from the current position x0 to the new
projected minimum. Here the problem has been linearized (ie, the
gradient information is known to first order). f''(x0) is
symmetric n x n matrix, and should be positive definite.
The Levenberg - Marquardt technique is a variation on this theme.
It adds an additional diagonal term to the equation which may aid the
convergence properties:
(f''(x0) + nu I) . (xm-x0) = -f'(x0) (2a)
where I is the identity matrix. When nu is large, the overall
matrix is diagonally dominant, and the iterations follow steepest
descent. When nu is small, the iterations are quadratically
convergent.
In principle, if f''(x0) and f'(x0) are known then xm-x0 can be
determined. However the Hessian matrix is often difficult or
impossible to compute. The gradient f'(x0) may be easier to
compute, if even by finite difference techniques. So-called
quasi-Newton techniques attempt to successively estimate f''(x0)
by building up gradient information as the iterations proceed.
In the least squares problem there are further simplifications
which assist in solving eqn (2). The function to be minimized is
a sum of squares:
f = Sum(hi^2) (3)
where hi is the ith residual out of m residuals as described
above. This can be substituted back into eqn (2) after computing
the derivatives:
f' = 2 Sum(hi hi')
f'' = 2 Sum(hi' hj') + 2 Sum(hi hi'') (4)
If one assumes that the parameters are already close enough to a
minimum, then one typically finds that the second term in f'' is
negligible [or, in any case, is too difficult to compute]. Thus,
equation (2) can be solved, at least approximately, using only
gradient information.
In matrix notation, the combination of eqns (2) and (4) becomes:
hT' . h' . dx = - hT' . h (5)
Where h is the residual vector (length m), hT is its transpose, h'
is the Jacobian matrix (dimensions n x m), and dx is (xm-x0). The
user function supplies the residual vector h, and in some cases h'
when it is not found by finite differences (see MPFIT_FDJAC2,
which finds h and hT'). Even if dx is not the best absolute step
to take, it does provide a good estimate of the best *direction*,
so often a line minimization will occur along the dx vector
direction.
The method of solution employed by MINPACK is to form the Q . R
factorization of h', where Q is an orthogonal matrix such that QT .
Q = I, and R is upper right triangular. Using h' = Q . R and the
orthogonality of Q, eqn (5) becomes
(RT . QT) . (Q . R) . dx = - (RT . QT) . h
RT . R . dx = - RT . QT . h (6)
R . dx = - QT . h
where the last statement follows because R is upper triangular.
Here, R, QT and h are known so this is a matter of solving for dx.
The routine MPFIT_QRFAC provides the QR factorization of h, with
pivoting, and MPFIT_QRSOLV provides the solution for dx.
REFERENCES
MINPACK-1, Jorge More', available from netlib (www.netlib.org).
"Optimization Software Guide," Jorge More' and Stephen Wright,
SIAM, *Frontiers in Applied Mathematics*, Number 14.
More', Jorge J., "The Levenberg-Marquardt Algorithm:
Implementation and Theory," in *Numerical Analysis*, ed. Watson,
G. A., Lecture Notes in Mathematics 630, Springer-Verlag, 1977.
MODIFICATION HISTORY
Translated from MINPACK-1 in FORTRAN, Apr-Jul 1998, CM
Copyright (C) 1997-2002, Craig Markwardt
This software is provided as is without any warranty whatsoever.
Permission to use, copy, modify, and distribute modified or
unmodified copies is granted, provided this copyright and disclaimer
are included unchanged.
Translated from MPFIT (Craig Markwardt's IDL package) to Python,
August, 2002. Mark Rivers
Converted from Numeric to numpy (Sergey Koposov, July 2008)
"""
import numpy
import types
import scipy.linalg.blas
# Original FORTRAN documentation
# **********
#
# subroutine lmdif
#
# the purpose of lmdif is to minimize the sum of the squares of
# m nonlinear functions in n variables by a modification of
# the levenberg-marquardt algorithm. the user must provide a
# subroutine which calculates the functions. the jacobian is
# then calculated by a forward-difference approximation.
#
# the subroutine statement is
#
# subroutine lmdif(fcn,m,n,x,fvec,ftol,xtol,gtol,maxfev,epsfcn,
# diag,mode,factor,nprint,info,nfev,fjac,
# ldfjac,ipvt,qtf,wa1,wa2,wa3,wa4)
#
# where
#
# fcn is the name of the user-supplied subroutine which
# calculates the functions. fcn must be declared
# in an external statement in the user calling
# program, and should be written as follows.
#
# subroutine fcn(m,n,x,fvec,iflag)
# integer m,n,iflag
# double precision x(n),fvec(m)
# ----------
# calculate the functions at x and
# return this vector in fvec.
# ----------
# return
# end
#
# the value of iflag should not be changed by fcn unless
# the user wants to terminate execution of lmdif.
# in this case set iflag to a negative integer.
#
# m is a positive integer input variable set to the number
# of functions.
#
# n is a positive integer input variable set to the number
# of variables. n must not exceed m.
#
# x is an array of length n. on input x must contain
# an initial estimate of the solution vector. on output x
# contains the final estimate of the solution vector.
#
# fvec is an output array of length m which contains
# the functions evaluated at the output x.
#
# ftol is a nonnegative input variable. termination
# occurs when both the actual and predicted relative
# reductions in the sum of squares are at most ftol.
# therefore, ftol measures the relative error desired
# in the sum of squares.
#
# xtol is a nonnegative input variable. termination
# occurs when the relative error between two consecutive
# iterates is at most xtol. therefore, xtol measures the
# relative error desired in the approximate solution.
#
# gtol is a nonnegative input variable. termination
# occurs when the cosine of the angle between fvec and
# any column of the jacobian is at most gtol in absolute
# value. therefore, gtol measures the orthogonality
# desired between the function vector and the columns
# of the jacobian.
#
# maxfev is a positive integer input variable. termination
# occurs when the number of calls to fcn is at least
# maxfev by the end of an iteration.
#
# epsfcn is an input variable used in determining a suitable
# step length for the forward-difference approximation. this
# approximation assumes that the relative errors in the
# functions are of the order of epsfcn. if epsfcn is less
# than the machine precision, it is assumed that the relative
# errors in the functions are of the order of the machine
# precision.
#
# diag is an array of length n. if mode = 1 (see
# below), diag is internally set. if mode = 2, diag
# must contain positive entries that serve as
# multiplicative scale factors for the variables.
#
# mode is an integer input variable. if mode = 1, the
# variables will be scaled internally. if mode = 2,
# the scaling is specified by the input diag. other
# values of mode are equivalent to mode = 1.
#
# factor is a positive input variable used in determining the
# initial step bound. this bound is set to the product of
# factor and the euclidean norm of diag*x if nonzero, or else
# to factor itself. in most cases factor should lie in the
# interval (.1,100.). 100. is a generally recommended value.
#
# nprint is an integer input variable that enables controlled
# printing of iterates if it is positive. in this case,
# fcn is called with iflag = 0 at the beginning of the first
# iteration and every nprint iterations thereafter and
# immediately prior to return, with x and fvec available
# for printing. if nprint is not positive, no special calls
# of fcn with iflag = 0 are made.
#
# info is an integer output variable. if the user has
# terminated execution, info is set to the (negative)
# value of iflag. see description of fcn. otherwise,
# info is set as follows.
#
# info = 0 improper input parameters.
#
# info = 1 both actual and predicted relative reductions
# in the sum of squares are at most ftol.
#
# info = 2 relative error between two consecutive iterates
# is at most xtol.
#
# info = 3 conditions for info = 1 and info = 2 both hold.
#
# info = 4 the cosine of the angle between fvec and any
# column of the jacobian is at most gtol in
# absolute value.
#
# info = 5 number of calls to fcn has reached or
# exceeded maxfev.
#
# info = 6 ftol is too small. no further reduction in
# the sum of squares is possible.
#
# info = 7 xtol is too small. no further improvement in
# the approximate solution x is possible.
#
# info = 8 gtol is too small. fvec is orthogonal to the
# columns of the jacobian to machine precision.
#
# nfev is an integer output variable set to the number of
# calls to fcn.
#
# fjac is an output m by n array. the upper n by n submatrix
# of fjac contains an upper triangular matrix r with
# diagonal elements of nonincreasing magnitude such that
#
# t t t
# p *(jac *jac)*p = r *r,
#
# where p is a permutation matrix and jac is the final
# calculated jacobian. column j of p is column ipvt(j)
# (see below) of the identity matrix. the lower trapezoidal
# part of fjac contains information generated during
# the computation of r.
#
# ldfjac is a positive integer input variable not less than m
# which specifies the leading dimension of the array fjac.
#
# ipvt is an integer output array of length n. ipvt
# defines a permutation matrix p such that jac*p = q*r,
# where jac is the final calculated jacobian, q is
# orthogonal (not stored), and r is upper triangular
# with diagonal elements of nonincreasing magnitude.
# column j of p is column ipvt(j) of the identity matrix.
#
# qtf is an output array of length n which contains
# the first n elements of the vector (q transpose)*fvec.
#
# wa1, wa2, and wa3 are work arrays of length n.
#
# wa4 is a work array of length m.
#
# subprograms called
#
# user-supplied ...... fcn
#
#     minpack-supplied ... dpmpar,enorm,fdjac2,qrfac
#
# fortran-supplied ... dabs,dmax1,dmin1,dsqrt,mod
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
class mpfit:
blas_enorm32, = scipy.linalg.blas.get_blas_funcs(['nrm2'],numpy.array([0],dtype=numpy.float32))
blas_enorm64, = scipy.linalg.blas.get_blas_funcs(['nrm2'],numpy.array([0],dtype=numpy.float64))
 def __init__(self, fcn, xall=None, functkw={}, parinfo=None, ftol=1.e-10,
  xtol=1.e-10, gtol=1.e-10, damp=0., maxiter=200, factor=100.,
  nprint=1, iterfunct='default', iterkw={}, nocovar=0, rescale=0,
  autoderivative=1, quiet=0, diag=None, epsfcn=None, debug=False,
  **kwargs):
  """
  Inputs:
   fcn:
    The function to be minimized. The function should return the weighted
    deviations between the model and the data, as described above.
   xall:
    An array of starting values for each of the parameters of the model.
    The number of parameters should be fewer than the number of measurements.
    This parameter is optional if the parinfo keyword is used (but see
    parinfo). The parinfo keyword provides a mechanism to fix or constrain
    individual parameters.
  Keywords:
   autoderivative:
    If this is set, derivatives of the function will be computed
    automatically via a finite differencing procedure. If not set, then
    fcn must provide the (analytical) derivatives.
    Default: set (=1)
    NOTE: to supply your own analytical derivatives,
    explicitly pass autoderivative=0
   ftol:
    A nonnegative input variable. Termination occurs when both the actual
    and predicted relative reductions in the sum of squares are at most
    ftol (and status is accordingly set to 1 or 3). Therefore, ftol
    measures the relative error desired in the sum of squares.
    Default: 1E-10
   functkw:
    A dictionary which contains the parameters to be passed to the
    user-supplied function specified by fcn via the standard Python
    keyword dictionary mechanism. This is the way you can pass additional
    data to your user-supplied function without using global variables.
    Consider the following example:
     if functkw = {'xval':[1.,2.,3.], 'yval':[1.,4.,9.],
       'errval':[1.,1.,1.] }
    then the user supplied function should be declared like this:
     def myfunct(p, fjac=None, xval=None, yval=None, errval=None):
    Default: {} No extra parameters are passed to the user-supplied
    function.
   gtol:
    A nonnegative input variable. Termination occurs when the cosine of
    the angle between fvec and any column of the jacobian is at most gtol
    in absolute value (and status is accordingly set to 4). Therefore,
    gtol measures the orthogonality desired between the function vector
    and the columns of the jacobian.
    Default: 1e-10
   iterkw:
    The keyword arguments to be passed to iterfunct via the dictionary
    keyword mechanism. This should be a dictionary and is similar in
    operation to FUNCTKW.
    Default: {} No arguments are passed.
   iterfunct:
    The name of a function to be called upon each NPRINT iteration of the
    MPFIT routine. It should be declared in the following way:
     def iterfunct(myfunct, p, iter, fnorm, functkw=None,
       parinfo=None, quiet=0, dof=None, [iterkw keywords here])
     # perform custom iteration update
    iterfunct must accept all three keyword parameters (FUNCTKW, PARINFO
    and QUIET).
    myfunct: The user-supplied function to be minimized,
    p: The current set of model parameters
    iter: The iteration number
    functkw: The arguments to be passed to myfunct.
    fnorm: The chi-squared value.
    quiet: Set when no textual output should be printed.
    dof: The number of degrees of freedom, normally the number of points
     less the number of free parameters.
    See below for documentation of parinfo.
    In implementation, iterfunct can perform updates to the terminal or
    graphical user interface, to provide feedback while the fit proceeds.
    If the fit is to be stopped for any reason, then iterfunct should return a
    a status value between -15 and -1. Otherwise it should return None
    (e.g. no return statement) or 0.
    In principle, iterfunct should probably not modify the parameter values,
    because it may interfere with the algorithm's stability. In practice it
    is allowed.
    Default: an internal routine is used to print the parameter values.
    Set iterfunct=None if there is no user-defined routine and you don't
    want the internal default routine be called.
   maxiter:
    The maximum number of iterations to perform. If the number is exceeded,
    then the status value is set to 5 and MPFIT returns.
    Default: 200 iterations
   nocovar:
    Set this keyword to prevent the calculation of the covariance matrix
    before returning (see COVAR)
    Default: clear (=0) The covariance matrix is returned
   nprint:
    The frequency with which iterfunct is called. A value of 1 indicates
    that iterfunct is called with every iteration, while 2 indicates every
    other iteration, etc. Note that several Levenberg-Marquardt attempts
    can be made in a single iteration.
    Default value: 1
   parinfo
    Provides a mechanism for more sophisticated constraints to be placed on
    parameter values. When parinfo is not passed, then it is assumed that
    all parameters are free and unconstrained. Values in parinfo are never
    modified during a call to MPFIT.
    See description above for the structure of PARINFO.
    Default value: None All parameters are free and unconstrained.
   quiet:
    Set this keyword when no textual output should be printed by MPFIT
   damp:
    A scalar number, indicating the cut-off value of residuals where
    "damping" will occur. Residuals with magnitudes greater than this
    number will be replaced by their hyperbolic tangent. This partially
    mitigates the so-called large residual problem inherent in
    least-squares solvers (as for the test problem CURVI,
    http://www.maxthis.com/curviex.htm).
    A value of 0 indicates no damping.
    Default: 0
    Note: DAMP doesn't work with autoderivative=0
   xtol:
    A nonnegative input variable. Termination occurs when the relative error
    between two consecutive iterates is at most xtol (and status is
    accordingly set to 2 or 3). Therefore, xtol measures the relative error
    desired in the approximate solution.
    Default: 1E-10
  Outputs:
   Returns an object of type mpfit. The results are attributes of this class,
   e.g. mpfit.status, mpfit.errmsg, mpfit.params, mpfit.niter, mpfit.covar.
   .status
    An integer status code is returned. All values greater than zero can
    represent success (however .status == 5 may indicate failure to
    converge). It can have one of the following values:
    -16
     A parameter or function value has become infinite or an undefined
     number. This is usually a consequence of numerical overflow in the
     user's model function, which must be avoided.
    -15 to -1
     These are error codes that either MYFUNCT or iterfunct may return to
     terminate the fitting process. Values from -15 to -1 are reserved
     for the user functions and will not clash with MPFIT.
    0 Improper input parameters.
    1 Both actual and predicted relative reductions in the sum of squares
     are at most ftol.
    2 Relative error between two consecutive iterates is at most xtol
    3 Conditions for status = 1 and status = 2 both hold.
    4 The cosine of the angle between fvec and any column of the jacobian
     is at most gtol in absolute value.
    5 The maximum number of iterations has been reached.
    6 ftol is too small. No further reduction in the sum of squares is
     possible.
    7 xtol is too small. No further improvement in the approximate solution
     x is possible.
    8 gtol is too small. fvec is orthogonal to the columns of the jacobian
     to machine precision.
   .fnorm
    The value of the summed squared residuals for the returned parameter
    values.
   .covar
    The covariance matrix for the set of parameters returned by MPFIT.
    The matrix is NxN where N is the number of parameters. The square root
    of the diagonal elements gives the formal 1-sigma statistical errors on
    the parameters if errors were treated "properly" in fcn.
    Parameter errors are also returned in .perror.
    To compute the correlation matrix, pcor, use this example:
     cov = mpfit.covar
     pcor = cov * 0.
     for i in range(n):
      for j in range(n):
       pcor[i,j] = cov[i,j]/sqrt(cov[i,i]*cov[j,j])
    If nocovar is set or MPFIT terminated abnormally, then .covar is set to
    a scalar with value None.
   .errmsg
    A string error or warning message is returned.
   .nfev
    The number of calls to MYFUNCT performed.
   .niter
    The number of iterations completed.
   .perror
    The formal 1-sigma errors in each parameter, computed from the
    covariance matrix. If a parameter is held fixed, or if it touches a
    boundary, then the error is reported as zero.
    If the fit is unweighted (i.e. no errors were given, or the weights
    were uniformly set to unity), then .perror will probably not represent
    the true parameter uncertainties.
    *If* you can assume that the true reduced chi-squared value is unity --
    meaning that the fit is implicitly assumed to be of good quality --
    then the estimated parameter uncertainties can be computed by scaling
    .perror by the measured chi-squared value.
     dof = len(x) - len(mpfit.params) # deg of freedom
     # scaled uncertainties
     pcerror = mpfit.perror * sqrt(mpfit.fnorm / dof)
  """
  # ---- Result attributes: initialized to "no result yet" defaults ----
  self.niter = 0
  self.params = None
  self.covar = None
  self.perror = None
  self.status = 0 # Invalid input flag set while we check inputs
  self.debug = debug
  self.errmsg = ''
  self.nfev = 0
  self.damp = damp
  self.dof=0
  if fcn==None:
   self.errmsg = "Usage: parms = mpfit('myfunt', ... )"
   return
  else:
   self.fcn = fcn
  if iterfunct == 'default':
   iterfunct = self.defiter
  # Parameter damping doesn't work when user is providing their own
  # gradients.
  if (self.damp != 0) and (autoderivative == 0):
   self.errmsg = 'ERROR: keywords DAMP and AUTODERIVATIVE are mutually exclusive'
   return
  # Parameters can either be stored in parinfo, or x. x takes precedence if it exists
  if (xall is None) and (parinfo is None):
   self.errmsg = 'ERROR: must pass parameters in P or PARINFO'
   return
  # Be sure that PARINFO is of the right type
  # NOTE(review): types.ListType / types.DictionaryType are Python-2-only
  # aliases of list/dict; this module targets Python 2 as written.
  if parinfo is not None:
   if type(parinfo) != types.ListType:
    self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
    return
   else:
    if type(parinfo[0]) != types.DictionaryType:
     self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
     return
   if ((xall is not None) and (len(xall) != len(parinfo))):
    self.errmsg = 'ERROR: number of elements in PARINFO and P must agree'
    return
  self.parinfo_in = parinfo
  # If the parameters were not specified at the command line, then
  # extract them from PARINFO
  if xall is None:
   xall = self.parinfo(parinfo, 'value')
   if xall is None:
    self.errmsg = 'ERROR: either P or PARINFO(*)["value"] must be supplied.'
    return
  self.parnames = self.parinfo(parinfo, 'parname')
  # Make sure parameters are numpy arrays
  xall = numpy.asarray(xall)
  # In the case if the xall is not float or if is float but has less
  # than 64 bits we do convert it into double
  # NOTE(review): numpy.float is a deprecated (and now removed) alias of
  # the builtin float; modern numpy requires numpy.float64 here.
  if xall.dtype.kind != 'f' or xall.dtype.itemsize<=4:
   xall = xall.astype(numpy.float)
  npar = len(xall)
  self.fnorm = -1.
  fnorm1 = -1.
  # TIED parameters?
  ptied = self.parinfo(parinfo, 'tied', default='', n=npar)
  self.qanytied = 0
  for i in range(npar):
   ptied[i] = ptied[i].strip()
   if ptied[i] != '':
    self.qanytied = 1
  self.ptied = ptied
  # FIXED parameters ?
  pfixed = self.parinfo(parinfo, 'fixed', default=0, n=npar)
  pfixed = (pfixed == 1)
  for i in range(npar):
   pfixed[i] = pfixed[i] or (ptied[i] != '') # Tied parameters are also effectively fixed
  # Finite differencing step, absolute and relative, and sidedness of deriv.
  step = self.parinfo(parinfo, 'step', default=0., n=npar)
  dstep = self.parinfo(parinfo, 'relstep', default=0., n=npar)
  dside = self.parinfo(parinfo, 'mpside', default=0, n=npar)
  # Maximum and minimum steps allowed to be taken in one iteration
  maxstep = self.parinfo(parinfo, 'mpmaxstep', default=0., n=npar)
  minstep = self.parinfo(parinfo, 'mpminstep', default=0., n=npar)
  qmin = minstep != 0
  qmin[:] = False # Remove minstep for now!!
  qmax = maxstep != 0
  if numpy.any(qmin & qmax & (maxstep<minstep)):
   self.errmsg = 'ERROR: MPMINSTEP is greater than MPMAXSTEP'
   return
  wh = (numpy.nonzero((qmin!=0.) | (qmax!=0.)))[0]
  # NOTE(review): 'len(wh > 0)' equals len(wh) (length of a boolean array),
  # which was presumably meant to be 'len(wh) > 0'. The truthiness used
  # below coincides, so behavior is unaffected — but confirm before relying
  # on qminmax as a count.
  qminmax = len(wh > 0)
  # Finish up the free parameters
  ifree = (numpy.nonzero(pfixed != 1))[0]
  nfree = len(ifree)
  if nfree == 0:
   self.errmsg = 'ERROR: no free parameters'
   return
  # Compose only VARYING parameters
  self.params = xall.copy() # self.params is the set of parameters to be returned
  x = self.params[ifree] # x is the set of free parameters
  # LIMITED parameters ?
  limited = self.parinfo(parinfo, 'limited', default=[0,0], n=npar)
  limits = self.parinfo(parinfo, 'limits', default=[0.,0.], n=npar)
  if (limited is not None) and (limits is not None):
   # Error checking on limits in parinfo
   if numpy.any((limited[:,0] & (xall < limits[:,0])) |
     (limited[:,1] & (xall > limits[:,1]))):
    self.errmsg = 'ERROR: parameters are not within PARINFO limits'
    return
   if numpy.any((limited[:,0] & limited[:,1]) &
     (limits[:,0] >= limits[:,1]) &
     (pfixed == 0)):
    self.errmsg = 'ERROR: PARINFO parameter limits are not consistent'
    return
   # Transfer structure values to local variables
   qulim = (limited[:,1])[ifree]
   ulim = (limits [:,1])[ifree]
   qllim = (limited[:,0])[ifree]
   llim = (limits [:,0])[ifree]
   if numpy.any((qulim!=0.) | (qllim!=0.)):
    qanylim = 1
   else:
    qanylim = 0
  else:
   # Fill in local variables with dummy values
   qulim = numpy.zeros(nfree)
   ulim = x * 0.
   qllim = qulim
   llim = x * 0.
   qanylim = 0
  n = len(x)
  # Check input parameters for errors
  if (n < 0) or (ftol <= 0) or (xtol <= 0) or (gtol <= 0) \
    or (maxiter < 0) or (factor <= 0):
   self.errmsg = 'ERROR: input keywords are inconsistent'
   return
  # errmsg is pre-set to the DIAG failure message and only cleared once
  # both rescale sanity checks pass (the early returns keep the message).
  if rescale != 0:
   self.errmsg = 'ERROR: DIAG parameter scales are inconsistent'
   if len(diag) < n:
    return
   if numpy.any(diag <= 0):
    return
   self.errmsg = ''
  # First evaluation of the user function; also counts toward nfev.
  [self.status, fvec] = self.call(fcn, self.params, functkw)
  if self.status < 0:
   self.errmsg = 'ERROR: first call to "'+str(fcn)+'" failed'
   return
  # If the returned fvec has more than four bits I assume that we have
  # double precision
  # It is important that the machar is determined by the precision of
  # the returned value, not by the precision of the input array
  if numpy.array([fvec]).dtype.itemsize>4:
   self.machar = machar(double=1)
   self.blas_enorm = mpfit.blas_enorm64
  else:
   self.machar = machar(double=0)
   self.blas_enorm = mpfit.blas_enorm32
  machep = self.machar.machep
  m = len(fvec)
  if m < n:
   self.errmsg = 'ERROR: number of parameters must not exceed data'
   return
  self.dof = m-nfree
  self.fnorm = self.enorm(fvec)
  # Initialize Levenberg-Marquardt parameter and iteration counter
  par = 0.
  self.niter = 1
  qtf = x * 0.
  self.status = 0
  # Beginning of the outer loop
  while(1):
   # If requested, call fcn to enable printing of iterates
   self.params[ifree] = x
   if self.qanytied:
    self.params = self.tie(self.params, ptied)
   if (nprint > 0) and (iterfunct is not None):
    if ((self.niter-1) % nprint) == 0:
     mperr = 0
     xnew0 = self.params.copy()
     dof = numpy.max([len(fvec) - len(x), 0])
     status = iterfunct(fcn, self.params, self.niter, self.fnorm**2,
       functkw=functkw, parinfo=parinfo, quiet=quiet,
       dof=dof, **iterkw)
     if status is not None:
      self.status = status
     # Check for user termination
     if self.status < 0:
      self.errmsg = 'WARNING: premature termination by ' + str(iterfunct)
      return
     # If parameters were changed (grrr..) then re-tie
     if numpy.max(numpy.abs(xnew0-self.params)) > 0:
      if self.qanytied:
       self.params = self.tie(self.params, ptied)
      x = self.params[ifree]
   # Calculate the jacobian matrix
   self.status = 2
   catch_msg = 'calling MPFIT_FDJAC2'
   fjac = self.fdjac2(fcn, x, fvec, step, qulim, ulim, dside,
     epsfcn=epsfcn,
     autoderivative=autoderivative, dstep=dstep,
     functkw=functkw, ifree=ifree, xall=self.params)
   if fjac is None:
    self.errmsg = 'WARNING: premature termination by FDJAC2'
    return
   # Determine if any of the parameters are pegged at the limits
   if qanylim:
    catch_msg = 'zeroing derivatives of pegged parameters'
    whlpeg = (numpy.nonzero(qllim & (x == llim)))[0]
    nlpeg = len(whlpeg)
    whupeg = (numpy.nonzero(qulim & (x == ulim)))[0]
    nupeg = len(whupeg)
    # See if any "pegged" values should keep their derivatives
    if nlpeg > 0:
     # Total derivative of sum wrt lower pegged parameters
     for i in range(nlpeg):
      sum0 = sum(fvec * fjac[:,whlpeg[i]])
      if sum0 > 0:
       fjac[:,whlpeg[i]] = 0
    if nupeg > 0:
     # Total derivative of sum wrt upper pegged parameters
     for i in range(nupeg):
      sum0 = sum(fvec * fjac[:,whupeg[i]])
      if sum0 < 0:
       fjac[:,whupeg[i]] = 0
   # Compute the QR factorization of the jacobian
   [fjac, ipvt, wa1, wa2] = self.qrfac(fjac, pivot=1)
   # On the first iteration if "diag" is unspecified, scale
   # according to the norms of the columns of the initial jacobian
   catch_msg = 'rescaling diagonal elements'
   if self.niter == 1:
    if (rescale==0) or (len(diag) < n):
     diag = wa2.copy()
     diag[diag == 0] = 1.
    # On the first iteration, calculate the norm of the scaled x
    # and initialize the step bound delta
    wa3 = diag * x
    xnorm = self.enorm(wa3)
    delta = factor*xnorm
    if delta == 0.:
     delta = factor
   # Form (q transpose)*fvec and store the first n components in qtf
   catch_msg = 'forming (q transpose)*fvec'
   wa4 = fvec.copy()
   for j in range(n):
    lj = ipvt[j]
    temp3 = fjac[j,lj]
    if temp3 != 0:
     fj = fjac[j:,lj]
     wj = wa4[j:]
     # *** optimization wa4(j:*)
     wa4[j:] = wj - fj * sum(fj*wj) / temp3
    fjac[j,lj] = wa1[j]
    qtf[j] = wa4[j]
   # From this point on, only the square matrix, consisting of the
   # triangle of R, is needed.
   fjac = fjac[0:n, 0:n]
   fjac.shape = [n, n]
   temp = fjac.copy()
   for i in range(n):
    temp[:,i] = fjac[:, ipvt[i]]
   fjac = temp.copy()
   # Check for overflow. This should be a cheap test here since FJAC
   # has been reduced to a (small) square matrix, and the test is
   # O(N^2).
   #wh = where(finite(fjac) EQ 0, ct)
   #if ct GT 0 then goto, FAIL_OVERFLOW
   # Compute the norm of the scaled gradient
   catch_msg = 'computing the scaled gradient'
   gnorm = 0.
   if self.fnorm != 0:
    for j in range(n):
     l = ipvt[j]
     if wa2[l] != 0:
      sum0 = sum(fjac[0:j+1,j]*qtf[0:j+1])/self.fnorm
      gnorm = numpy.max([gnorm,numpy.abs(sum0/wa2[l])])
   # Test for convergence of the gradient norm
   if gnorm <= gtol:
    self.status = 4
    break
   if maxiter == 0:
    self.status = 5
    break
   # Rescale if necessary
   if rescale == 0:
    diag = numpy.choose(diag>wa2, (wa2, diag))
   # Beginning of the inner loop
   while(1):
    # Determine the levenberg-marquardt parameter
    catch_msg = 'calculating LM parameter (MPFIT_)'
    [fjac, par, wa1, wa2] = self.lmpar(fjac, ipvt, diag, qtf,
      delta, wa1, wa2, par=par)
    # Store the direction p and x+p. Calculate the norm of p
    wa1 = -wa1
    if (qanylim == 0) and (qminmax == 0):
     # No parameter limits, so just move to new position WA2
     alpha = 1.
     wa2 = x + wa1
    else:
     # Respect the limits. If a step were to go out of bounds, then
     # we should take a step in the same direction but shorter distance.
     # The step should take us right to the limit in that case.
     alpha = 1.
     if qanylim:
      # Do not allow any steps out of bounds
      catch_msg = 'checking for a step out of bounds'
      if nlpeg > 0:
       wa1[whlpeg] = numpy.clip( wa1[whlpeg], 0., numpy.max(wa1))
      if nupeg > 0:
       wa1[whupeg] = numpy.clip(wa1[whupeg], numpy.min(wa1), 0.)
      dwa1 = numpy.abs(wa1) > machep
      whl = (numpy.nonzero(((dwa1!=0.) & qllim) & ((x + wa1) < llim)))[0]
      if len(whl) > 0:
       t = ((llim[whl] - x[whl]) /
         wa1[whl])
       alpha = numpy.min([alpha, numpy.min(t)])
      whu = (numpy.nonzero(((dwa1!=0.) & qulim) & ((x + wa1) > ulim)))[0]
      if len(whu) > 0:
       t = ((ulim[whu] - x[whu]) /
         wa1[whu])
       alpha = numpy.min([alpha, numpy.min(t)])
     # Obey any max step values.
     if qminmax:
      nwa1 = wa1 * alpha
      whmax = (numpy.nonzero((qmax != 0.) & (maxstep > 0)))[0]
      if len(whmax) > 0:
       mrat = numpy.max(numpy.abs(nwa1[whmax]) /
         numpy.abs(maxstep[ifree[whmax]]))
       if mrat > 1:
        alpha = alpha / mrat
     # Scale the resulting vector
     wa1 = wa1 * alpha
     wa2 = x + wa1
     # Adjust the final output values. If the step put us exactly
     # on a boundary, make sure it is exact.
     sgnu = (ulim >= 0) * 2. - 1.
     sgnl = (llim >= 0) * 2. - 1.
     # Handles case of
     # ... nonzero *LIM ... ...zero * LIM
     ulim1 = ulim * (1 - sgnu * machep) - (ulim == 0) * machep
     llim1 = llim * (1 + sgnl * machep) + (llim == 0) * machep
     wh = (numpy.nonzero((qulim!=0) & (wa2 >= ulim1)))[0]
     if len(wh) > 0:
      wa2[wh] = ulim[wh]
     wh = (numpy.nonzero((qllim!=0.) & (wa2 <= llim1)))[0]
     if len(wh) > 0:
      wa2[wh] = llim[wh]
    # endelse
    wa3 = diag * wa1
    pnorm = self.enorm(wa3)
    # On the first iteration, adjust the initial step bound
    if self.niter == 1:
     delta = numpy.min([delta,pnorm])
    self.params[ifree] = wa2
    # Evaluate the function at x+p and calculate its norm
    mperr = 0
    catch_msg = 'calling '+str(fcn)
    [self.status, wa4] = self.call(fcn, self.params, functkw)
    if self.status < 0:
     self.errmsg = 'WARNING: premature termination by "'+fcn+'"'
     return
    fnorm1 = self.enorm(wa4)
    # Compute the scaled actual reduction
    catch_msg = 'computing convergence criteria'
    actred = -1.
    if (0.1 * fnorm1) < self.fnorm:
     actred = - (fnorm1/self.fnorm)**2 + 1.
    # Compute the scaled predicted reduction and the scaled directional
    # derivative
    for j in range(n):
     wa3[j] = 0
     wa3[0:j+1] = wa3[0:j+1] + fjac[0:j+1,j]*wa1[ipvt[j]]
    # Remember, alpha is the fraction of the full LM step actually
    # taken
    temp1 = self.enorm(alpha*wa3)/self.fnorm
    temp2 = (numpy.sqrt(alpha*par)*pnorm)/self.fnorm
    prered = temp1*temp1 + (temp2*temp2)/0.5
    dirder = -(temp1*temp1 + temp2*temp2)
    # Compute the ratio of the actual to the predicted reduction.
    ratio = 0.
    if prered != 0:
     ratio = actred/prered
    # Update the step bound
    if ratio <= 0.25:
     if actred >= 0:
      temp = .5
     else:
      temp = .5*dirder/(dirder + .5*actred)
     if ((0.1*fnorm1) >= self.fnorm) or (temp < 0.1):
      temp = 0.1
     delta = temp*numpy.min([delta,pnorm/0.1])
     par = par/temp
    else:
     if (par == 0) or (ratio >= 0.75):
      delta = pnorm/.5
      par = .5*par
    # Test for successful iteration
    if ratio >= 0.0001:
     # Successful iteration. Update x, fvec, and their norms
     x = wa2
     wa2 = diag * x
     fvec = wa4
     xnorm = self.enorm(wa2)
     self.fnorm = fnorm1
     self.niter = self.niter + 1
    # Tests for convergence
    if (numpy.abs(actred) <= ftol) and (prered <= ftol) \
      and (0.5 * ratio <= 1):
     self.status = 1
    if delta <= xtol*xnorm:
     self.status = 2
    if (numpy.abs(actred) <= ftol) and (prered <= ftol) \
      and (0.5 * ratio <= 1) and (self.status == 2):
     self.status = 3
    if self.status != 0:
     break
    # Tests for termination and stringent tolerances
    if self.niter >= maxiter:
     self.status = 5
    if (numpy.abs(actred) <= machep) and (prered <= machep) \
      and (0.5*ratio <= 1):
     self.status = 6
    if delta <= machep*xnorm:
     self.status = 7
    if gnorm <= machep:
     self.status = 8
    if self.status != 0:
     break
    # End of inner loop. Repeat if iteration unsuccessful
    if ratio >= 0.0001:
     break
    # Check for over/underflow
    if ~numpy.all(numpy.isfinite(wa1) & numpy.isfinite(wa2) & \
      numpy.isfinite(x)) or ~numpy.isfinite(ratio):
     # NOTE(review): this assigns the local 'errmsg', not self.errmsg,
     # so the overflow message is lost to the caller — verify intent.
     errmsg = ('''ERROR: parameter or function value(s) have become
      'infinite; check model function for over- 'and underflow''')
     self.status = -16
     break
    #wh = where(finite(wa1) EQ 0 OR finite(wa2) EQ 0 OR finite(x) EQ 0, ct)
    #if ct GT 0 OR finite(ratio) EQ 0 then begin
   if self.status != 0:
    break;
  # End of outer loop.
  catch_msg = 'in the termination phase'
  # Termination, either normal or user imposed.
  if len(self.params) == 0:
   return
  if nfree == 0:
   self.params = xall.copy()
  else:
   self.params[ifree] = x
  if (nprint > 0) and (self.status > 0):
   catch_msg = 'calling ' + str(fcn)
   [status, fvec] = self.call(fcn, self.params, functkw)
   catch_msg = 'in the termination phase'
   self.fnorm = self.enorm(fvec)
  if (self.fnorm is not None) and (fnorm1 is not None):
   self.fnorm = numpy.max([self.fnorm, fnorm1])
   self.fnorm = self.fnorm**2.
  self.covar = None
  self.perror = None
  # (very carefully) set the covariance matrix COVAR
  if (self.status > 0) and (nocovar==0) and (n is not None) \
    and (fjac is not None) and (ipvt is not None):
   sz = fjac.shape
   if (n > 0) and (sz[0] >= n) and (sz[1] >= n) \
     and (len(ipvt) >= n):
    catch_msg = 'computing the covariance matrix'
    cv = self.calc_covar(fjac[0:n,0:n], ipvt[0:n])
    cv.shape = [n, n]
    nn = len(xall)
    # Fill in actual covariance matrix, accounting for fixed
    # parameters.
    self.covar = numpy.zeros([nn, nn], dtype=float)
    for i in range(n):
     self.covar[ifree,ifree[i]] = cv[:,i]
    # Compute errors in parameters
    catch_msg = 'computing parameter errors'
    self.perror = numpy.zeros(nn, dtype=float)
    d = numpy.diagonal(self.covar)
    wh = (numpy.nonzero(d >= 0))[0]
    if len(wh) > 0:
     self.perror[wh] = numpy.sqrt(d[wh])
  return
def __str__(self):
return {'params': self.params,
'niter': self.niter,
'params': self.params,
'covar': self.covar,
'perror': self.perror,
'status': self.status,
'debug': self.debug,
'errmsg': self.errmsg,
'nfev': self.nfev,
'damp': self.damp
#,'machar':self.machar
}.__str__()
# Default procedure to be called every iteration. It simply prints
# the parameter values.
 def defiter(self, fcn, x, iter, fnorm=None, functkw=None,
     quiet=0, iterstop=None, parinfo=None,
     format=None, pformat='%.10g', dof=1):
  """Default per-iteration callback: print chi-square, DOF and parameters.

  fcn/functkw are only used to (re)compute fnorm when it was not passed.
  Parameters whose parinfo entry sets 'mpprint' to a false value are
  suppressed. Returns 0 (i.e. never requests termination).
  Note: 'iterstop' and 'format' are accepted but unused here.
  """
  if self.debug:
   print 'Entering defiter...'
  if quiet:
   return
  # If the caller did not supply the chi-square, evaluate the model once.
  if fnorm is None:
   [status, fvec] = self.call(fcn, x, functkw)
   fnorm = self.enorm(fvec)**2
  # Determine which parameters to print
  nprint = len(x)
  print "Iter ", ('%6i' % iter)," CHI-SQUARE = ",('%.10g' % fnorm)," DOF = ", ('%i' % dof)
  for i in range(nprint):
   # Prefer a user-supplied parameter name; fall back to a P<index> label.
   if (parinfo is not None) and ('parname' in parinfo[i]):
    p = ' ' + parinfo[i]['parname'] + ' = '
   else:
    p = ' P' + str(i) + ' = '
   if (parinfo is not None) and ('mpprint' in parinfo[i]):
    iprint = parinfo[i]['mpprint']
   else:
    iprint = 1
   if iprint:
    print p + (pformat % x[i]) + ' '
  return 0
def print_results(self, **kwargs):
self.defiter(self.fcn, self.params, self.niter, parinfo=self.parinfo_in,
dof=self.dof, fnorm=self.fnorm, **kwargs)
# DO_ITERSTOP:
# if keyword_set(iterstop) then begin
# k = get_kbrd(0)
# if k EQ string(byte(7)) then begin
# message, 'WARNING: minimization not complete', /info
# print, 'Do you want to terminate this procedure? (y/n)', $
# format='(A,$)'
# k = ''
# read, k
# if strupcase(strmid(k,0,1)) EQ 'Y' then begin
# message, 'WARNING: Procedure is terminating.', /info
# mperr = -1
# endif
# endif
# endif
# Procedure to parse the parameter values in PARINFO, which is a list of dictionaries
 def parinfo(self, parinfo=None, key='a', default=None, n=0):
  """Extract the value of *key* from each PARINFO dictionary.

  Returns a list (or numpy array when the default suggests a numeric
  type) of length n, substituting *default* for missing entries.
  If n is 0 it is taken from len(parinfo); with no parinfo at all the
  bare default is returned.
  """
  if self.debug:
   print 'Entering parinfo...'
  if (n == 0) and (parinfo is not None):
   n = len(parinfo)
  if n == 0:
   values = default
   return values
  values = []
  for i in range(n):
   if (parinfo is not None) and (key in parinfo[i]):
    values.append(parinfo[i][key])
   else:
    values.append(default)
  # Convert to numeric arrays if possible
  # NOTE(review): types.ListType/IntType/FloatType are Python-2-only
  # aliases (list/int/float); this check inspects the *default* to decide
  # the output dtype, not the collected values themselves.
  test = default
  if type(default) == types.ListType:
   test=default[0]
  if isinstance(test, types.IntType):
   values = numpy.asarray(values, int)
  elif isinstance(test, types.FloatType):
   values = numpy.asarray(values, float)
  return values
# Call user function or procedure, with _EXTRA or not, with
# derivatives or not.
 def call(self, fcn, x, functkw, fjac=None):
  """Invoke the user function, applying ties and optional damping.

  Increments self.nfev. With fjac=None the user function returns
  [status, residuals]; residuals are tanh-damped when self.damp > 0.
  With fjac set, the user function's return (including its analytic
  derivatives) is passed through unchanged.
  """
  if self.debug:
   print 'Entering call...'
  # Enforce TIED parameter expressions before evaluating the model.
  if self.qanytied:
   x = self.tie(x, self.ptied)
  self.nfev = self.nfev + 1
  if fjac is None:
   [status, f] = fcn(x, fjac=fjac, **functkw)
   if self.damp > 0:
    # Apply the damping if requested. This replaces the residuals
    # with their hyperbolic tangent. Thus residuals larger than
    # DAMP are essentially clipped.
    f = numpy.tanh(f/self.damp)
   return [status, f]
  else:
   return fcn(x, fjac=fjac, **functkw)
def enorm(self, vec):
ans = self.blas_enorm(vec)
return ans
 def fdjac2(self, fcn, x, fvec, step=None, ulimited=None, ulimit=None, dside=None,
    epsfcn=None, autoderivative=1,
    functkw=None, xall=None, ifree=None, dside=None) if False else None # (placeholder never emitted)
# Original FORTRAN documentation
# **********
#
# subroutine qrfac
#
# this subroutine uses householder transformations with column
# pivoting (optional) to compute a qr factorization of the
# m by n matrix a. that is, qrfac determines an orthogonal
# matrix q, a permutation matrix p, and an upper trapezoidal
# matrix r with diagonal elements of nonincreasing magnitude,
# such that a*p = q*r. the householder transformation for
# column k, k = 1,2,...,min(m,n), is of the form
#
# t
# i - (1/u(k))*u*u
#
# where u has zeros in the first k-1 positions. the form of
# this transformation and the method of pivoting first
# appeared in the corresponding linpack subroutine.
#
# the subroutine statement is
#
# subroutine qrfac(m,n,a,lda,pivot,ipvt,lipvt,rdiag,acnorm,wa)
#
# where
#
# m is a positive integer input variable set to the number
# of rows of a.
#
# n is a positive integer input variable set to the number
# of columns of a.
#
# a is an m by n array. on input a contains the matrix for
# which the qr factorization is to be computed. on output
# the strict upper trapezoidal part of a contains the strict
# upper trapezoidal part of r, and the lower trapezoidal
# part of a contains a factored form of q (the non-trivial
# elements of the u vectors described above).
#
# lda is a positive integer input variable not less than m
# which specifies the leading dimension of the array a.
#
# pivot is a logical input variable. if pivot is set true,
# then column pivoting is enforced. if pivot is set false,
# then no column pivoting is done.
#
# ipvt is an integer output array of length lipvt. ipvt
# defines the permutation matrix p such that a*p = q*r.
# column j of p is column ipvt(j) of the identity matrix.
# if pivot is false, ipvt is not referenced.
#
# lipvt is a positive integer input variable. if pivot is false,
# then lipvt may be as small as 1. if pivot is true, then
# lipvt must be at least n.
#
# rdiag is an output array of length n which contains the
# diagonal elements of r.
#
# acnorm is an output array of length n which contains the
# norms of the corresponding columns of the input matrix a.
# if this information is not needed, then acnorm can coincide
# with rdiag.
#
# wa is a work array of length n. if pivot is false, then wa
# can coincide with rdiag.
#
# subprograms called
#
# minpack-supplied ... dpmpar,enorm
#
# fortran-supplied ... dmax1,dsqrt,min0
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
#
# PIVOTING / PERMUTING:
#
# Upon return, A(*,*) is in standard parameter order, A(*,IPVT) is in
# permuted order.
#
# RDIAG is in permuted order.
# ACNORM is in standard parameter order.
#
#
# NOTE: in IDL the factors appear slightly differently than described
# above. The matrix A is still m x n where m >= n.
#
# The "upper" triangular matrix R is actually stored in the strict
# lower left triangle of A under the standard notation of IDL.
#
# The reflectors that generate Q are in the upper trapezoid of A upon
# output.
#
# EXAMPLE: decompose the matrix [[9.,2.,6.],[4.,8.,7.]]
# aa = [[9.,2.,6.],[4.,8.,7.]]
# mpfit_qrfac, aa, aapvt, rdiag, aanorm
# IDL> print, aa
# 1.81818* 0.181818* 0.545455*
# -8.54545+ 1.90160* 0.432573*
# IDL> print, rdiag
# -11.0000+ -7.48166+
#
# The components marked with a * are the components of the
# reflectors, and those marked with a + are components of R.
#
# To reconstruct Q and R we proceed as follows. First R.
# r = fltarr(m, n)
# for i = 0, n-1 do r(0:i,i) = aa(0:i,i) # fill in lower diag
# r(lindgen(n)*(m+1)) = rdiag
#
# Next, Q, which are composed from the reflectors. Each reflector v
# is taken from the upper trapezoid of aa, and converted to a matrix
# via (I - 2 vT . v / (v . vT)).
#
# hh = ident # identity matrix
# for i = 0, n-1 do begin
# v = aa(*,i) & if i GT 0 then v(0:i-1) = 0 # extract reflector
# hh = hh # (ident - 2*(v # v)/total(v * v)) # generate matrix
# endfor
#
# Test the result:
# IDL> print, hh # transpose(r)
# 9.00000 4.00000
# 2.00000 8.00000
# 6.00000 7.00000
#
# Note that it is usually never necessary to form the Q matrix
# explicitly, and MPFIT does not.
def qrfac(self, a, pivot=0):
    """QR-factorize the m x n matrix ``a`` (m >= n) with Householder
    reflections, optionally with column pivoting.

    Python port of MINPACK's QRFAC (see the FORTRAN notes above this
    routine).  ``a`` is modified in place: on return the Householder
    reflectors live in the upper trapezoid and R in the lower part, per
    the IDL storage convention documented above.

    Returns [a, ipvt, rdiag, acnorm]:
      a      -- factored matrix (reflectors + R, as described above)
      ipvt   -- permutation array (identity when pivot == 0)
      rdiag  -- diagonal elements of R, in permuted order
      acnorm -- norms of the original columns, in standard order
    """
    if self.debug: print 'Entering qrfac...'
    machep = self.machar.machep
    sz = a.shape
    m = sz[0]
    n = sz[1]

    # Compute the initial column norms and initialize arrays
    acnorm = numpy.zeros(n, dtype=float)
    for j in range(n):
        acnorm[j] = self.enorm(a[:,j])
    rdiag = acnorm.copy()
    wa = rdiag.copy()
    ipvt = numpy.arange(n)

    # Reduce a to r with householder transformations
    minmn = numpy.min([m,n])
    for j in range(minmn):
        if pivot != 0:
            # Bring the column of largest norm into the pivot position
            rmax = numpy.max(rdiag[j:])
            kmax = (numpy.nonzero(rdiag[j:] == rmax))[0]
            ct = len(kmax)
            kmax = kmax + j
            if ct > 0:
                kmax = kmax[0]

                # Exchange rows via the pivot only. Avoid actually exchanging
                # the rows, in case there is lots of memory transfer. The
                # exchange occurs later, within the body of MPFIT, after the
                # extraneous columns of the matrix have been shed.
                if kmax != j:
                    temp = ipvt[j] ; ipvt[j] = ipvt[kmax] ; ipvt[kmax] = temp
                    rdiag[kmax] = rdiag[j]
                    wa[kmax] = wa[j]

        # Compute the householder transformation to reduce the jth
        # column of A to a multiple of the jth unit vector
        lj = ipvt[j]
        ajj = a[j:,lj]
        ajnorm = self.enorm(ajj)
        if ajnorm == 0:
            # Column is already all zeros -- nothing left to reduce.
            break
        if a[j,lj] < 0:
            # Choose the sign that avoids cancellation in ajj[0] + 1.
            ajnorm = -ajnorm

        ajj = ajj / ajnorm
        ajj[0] = ajj[0] + 1
        # *** Note optimization a(j:*,j)
        a[j:,lj] = ajj

        # Apply the transformation to the remaining columns
        # and update the norms

        # NOTE to SELF: tried to optimize this by removing the loop,
        # but it actually got slower. Reverted to "for" loop to keep
        # it simple.
        if j+1 < n:
            for k in range(j+1, n):
                lk = ipvt[k]
                ajk = a[j:,lk]
                # *** Note optimization a(j:*,lk)
                # (corrected 20 Jul 2000)
                if a[j,lj] != 0:
                    a[j:,lk] = ajk - ajj * sum(ajk*ajj)/a[j,lj]
                    if (pivot != 0) and (rdiag[k] != 0):
                        # Downdate the column norm; recompute it from
                        # scratch when cancellation makes it unreliable.
                        temp = a[j,lk]/rdiag[k]
                        rdiag[k] = rdiag[k] * numpy.sqrt(numpy.max([(1.-temp**2), 0.]))
                        temp = rdiag[k]/wa[k]
                        if (0.05*temp*temp) <= machep:
                            rdiag[k] = self.enorm(a[j+1:,lk])
                            wa[k] = rdiag[k]
        rdiag[j] = -ajnorm
    return [a, ipvt, rdiag, acnorm]
# Original FORTRAN documentation
# **********
#
# subroutine qrsolv
#
# given an m by n matrix a, an n by n diagonal matrix d,
# and an m-vector b, the problem is to determine an x which
# solves the system
#
# a*x = b , d*x = 0 ,
#
# in the least squares sense.
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then qrsolv expects
# the full upper triangle of r, the permutation matrix p,
# and the first n components of (q transpose)*b. the system
# a*x = b, d*x = 0, is then equivalent to
#
# t t
# r*z = q *b , p *d*p*z = 0 ,
#
# where x = p*z. if this system does not have full rank,
# then a least squares solution is obtained. on output qrsolv
# also provides an upper triangular matrix s such that
#
# t t t
# p *(a *a + d*d)*p = s *s .
#
# s is computed within qrsolv and may be of separate interest.
#
# the subroutine statement is
#
# subroutine qrsolv(n,r,ldr,ipvt,diag,qtb,x,sdiag,wa)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle
# must contain the full upper triangle of the matrix r.
# on output the full upper triangle is unaltered, and the
# strict lower triangle contains the strict upper triangle
# (transposed) of the upper triangular matrix s.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# diag is an input array of length n which must contain the
# diagonal elements of the matrix d.
#
# qtb is an input array of length n which must contain the first
# n elements of the vector (q transpose)*b.
#
# x is an output array of length n which contains the least
# squares solution of the system a*x = b, d*x = 0.
#
# sdiag is an output array of length n which contains the
# diagonal elements of the upper triangular matrix s.
#
# wa is a work array of length n.
#
# subprograms called
#
# fortran-supplied ... dabs,dsqrt
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
def qrsolv(self, r, ipvt, diag, qtb, sdiag):
    """Complete the solution of a*x = b, d*x = 0 in the least-squares
    sense, given the pivoted QR factorization of a.

    Python port of MINPACK's QRSOLV (see the FORTRAN notes above):
      r     -- n x n array holding the full upper triangle of R; on
               return its strict lower triangle holds S transposed
      ipvt  -- permutation array from qrfac
      diag  -- diagonal elements of the matrix d
      qtb   -- first n elements of (q transpose)*b
      sdiag -- output array, receives the diagonal of S

    Returns (r, x, sdiag) where x solves the system.
    """
    if self.debug:
        print 'Entering qrsolv...'
    sz = r.shape
    m = sz[0]
    n = sz[1]

    # copy r and (q transpose)*b to preserve input and initialize s.
    # in particular, save the diagonal elements of r in x.

    for j in range(n):
        r[j:n,j] = r[j,j:n]
    x = numpy.diagonal(r).copy()
    wa = qtb.copy()

    # Eliminate the diagonal matrix d using a givens rotation
    for j in range(n):
        l = ipvt[j]
        if diag[l] == 0:
            break
        sdiag[j:] = 0
        sdiag[j] = diag[l]

        # The transformations to eliminate the row of d modify only a
        # single element of (q transpose)*b beyond the first n, which
        # is initially zero.

        qtbpj = 0.
        for k in range(j,n):
            if sdiag[k] == 0:
                break
            # Compute the Givens rotation; branch on magnitude to keep
            # the intermediate quotient <= 1 (numerically safe form).
            if numpy.abs(r[k,k]) < numpy.abs(sdiag[k]):
                cotan = r[k,k]/sdiag[k]
                sine = 0.5/numpy.sqrt(.25 + .25*cotan*cotan)
                cosine = sine*cotan
            else:
                tang = sdiag[k]/r[k,k]
                cosine = 0.5/numpy.sqrt(.25 + .25*tang*tang)
                sine = cosine*tang

            # Compute the modified diagonal element of r and the
            # modified element of ((q transpose)*b,0).
            r[k,k] = cosine*r[k,k] + sine*sdiag[k]
            temp = cosine*wa[k] + sine*qtbpj
            qtbpj = -sine*wa[k] + cosine*qtbpj
            wa[k] = temp

            # Accumulate the transformation in the row of s
            if n > k+1:
                temp = cosine*r[k+1:n,k] + sine*sdiag[k+1:n]
                sdiag[k+1:n] = -sine*r[k+1:n,k] + cosine*sdiag[k+1:n]
                r[k+1:n,k] = temp
        sdiag[j] = r[j,j]
        r[j,j] = x[j]

    # Solve the triangular system for z. If the system is singular
    # then obtain a least squares solution
    nsing = n
    wh = (numpy.nonzero(sdiag == 0))[0]
    if len(wh) > 0:
        nsing = wh[0]
        wa[nsing:] = 0

    if nsing >= 1:
        wa[nsing-1] = wa[nsing-1]/sdiag[nsing-1] # Degenerate case
        # *** Reverse loop ***
        for j in range(nsing-2,-1,-1):
            sum0 = sum(r[j+1:nsing,j]*wa[j+1:nsing])
            wa[j] = (wa[j]-sum0)/sdiag[j]

    # Permute the components of z back to components of x
    x[ipvt] = wa
    return (r, x, sdiag)
# Original FORTRAN documentation
#
# subroutine lmpar
#
# given an m by n matrix a, an n by n nonsingular diagonal
# matrix d, an m-vector b, and a positive number delta,
# the problem is to determine a value for the parameter
# par such that if x solves the system
#
# a*x = b , sqrt(par)*d*x = 0 ,
#
# in the least squares sense, and dxnorm is the euclidean
# norm of d*x, then either par is zero and
#
# (dxnorm-delta) .le. 0.1*delta ,
#
# or par is positive and
#
# abs(dxnorm-delta) .le. 0.1*delta .
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# qr factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then lmpar expects
# the full upper triangle of r, the permutation matrix p,
# and the first n components of (q transpose)*b. on output
# lmpar also provides an upper triangular matrix s such that
#
# t t t
# p *(a *a + par*d*d)*p = s *s .
#
# s is employed within lmpar and may be of separate interest.
#
# only a few iterations are generally needed for convergence
# of the algorithm. if, however, the limit of 10 iterations
# is reached, then the output par will contain the best
# value obtained so far.
#
# the subroutine statement is
#
# subroutine lmpar(n,r,ldr,ipvt,diag,qtb,delta,par,x,sdiag,
# wa1,wa2)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle
# must contain the full upper triangle of the matrix r.
# on output the full upper triangle is unaltered, and the
# strict lower triangle contains the strict upper triangle
# (transposed) of the upper triangular matrix s.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# diag is an input array of length n which must contain the
# diagonal elements of the matrix d.
#
# qtb is an input array of length n which must contain the first
# n elements of the vector (q transpose)*b.
#
# delta is a positive input variable which specifies an upper
# bound on the euclidean norm of d*x.
#
# par is a nonnegative variable. on input par contains an
# initial estimate of the levenberg-marquardt parameter.
# on output par contains the final estimate.
#
# x is an output array of length n which contains the least
# squares solution of the system a*x = b, sqrt(par)*d*x = 0,
# for the output par.
#
# sdiag is an output array of length n which contains the
# diagonal elements of the upper triangular matrix s.
#
# wa1 and wa2 are work arrays of length n.
#
# subprograms called
#
# minpack-supplied ... dpmpar,enorm,qrsolv
#
# fortran-supplied ... dabs,dmax1,dmin1,dsqrt
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
def lmpar(self, r, ipvt, diag, qtb, delta, x, sdiag, par=None):
    """Determine the Levenberg-Marquardt parameter ``par``.

    Python port of MINPACK's LMPAR (see the FORTRAN notes above): given
    the pivoted QR factorization of the Jacobian, find par such that the
    solution x of a*x = b, sqrt(par)*d*x = 0 satisfies
    abs(enorm(d*x) - delta) <= 0.1*delta (or par == 0 when the
    Gauss-Newton step is already short enough).

    Returns [r, par, x, sdiag]; r and sdiag come back updated by the
    embedded qrsolv calls.  At most 10 iterations are attempted.
    """
    if self.debug:
        print 'Entering lmpar...'
    dwarf = self.machar.minnum
    machep = self.machar.machep
    sz = r.shape
    m = sz[0]
    n = sz[1]

    # Compute and store in x the gauss-newton direction. If the
    # jacobian is rank-deficient, obtain a least-squares solution
    nsing = n
    wa1 = qtb.copy()
    rthresh = numpy.max(numpy.abs(numpy.diagonal(r))) * machep
    wh = (numpy.nonzero(numpy.abs(numpy.diagonal(r)) < rthresh))[0]
    if len(wh) > 0:
        nsing = wh[0]
        wa1[wh[0]:] = 0
    if nsing >= 1:
        # *** Reverse loop ***
        for j in range(nsing-1,-1,-1):
            wa1[j] = wa1[j]/r[j,j]
            if j-1 >= 0:
                wa1[0:j] = wa1[0:j] - r[0:j,j]*wa1[j]

    # Note: ipvt here is a permutation array
    x[ipvt] = wa1

    # Initialize the iteration counter. Evaluate the function at the
    # origin, and test for acceptance of the gauss-newton direction
    iter = 0
    wa2 = diag * x
    dxnorm = self.enorm(wa2)
    fp = dxnorm - delta
    if fp <= 0.1*delta:
        # Gauss-Newton step is inside the trust region: par = 0.
        return [r, 0., x, sdiag]

    # If the jacobian is not rank deficient, the newton step provides a
    # lower bound, parl, for the zero of the function. Otherwise set
    # this bound to zero.
    parl = 0.
    if nsing >= n:
        wa1 = diag[ipvt] * wa2[ipvt] / dxnorm
        wa1[0] = wa1[0] / r[0,0] # Degenerate case
        for j in range(1,n):   # Note "1" here, not zero
            sum0 = sum(r[0:j,j]*wa1[0:j])
            wa1[j] = (wa1[j] - sum0)/r[j,j]

        temp = self.enorm(wa1)
        parl = ((fp/delta)/temp)/temp

    # Calculate an upper bound, paru, for the zero of the function
    for j in range(n):
        sum0 = sum(r[0:j+1,j]*qtb[0:j+1])
        wa1[j] = sum0/diag[ipvt[j]]
    gnorm = self.enorm(wa1)
    paru = gnorm/delta
    if paru == 0:
        paru = dwarf/numpy.min([delta,0.1])

    # If the input par lies outside of the interval (parl,paru), set
    # par to the closer endpoint
    par = numpy.max([par,parl])
    par = numpy.min([par,paru])
    if par == 0:
        par = gnorm/dxnorm

    # Beginning of an iteration
    while(1):
        iter = iter + 1

        # Evaluate the function at the current value of par
        if par == 0:
            par = numpy.max([dwarf, paru*0.001])
        temp = numpy.sqrt(par)
        wa1 = temp * diag
        [r, x, sdiag] = self.qrsolv(r, ipvt, wa1, qtb, sdiag)
        wa2 = diag*x
        dxnorm = self.enorm(wa2)
        temp = fp
        fp = dxnorm - delta

        # Converged, stuck at the lower bound, or out of iterations.
        if (numpy.abs(fp) <= 0.1*delta) or \
           ((parl == 0) and (fp <= temp) and (temp < 0)) or \
           (iter == 10):
            break;

        # Compute the newton correction
        wa1 = diag[ipvt] * wa2[ipvt] / dxnorm
        for j in range(n-1):
            wa1[j] = wa1[j]/sdiag[j]
            wa1[j+1:n] = wa1[j+1:n] - r[j+1:n,j]*wa1[j]
        wa1[n-1] = wa1[n-1]/sdiag[n-1] # Degenerate case

        temp = self.enorm(wa1)
        parc = ((fp/delta)/temp)/temp

        # Depending on the sign of the function, update parl or paru
        if fp > 0:
            parl = numpy.max([parl,par])
        if fp < 0:
            paru = numpy.min([paru,par])

        # Compute an improved estimate for par
        par = numpy.max([parl, par+parc])

        # End of an iteration

    # Termination
    return [r, par, x, sdiag]
# Procedure to tie one parameter to another.
def tie(self, p, ptied=None):
    """Apply parameter-tying expressions to the parameter vector.

    ``ptied`` is a list of strings, one per parameter; each non-empty
    entry is a Python expression in terms of ``p`` (e.g. ``'2 * p[1]'``)
    whose value replaces that parameter.

    Returns the updated ``p`` (modified in place), or None when
    ``ptied`` is not supplied.

    NOTE(review): the expressions are executed via exec() and are
    therefore fully trusted input -- never pass user-supplied strings.
    Also relies on Python 2 exec semantics to rebind the local p[i];
    verify before porting to Python 3.
    """
    if self.debug:
        print 'Entering tie...'
    if ptied is None:
        return
    for i in range(len(ptied)):
        if ptied[i] == '':
            continue
        # Build e.g. "p[2] = 2 * p[1]" and execute it in this scope.
        cmd = 'p[' + str(i) + '] = ' + ptied[i]
        exec(cmd)
    return p
# Original FORTRAN documentation
# **********
#
# subroutine covar
#
# given an m by n matrix a, the problem is to determine
# the covariance matrix corresponding to a, defined as
#
# t
# inverse(a *a) .
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# qr factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then covar expects
# the full upper triangle of r and the permutation matrix p.
# the covariance matrix is then computed as
#
# t t
# p*inverse(r *r)*p .
#
# if a is nearly rank deficient, it may be desirable to compute
# the covariance matrix corresponding to the linearly independent
# columns of a. to define the numerical rank of a, covar uses
# the tolerance tol. if l is the largest integer such that
#
# abs(r(l,l)) .gt. tol*abs(r(1,1)) ,
#
# then covar computes the covariance matrix corresponding to
# the first l columns of r. for k greater than l, column
# and row ipvt(k) of the covariance matrix are set to zero.
#
# the subroutine statement is
#
# subroutine covar(n,r,ldr,ipvt,tol,wa)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle must
# contain the full upper triangle of the matrix r. on output
# r contains the square symmetric covariance matrix.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# tol is a nonnegative input variable used to define the
# numerical rank of a in the manner described above.
#
# wa is a work array of length n.
#
# subprograms called
#
# fortran-supplied ... dabs
#
# argonne national laboratory. minpack project. august 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
def calc_covar(self, rr, ipvt=None, tol=1.e-14):
    """Compute the covariance matrix inverse(a^T a) from the pivoted QR
    factorization of a (MINPACK COVAR; see the FORTRAN notes above).

    rr   -- n x n array whose full upper triangle contains R
    ipvt -- permutation array from qrfac (identity when omitted)
    tol  -- tolerance defining the numerical rank of a; columns beyond
            the rank are zeroed in the result

    Returns the n x n covariance matrix, or -1 when ``rr`` is not a
    square two-dimensional matrix (error is printed, not raised).
    """
    if self.debug:
        print 'Entering calc_covar...'
    if numpy.ndim(rr) != 2:
        print 'ERROR: r must be a two-dimensional matrix'
        return -1
    s = rr.shape
    n = s[0]
    if s[0] != s[1]:
        print 'ERROR: r must be a square matrix'
        return -1

    if ipvt is None:
        ipvt = numpy.arange(n)
    # Work on a copy so the caller's matrix is preserved.
    r = rr.copy()
    r.shape = [n,n]

    # Form the inverse of r in the full upper triangle of r
    l = -1
    tolr = tol * numpy.abs(r[0,0])
    for k in range(n):
        if numpy.abs(r[k,k]) <= tolr:
            # Numerical rank reached; l marks the last good column.
            break
        r[k,k] = 1./r[k,k]
        for j in range(k):
            temp = r[k,k] * r[j,k]
            r[j,k] = 0.
            r[0:j+1,k] = r[0:j+1,k] - temp*r[0:j+1,j]
        l = k

    # Form the full upper triangle of the inverse of (r transpose)*r
    # in the full upper triangle of r
    if l >= 0:
        for k in range(l+1):
            for j in range(k):
                temp = r[j,k]
                r[0:j+1,j] = r[0:j+1,j] + temp*r[0:j+1,k]
            temp = r[k,k]
            r[0:k+1,k] = temp * r[0:k+1,k]

    # Form the full lower triangle of the covariance matrix
    # in the strict lower triangle of r and in wa
    wa = numpy.repeat([r[0,0]], n)
    for j in range(n):
        jj = ipvt[j]
        sing = j > l    # column beyond the numerical rank -> zero it
        for i in range(j+1):
            if sing:
                r[i,j] = 0.
            ii = ipvt[i]
            if ii > jj:
                r[ii,jj] = r[i,j]
            if ii < jj:
                r[jj,ii] = r[i,j]
        wa[jj] = r[j,j]

    # Symmetrize the covariance matrix in r
    for j in range(n):
        r[0:j+1,j] = r[j,0:j+1]
        r[j,j] = wa[j]

    return r
class machar:
    """Machine floating-point characteristics (MINPACK DPMPAR analogue).

    Exposes the precision, extreme representable values and their logs,
    plus the rdwarf/rgiant bounds used by the enorm routine.
    """

    def __init__(self, double=1):
        # double == 0 selects single precision, anything else double.
        kind = numpy.float32 if double == 0 else numpy.float64
        finfo = numpy.finfo(kind)
        self.machep = finfo.eps          # relative machine precision
        self.maxnum = finfo.max          # largest representable number
        self.minnum = finfo.tiny         # smallest positive normal number
        self.maxlog = numpy.log(self.maxnum)
        self.minlog = numpy.log(self.minnum)
        # Under/overflow guards used when accumulating Euclidean norms.
        self.rdwarf = numpy.sqrt(self.minnum * 1.5) * 10
        self.rgiant = numpy.sqrt(self.maxnum) * 0.1
class mpfitException(Exception):
    """Exception raised to signal errors inside the mpfit routines."""
    pass
|
Kruehlio/XSspec
|
utils/mpfit.py
|
Python
|
mit
| 93,147
|
[
"Gaussian"
] |
3970dd996a0fd8ee9bc9f098c129776640d3219cbfb5f212c95243da8c248af7
|
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from neutron_lib import constants
from oslo_config import cfg
import testtools
from neutron.agent import firewall
from neutron.agent.linux import ip_conntrack
from neutron.agent.linux import ipset_manager
from neutron.agent.linux import iptables_comments as ic
from neutron.agent.linux import iptables_firewall
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.conf.agent import common as agent_config
from neutron.conf.agent import securitygroups_rpc as security_config
from neutron.tests import base
from neutron.tests.unit.api.v2 import test_base
_uuid = test_base._uuid
#TODO(mangelajo): replace all 'IPv4', 'IPv6' to constants
# CIDR prefixes and addresses used to build fake ports and SG rules.
FAKE_PREFIX = {'IPv4': '10.0.0.0/24',
               'IPv6': 'fe80::/48'}
FAKE_IP = {'IPv4': '10.0.0.1',
           'IPv6': 'fe80::1'}
#TODO(mangelajo): replace all '*_sgid' strings for the constants
# Security-group identifiers used throughout the tests.
FAKE_SGID = 'fake_sgid'
OTHER_SGID = 'other_sgid'
_IPv6 = constants.IPv6
_IPv4 = constants.IPv4
# Canned `iptables-save` raw-table dump fed to the firewall driver;
# conntrack zones 1, 2 and 9 are already in use here (CT --zone args).
RAW_TABLE_OUTPUT = """
# Generated by iptables-save v1.4.21 on Fri Jul 31 16:13:28 2015
*raw
:PREROUTING ACCEPT [11561:3470468]
:OUTPUT ACCEPT [11504:4064044]
:neutron-openvswi-OUTPUT - [0:0]
:neutron-openvswi-PREROUTING - [0:0]
-A PREROUTING -j neutron-openvswi-PREROUTING
-A OUTPUT -j neutron-openvswi-OUTPUT
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvbe804433b-61 -j CT --zone 1
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tape804433b-61 -j CT --zone 1
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb95c24827-02 -j CT --zone 2
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tap95c24827-02 -j CT --zone 2
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb61634509-31 -j CT --zone 2
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tap61634509-31 -j CT --zone 2
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb8f46cf18-12 -j CT --zone 9
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tap8f46cf18-12 -j CT --zone 9
COMMIT
# Completed on Fri Jul 31 16:13:28 2015
""" # noqa
class BaseIptablesFirewallTestCase(base.BaseTestCase):
    """Shared fixture: an IptablesFirewallDriver wired entirely to mocks.

    setUp replaces utils.execute and IptablesManager with mocks so that
    no real iptables commands run, and seeds the conntrack manager with
    the canned RAW_TABLE_OUTPUT zone assignments.
    """

    def setUp(self):
        super(BaseIptablesFirewallTestCase, self).setUp()
        security_config.register_securitygroups_opts()
        agent_config.register_root_helper(cfg.CONF)
        # Comments off so expected rule strings stay short in the tests.
        cfg.CONF.set_override('comment_iptables_rules', False, 'AGENT')
        # Patch out command execution before the driver is instantiated.
        self.utils_exec_p = mock.patch(
            'neutron.agent.linux.utils.execute')
        self.utils_exec = self.utils_exec_p.start()
        self.iptables_cls_p = mock.patch(
            'neutron.agent.linux.iptables_manager.IptablesManager')
        iptables_cls = self.iptables_cls_p.start()
        self.iptables_inst = mock.Mock()
        self.v4filter_inst = mock.Mock()
        self.v6filter_inst = mock.Mock()
        # One mock per address family; 'filter' and 'raw' share it so a
        # single assert_has_calls can check both tables.
        self.iptables_inst.ipv4 = {'filter': self.v4filter_inst,
                                   'raw': self.v4filter_inst
                                   }
        self.iptables_inst.ipv6 = {'filter': self.v6filter_inst,
                                   'raw': self.v6filter_inst
                                   }
        iptables_cls.return_value = self.iptables_inst
        self.iptables_inst.get_rules_for_table.return_value = (
            RAW_TABLE_OUTPUT.splitlines())
        self.firewall = iptables_firewall.IptablesFirewallDriver()
        self.firewall.iptables = self.iptables_inst
        # don't mess with sysctl knobs in unit tests
        self.firewall._enabled_netfilter_for_bridges = True
        # initial data has 1, 2, and 9 in use, see RAW_TABLE_OUTPUT above.
        self._dev_zone_map = {'61634509-31': 2, '8f46cf18-12': 9,
                              '95c24827-02': 2, 'e804433b-61': 1}
        get_rules_for_table_func = lambda x: RAW_TABLE_OUTPUT.split('\n')
        filtered_ports = {port_id: self._fake_port()
                          for port_id in self._dev_zone_map}
        self.firewall.ipconntrack = ip_conntrack.IpConntrackManager(
            get_rules_for_table_func, filtered_ports=filtered_ports,
            unfiltered_ports=dict())
        self.firewall.ipconntrack._device_zone_map = self._dev_zone_map

    def _fake_port(self):
        # Minimal port dict the driver needs: device name, MAC and one
        # fixed IP per address family.
        return {'device': 'tapfake_dev',
                'mac_address': 'ff:ff:ff:ff:ff:ff',
                'network_id': 'fake_net',
                'fixed_ips': [FAKE_IP['IPv4'],
                              FAKE_IP['IPv6']]}
class IptablesFirewallTestCase(BaseIptablesFirewallTestCase):
def test_prepare_port_filter_with_no_sg(self):
    """Without any security group the driver must still install the base
    plumbing: the sg-fallback DROP chain, conntrack zone assignments,
    per-port ingress ('ifake_dev'), egress ('ofake_dev') and spoof
    ('sfake_dev') chains, DHCP allow/deny rules and conntrack state
    matching -- in exactly this order on the IPv4 filter mock.
    """
    port = self._fake_port()
    self.firewall.prepare_port_filter(port)
    calls = [mock.call.add_chain('sg-fallback'),
             mock.call.add_rule(
                 'sg-fallback', '-j DROP',
                 comment=ic.UNMATCH_DROP),
             mock.call.add_chain('sg-chain'),
             mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                comment=None),
             mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                comment=None),
             mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                comment=None),
             mock.call.add_chain('ifake_dev'),
             mock.call.add_rule('FORWARD',
                                '-m physdev --physdev-out tapfake_dev '
                                '--physdev-is-bridged '
                                '-j $sg-chain', comment=ic.VM_INT_SG),
             mock.call.add_rule('sg-chain',
                                '-m physdev --physdev-out tapfake_dev '
                                '--physdev-is-bridged '
                                '-j $ifake_dev',
                                comment=ic.SG_TO_VM_SG),
             mock.call.add_rule(
                 'ifake_dev',
                 '-m state --state RELATED,ESTABLISHED -j RETURN',
                 comment=None),
             mock.call.add_rule(
                 'ifake_dev',
                 '-m state --state INVALID -j DROP',
                 comment=None),
             mock.call.add_rule(
                 'ifake_dev',
                 '-j $sg-fallback', comment=None),
             mock.call.add_chain('ofake_dev'),
             mock.call.add_rule('FORWARD',
                                '-m physdev --physdev-in tapfake_dev '
                                '--physdev-is-bridged '
                                '-j $sg-chain', comment=ic.VM_INT_SG),
             mock.call.add_rule('sg-chain',
                                '-m physdev --physdev-in tapfake_dev '
                                '--physdev-is-bridged -j $ofake_dev',
                                comment=ic.SG_TO_VM_SG),
             mock.call.add_rule('INPUT',
                                '-m physdev --physdev-in tapfake_dev '
                                '--physdev-is-bridged -j $ofake_dev',
                                comment=ic.INPUT_TO_SG),
             mock.call.add_chain('sfake_dev'),
             mock.call.add_rule(
                 'sfake_dev',
                 '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                 '-j RETURN',
                 comment=ic.PAIR_ALLOW),
             mock.call.add_rule(
                 'sfake_dev', '-j DROP',
                 comment=ic.PAIR_DROP),
             mock.call.add_rule(
                 'ofake_dev',
                 '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
                 '--sport 68 --dport 67 -j RETURN',
                 comment=None),
             mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                comment=None),
             mock.call.add_rule(
                 'ofake_dev',
                 '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                 comment=None),
             mock.call.add_rule(
                 'ofake_dev',
                 '-p udp -m udp --sport 67 --dport 68 -j DROP',
                 comment=None),
             mock.call.add_rule(
                 'ofake_dev',
                 '-m state --state RELATED,ESTABLISHED -j RETURN',
                 comment=None),
             mock.call.add_rule(
                 'ofake_dev',
                 '-m state --state INVALID -j DROP', comment=None),
             mock.call.add_rule(
                 'ofake_dev',
                 '-j $sg-fallback',
                 comment=None),
             mock.call.add_rule('sg-chain', '-j ACCEPT')]
    self.v4filter_inst.assert_has_calls(calls)
# --- IPv4 ingress rule translation -------------------------------------
# Each security-group rule dict must be rendered as the matching
# iptables rule on the instance's ingress chain ('ifake_dev').

def test_filter_ipv4_ingress(self):
    """An unqualified ingress rule allows all IPv4 traffic."""
    matched = mock.call.add_rule('ifake_dev', '-j RETURN', comment=None)
    self._test_prepare_port_filter(
        {'ethertype': 'IPv4', 'direction': 'ingress'}, matched, None)

def test_filter_ipv4_ingress_prefix(self):
    """A source prefix becomes an '-s <cidr>' match."""
    cidr = FAKE_PREFIX['IPv4']
    matched = mock.call.add_rule(
        'ifake_dev', '-s %s -j RETURN' % cidr, comment=None)
    self._test_prepare_port_filter(
        {'ethertype': 'IPv4', 'direction': 'ingress',
         'source_ip_prefix': cidr}, matched, None)

def test_filter_ipv4_ingress_tcp(self):
    """A bare TCP rule becomes '-p tcp'."""
    matched = mock.call.add_rule(
        'ifake_dev', '-p tcp -j RETURN', comment=None)
    self._test_prepare_port_filter(
        {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'tcp'},
        matched, None)

def test_filter_ipv4_ingress_tcp_prefix(self):
    """TCP plus a source prefix combines both matches."""
    cidr = FAKE_PREFIX['IPv4']
    matched = mock.call.add_rule(
        'ifake_dev', '-s %s -p tcp -j RETURN' % cidr, comment=None)
    self._test_prepare_port_filter(
        {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'tcp',
         'source_ip_prefix': cidr}, matched, None)

def test_filter_ipv4_ingress_icmp(self):
    """A bare ICMP rule becomes '-p icmp'."""
    matched = mock.call.add_rule(
        'ifake_dev', '-p icmp -j RETURN', comment=None)
    self._test_prepare_port_filter(
        {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'icmp'},
        matched, None)

def test_filter_ipv4_ingress_icmp_prefix(self):
    """ICMP plus a source prefix combines both matches."""
    cidr = FAKE_PREFIX['IPv4']
    matched = mock.call.add_rule(
        'ifake_dev', '-s %s -p icmp -j RETURN' % cidr, comment=None)
    self._test_prepare_port_filter(
        {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'icmp',
         'source_ip_prefix': cidr}, matched, None)

def test_filter_ipv4_ingress_tcp_port(self):
    """A single-port TCP rule (min == max) uses '--dport'."""
    matched = mock.call.add_rule(
        'ifake_dev', '-p tcp -m tcp --dport 10 -j RETURN', comment=None)
    self._test_prepare_port_filter(
        {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'tcp',
         'port_range_min': 10, 'port_range_max': 10},
        matched, None)
# IPv4 ingress translation for numeric protocols, port ranges, UDP, DCCP
# and SCTP.  Numeric protocol '6' is canonicalized to 'tcp'; a port range
# with min == max uses '--dport' while a real range uses multiport
# '--dports min:max'.

def test_filter_ipv4_ingress_tcp_port_by_num(self):
    rule = {'ethertype': 'IPv4',
            'direction': 'ingress',
            'protocol': '6',
            'port_range_min': 10,
            'port_range_max': 10}
    ingress = mock.call.add_rule('ifake_dev',
                                 '-p tcp -m tcp --dport 10 -j RETURN',
                                 comment=None)
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_ingress_tcp_mport(self):
    rule = {'ethertype': 'IPv4',
            'direction': 'ingress',
            'protocol': 'tcp',
            'port_range_min': 10,
            'port_range_max': 100}
    ingress = mock.call.add_rule(
        'ifake_dev',
        '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
        comment=None)
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_ingress_tcp_mport_prefix(self):
    prefix = FAKE_PREFIX['IPv4']
    rule = {'ethertype': 'IPv4',
            'direction': 'ingress',
            'protocol': 'tcp',
            'port_range_min': 10,
            'port_range_max': 100,
            'source_ip_prefix': prefix}
    ingress = mock.call.add_rule(
        'ifake_dev',
        '-s %s -p tcp -m tcp -m multiport --dports 10:100 '
        '-j RETURN' % prefix, comment=None)
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_ingress_udp(self):
    rule = {'ethertype': 'IPv4',
            'direction': 'ingress',
            'protocol': 'udp'}
    ingress = mock.call.add_rule(
        'ifake_dev', '-p udp -j RETURN', comment=None)
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_ingress_udp_prefix(self):
    prefix = FAKE_PREFIX['IPv4']
    rule = {'ethertype': 'IPv4',
            'direction': 'ingress',
            'protocol': 'udp',
            'source_ip_prefix': prefix}
    ingress = mock.call.add_rule('ifake_dev',
                                 '-s %s -p udp -j RETURN' % prefix,
                                 comment=None)
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_ingress_udp_port(self):
    rule = {'ethertype': 'IPv4',
            'direction': 'ingress',
            'protocol': 'udp',
            'port_range_min': 10,
            'port_range_max': 10}
    ingress = mock.call.add_rule('ifake_dev',
                                 '-p udp -m udp --dport 10 -j RETURN',
                                 comment=None)
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_ingress_udp_mport(self):
    rule = {'ethertype': 'IPv4',
            'direction': 'ingress',
            'protocol': 'udp',
            'port_range_min': 10,
            'port_range_max': 100}
    ingress = mock.call.add_rule(
        'ifake_dev',
        '-p udp -m udp -m multiport --dports 10:100 -j RETURN',
        comment=None)
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_ingress_udp_mport_prefix(self):
    prefix = FAKE_PREFIX['IPv4']
    rule = {'ethertype': 'IPv4',
            'direction': 'ingress',
            'protocol': 'udp',
            'port_range_min': 10,
            'port_range_max': 100,
            'source_ip_prefix': prefix}
    ingress = mock.call.add_rule(
        'ifake_dev',
        '-s %s -p udp -m udp -m multiport --dports 10:100 '
        '-j RETURN' % prefix, comment=None)
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_ingress_dccp_port(self):
    rule = {'ethertype': 'IPv4',
            'direction': 'ingress',
            'protocol': 'dccp',
            'port_range_min': 10,
            'port_range_max': 10}
    ingress = mock.call.add_rule('ifake_dev',
                                 '-p dccp -m dccp --dport 10 -j RETURN',
                                 comment=None)
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_ingress_sctp_port(self):
    rule = {'ethertype': 'IPv4',
            'direction': 'ingress',
            'protocol': 'sctp',
            'port_range_min': 10,
            'port_range_max': 10}
    ingress = mock.call.add_rule('ifake_dev',
                                 '-p sctp -m sctp --dport 10 -j RETURN',
                                 comment=None)
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)
# Protocol corner cases: blank and '0' both mean "any protocol";
# protocol number '98' is canonicalized to its name, 'encap'.

def test_filter_ipv4_ingress_protocol_blank(self):
    """An empty protocol string matches any protocol."""
    matched = mock.call.add_rule('ifake_dev', '-j RETURN', comment=None)
    self._test_prepare_port_filter(
        {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': ''},
        matched, None)

def test_filter_ipv4_ingress_protocol_zero(self):
    """Protocol '0' also matches any protocol."""
    matched = mock.call.add_rule('ifake_dev', '-j RETURN', comment=None)
    self._test_prepare_port_filter(
        {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': '0'},
        matched, None)

def test_filter_ipv4_ingress_protocol_encap(self):
    """Named 'encap' protocol becomes '-p encap'."""
    matched = mock.call.add_rule(
        'ifake_dev', '-p encap -j RETURN', comment=None)
    self._test_prepare_port_filter(
        {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'encap'},
        matched, None)

def test_filter_ipv4_ingress_protocol_encap_by_num(self):
    """Numeric protocol 98 is rendered by its name 'encap'."""
    matched = mock.call.add_rule(
        'ifake_dev', '-p encap -j RETURN', comment=None)
    self._test_prepare_port_filter(
        {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': '98'},
        matched, None)
def test_filter_ipv4_egress(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress'}
egress = mock.call.add_rule('ofake_dev', '-j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_dest_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'dest_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-d %s -j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_source_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-s %s -j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp'}
egress = mock.call.add_rule(
'ofake_dev', '-p tcp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'dest_ip_prefix': prefix}
egress = mock.call.add_rule('ofake_dev',
'-d %s -p tcp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp'}
egress = mock.call.add_rule('ofake_dev', '-p icmp -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'dest_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-d %s -p icmp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_type(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'port_range_min': 8,
'dest_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-d %s -p icmp -m icmp --icmp-type 8 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv4_egress_icmp_type_name(self):
        """A symbolic ICMP type name is passed through to '--icmp-type'."""
        prefix = FAKE_PREFIX['IPv4']
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'protocol': 'icmp',
                'port_range_min': 'echo-request',
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule(
            'ofake_dev',
            '-d %s -p icmp -m icmp --icmp-type echo-request '
            '-j RETURN' % prefix,
            comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv4_egress_icmp_type_code(self):
        """port_range_min/max on an ICMP rule become '--icmp-type <type>/<code>'."""
        prefix = FAKE_PREFIX['IPv4']
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'protocol': 'icmp',
                'port_range_min': 8,
                'port_range_max': 0,
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule(
            'ofake_dev',
            '-d %s -p icmp -m icmp --icmp-type 8/0 -j RETURN' % prefix,
            comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv4_egress_tcp_port(self):
        """A single-port TCP range renders '--dport <n>' (no multiport)."""
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'protocol': 'tcp',
                'port_range_min': 10,
                'port_range_max': 10}
        egress = mock.call.add_rule('ofake_dev',
                                    '-p tcp -m tcp --dport 10 -j RETURN',
                                    comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv4_egress_tcp_mport(self):
        """A TCP port range renders a multiport '--dports min:max' match."""
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'protocol': 'tcp',
                'port_range_min': 10,
                'port_range_max': 100}
        egress = mock.call.add_rule(
            'ofake_dev',
            '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
            comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv4_egress_tcp_mport_prefix(self):
        """TCP port range plus dest prefix combine into one rule string."""
        prefix = FAKE_PREFIX['IPv4']
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'protocol': 'tcp',
                'port_range_min': 10,
                'port_range_max': 100,
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule(
            'ofake_dev',
            '-d %s -p tcp -m tcp -m multiport --dports 10:100 '
            '-j RETURN' % prefix, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv4_egress_udp(self):
        """Egress IPv4 UDP rule renders a bare '-p udp -j RETURN'."""
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'protocol': 'udp'}
        egress = mock.call.add_rule(
            'ofake_dev', '-p udp -j RETURN', comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv4_egress_udp_prefix(self):
        """Egress IPv4 UDP rule with dest prefix renders '-d <cidr> -p udp'."""
        prefix = FAKE_PREFIX['IPv4']
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'protocol': 'udp',
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule('ofake_dev',
                                    '-d %s -p udp -j RETURN' % prefix,
                                    comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv4_egress_udp_port(self):
        """A single-port UDP range renders '--dport <n>' (no multiport)."""
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'protocol': 'udp',
                'port_range_min': 10,
                'port_range_max': 10}
        egress = mock.call.add_rule('ofake_dev',
                                    '-p udp -m udp --dport 10 -j RETURN',
                                    comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv4_egress_udp_mport(self):
        """A UDP port range renders a multiport '--dports min:max' match."""
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'protocol': 'udp',
                'port_range_min': 10,
                'port_range_max': 100}
        egress = mock.call.add_rule(
            'ofake_dev',
            '-p udp -m udp -m multiport --dports 10:100 -j RETURN',
            comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv4_egress_udp_mport_prefix(self):
        """UDP port range plus dest prefix combine into one rule string."""
        prefix = FAKE_PREFIX['IPv4']
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'protocol': 'udp',
                'port_range_min': 10,
                'port_range_max': 100,
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule(
            'ofake_dev',
            '-d %s -p udp -m udp -m multiport --dports 10:100 '
            '-j RETURN' % prefix, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_ingress(self):
        """A bare IPv6 ingress rule renders an unconditional '-j RETURN'."""
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress'}
        ingress = mock.call.add_rule('ifake_dev', '-j RETURN',
                                     comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_ingress_prefix(self):
        """Ingress IPv6 rule with a source prefix renders '-s <cidr> -j RETURN'."""
        prefix = FAKE_PREFIX['IPv6']
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress',
                'source_ip_prefix': prefix}
        ingress = mock.call.add_rule(
            'ifake_dev', '-s %s -j RETURN' % prefix, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_ingress_tcp(self):
        """Ingress IPv6 TCP rule renders '-p tcp -j RETURN'."""
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress',
                'protocol': 'tcp'}
        ingress = mock.call.add_rule(
            'ifake_dev', '-p tcp -j RETURN', comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_ingress_tcp_prefix(self):
        """Ingress IPv6 TCP rule with source prefix renders '-s <cidr> -p tcp'."""
        prefix = FAKE_PREFIX['IPv6']
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress',
                'protocol': 'tcp',
                'source_ip_prefix': prefix}
        ingress = mock.call.add_rule('ifake_dev',
                                     '-s %s -p tcp -j RETURN' % prefix,
                                     comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_ingress_tcp_port(self):
        """A single-port IPv6 TCP range renders '--dport <n>'."""
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress',
                'protocol': 'tcp',
                'port_range_min': 10,
                'port_range_max': 10}
        ingress = mock.call.add_rule('ifake_dev',
                                     '-p tcp -m tcp --dport 10 -j RETURN',
                                     comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_ingress_icmp(self):
        """An IPv6 'icmp' rule is mapped to the 'ipv6-icmp' protocol name."""
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress',
                'protocol': 'icmp'}
        ingress = mock.call.add_rule(
            'ifake_dev', '-p ipv6-icmp -j RETURN', comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_ingress_icmp_prefix(self):
        """IPv6 ICMP ingress with source prefix renders '-s <cidr> -p ipv6-icmp'."""
        prefix = FAKE_PREFIX['IPv6']
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress',
                'protocol': 'icmp',
                'source_ip_prefix': prefix}
        ingress = mock.call.add_rule(
            'ifake_dev', '-s %s -p ipv6-icmp -j RETURN' % prefix,
            comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_ingress_tcp_mport(self):
        """An IPv6 TCP port range renders a multiport '--dports min:max' match."""
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress',
                'protocol': 'tcp',
                'port_range_min': 10,
                'port_range_max': 100}
        ingress = mock.call.add_rule(
            'ifake_dev',
            '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
            comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def _test_filter_ingress_tcp_min_port_0(self, ethertype):
        """A range starting at port 0 must not be dropped: it still renders
        a multiport '--dports 0:100' match for the given *ethertype*.
        """
        rule = {'ethertype': ethertype,
                'direction': 'ingress',
                'protocol': 'tcp',
                'port_range_min': 0,
                'port_range_max': 100}
        ingress = mock.call.add_rule(
            'ifake_dev',
            '-p tcp -m tcp -m multiport --dports 0:100 -j RETURN',
            comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ingress_tcp_min_port_0_for_ipv4(self):
self._test_filter_ingress_tcp_min_port_0('IPv4')
def test_filter_ingress_tcp_min_port_0_for_ipv6(self):
self._test_filter_ingress_tcp_min_port_0('IPv6')
    def test_filter_ipv6_ingress_tcp_mport_prefix(self):
        """IPv6 TCP port range plus source prefix combine into one rule."""
        prefix = FAKE_PREFIX['IPv6']
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress',
                'protocol': 'tcp',
                'port_range_min': 10,
                'port_range_max': 100,
                'source_ip_prefix': prefix}
        ingress = mock.call.add_rule(
            'ifake_dev',
            '-s %s -p tcp -m tcp -m multiport --dports 10:100 '
            '-j RETURN' % prefix, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_ingress_udp(self):
        """Ingress IPv6 UDP rule renders '-p udp -j RETURN'."""
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress',
                'protocol': 'udp'}
        ingress = mock.call.add_rule(
            'ifake_dev', '-p udp -j RETURN', comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_ingress_udp_prefix(self):
        """Ingress IPv6 UDP rule with source prefix renders '-s <cidr> -p udp'."""
        prefix = FAKE_PREFIX['IPv6']
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress',
                'protocol': 'udp',
                'source_ip_prefix': prefix}
        ingress = mock.call.add_rule('ifake_dev',
                                     '-s %s -p udp -j RETURN' % prefix,
                                     comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_ingress_udp_port(self):
        """A single-port IPv6 UDP range renders '--dport <n>'."""
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress',
                'protocol': 'udp',
                'port_range_min': 10,
                'port_range_max': 10}
        ingress = mock.call.add_rule('ifake_dev',
                                     '-p udp -m udp --dport 10 -j RETURN',
                                     comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_ingress_udp_mport(self):
        """An IPv6 UDP port range renders a multiport '--dports min:max' match."""
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress',
                'protocol': 'udp',
                'port_range_min': 10,
                'port_range_max': 100}
        ingress = mock.call.add_rule(
            'ifake_dev',
            '-p udp -m udp -m multiport --dports 10:100 -j RETURN',
            comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_ingress_udp_mport_prefix(self):
        """IPv6 UDP port range plus source prefix combine into one rule."""
        prefix = FAKE_PREFIX['IPv6']
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress',
                'protocol': 'udp',
                'port_range_min': 10,
                'port_range_max': 100,
                'source_ip_prefix': prefix}
        ingress = mock.call.add_rule(
            'ifake_dev',
            '-s %s -p udp -m udp -m multiport --dports 10:100 '
            '-j RETURN' % prefix, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_egress(self):
        """A bare IPv6 egress rule renders an unconditional '-j RETURN'."""
        rule = {'ethertype': 'IPv6',
                'direction': 'egress'}
        egress = mock.call.add_rule('ofake_dev', '-j RETURN', comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_egress_prefix(self):
        """Egress IPv6 rule with a dest prefix renders '-d <cidr> -j RETURN'."""
        prefix = FAKE_PREFIX['IPv6']
        rule = {'ethertype': 'IPv6',
                'direction': 'egress',
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule(
            'ofake_dev', '-d %s -j RETURN' % prefix, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_egress_tcp(self):
        """Egress IPv6 TCP rule renders '-p tcp -j RETURN'."""
        rule = {'ethertype': 'IPv6',
                'direction': 'egress',
                'protocol': 'tcp'}
        egress = mock.call.add_rule(
            'ofake_dev', '-p tcp -j RETURN', comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_egress_tcp_prefix(self):
        """Egress IPv6 TCP rule with dest prefix renders '-d <cidr> -p tcp'."""
        prefix = FAKE_PREFIX['IPv6']
        rule = {'ethertype': 'IPv6',
                'direction': 'egress',
                'protocol': 'tcp',
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule('ofake_dev',
                                    '-d %s -p tcp -j RETURN' % prefix,
                                    comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_egress_icmp(self):
        """An egress IPv6 'icmp' rule is mapped to 'ipv6-icmp'."""
        rule = {'ethertype': 'IPv6',
                'direction': 'egress',
                'protocol': 'icmp'}
        egress = mock.call.add_rule(
            'ofake_dev', '-p ipv6-icmp -j RETURN', comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_egress_icmp_prefix(self):
        """IPv6 ICMP egress with dest prefix renders '-d <cidr> -p ipv6-icmp'."""
        prefix = FAKE_PREFIX['IPv6']
        rule = {'ethertype': 'IPv6',
                'direction': 'egress',
                'protocol': 'icmp',
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule(
            'ofake_dev', '-d %s -p ipv6-icmp -j RETURN' % prefix,
            comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_egress_icmp_type(self):
        """port_range_min on an IPv6 ICMP rule becomes '--icmpv6-type <n>'."""
        prefix = FAKE_PREFIX['IPv6']
        rule = {'ethertype': 'IPv6',
                'direction': 'egress',
                'protocol': 'icmp',
                'port_range_min': 8,
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule(
            'ofake_dev',
            '-d %s -p ipv6-icmp -m icmp6 --icmpv6-type 8 -j RETURN' % prefix,
            comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_egress_icmp_type_name(self):
        """A symbolic ICMPv6 type name is passed through to '--icmpv6-type'."""
        prefix = FAKE_PREFIX['IPv6']
        rule = {'ethertype': 'IPv6',
                'direction': 'egress',
                'protocol': 'icmp',
                'port_range_min': 'echo-request',
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule(
            'ofake_dev',
            '-d %s -p ipv6-icmp -m icmp6 --icmpv6-type echo-request '
            '-j RETURN' % prefix,
            comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_egress_icmp_type_code(self):
        """port_range_min/max on an IPv6 ICMP rule become '--icmpv6-type t/c'."""
        prefix = FAKE_PREFIX['IPv6']
        rule = {'ethertype': 'IPv6',
                'direction': 'egress',
                'protocol': 'icmp',
                'port_range_min': 8,
                'port_range_max': 0,
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule(
            'ofake_dev',
            '-d %s -p ipv6-icmp -m icmp6 --icmpv6-type 8/0 -j RETURN' % prefix,
            comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_egress_tcp_port(self):
        """A single-port IPv6 TCP egress range renders '--dport <n>'."""
        rule = {'ethertype': 'IPv6',
                'direction': 'egress',
                'protocol': 'tcp',
                'port_range_min': 10,
                'port_range_max': 10}
        egress = mock.call.add_rule('ofake_dev',
                                    '-p tcp -m tcp --dport 10 -j RETURN',
                                    comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_egress_tcp_mport(self):
        """An IPv6 TCP egress port range renders a multiport match."""
        rule = {'ethertype': 'IPv6',
                'direction': 'egress',
                'protocol': 'tcp',
                'port_range_min': 10,
                'port_range_max': 100}
        egress = mock.call.add_rule(
            'ofake_dev',
            '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
            comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_egress_tcp_mport_prefix(self):
        """IPv6 TCP egress port range plus dest prefix combine into one rule."""
        prefix = FAKE_PREFIX['IPv6']
        rule = {'ethertype': 'IPv6',
                'direction': 'egress',
                'protocol': 'tcp',
                'port_range_min': 10,
                'port_range_max': 100,
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule(
            'ofake_dev',
            '-d %s -p tcp -m tcp -m multiport --dports 10:100 '
            '-j RETURN' % prefix, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_egress_udp(self):
        """Egress IPv6 UDP rule renders '-p udp -j RETURN'."""
        rule = {'ethertype': 'IPv6',
                'direction': 'egress',
                'protocol': 'udp'}
        egress = mock.call.add_rule(
            'ofake_dev', '-p udp -j RETURN', comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_egress_udp_prefix(self):
        """Egress IPv6 UDP rule with dest prefix renders '-d <cidr> -p udp'."""
        prefix = FAKE_PREFIX['IPv6']
        rule = {'ethertype': 'IPv6',
                'direction': 'egress',
                'protocol': 'udp',
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule('ofake_dev',
                                    '-d %s -p udp -j RETURN' % prefix,
                                    comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_egress_udp_port(self):
        """A single-port IPv6 UDP egress range renders '--dport <n>'."""
        rule = {'ethertype': 'IPv6',
                'direction': 'egress',
                'protocol': 'udp',
                'port_range_min': 10,
                'port_range_max': 10}
        egress = mock.call.add_rule('ofake_dev',
                                    '-p udp -m udp --dport 10 -j RETURN',
                                    comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_egress_udp_mport(self):
        """An IPv6 UDP egress port range renders a multiport match."""
        rule = {'ethertype': 'IPv6',
                'direction': 'egress',
                'protocol': 'udp',
                'port_range_min': 10,
                'port_range_max': 100}
        egress = mock.call.add_rule(
            'ofake_dev',
            '-p udp -m udp -m multiport --dports 10:100 -j RETURN',
            comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ipv6_egress_udp_mport_prefix(self):
        """IPv6 UDP egress port range plus dest prefix combine into one rule."""
        prefix = FAKE_PREFIX['IPv6']
        rule = {'ethertype': 'IPv6',
                'direction': 'egress',
                'protocol': 'udp',
                'port_range_min': 10,
                'port_range_max': 100,
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule(
            'ofake_dev',
            '-d %s -p udp -m udp -m multiport --dports 10:100 '
            '-j RETURN' % prefix, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    def _test_prepare_port_filter(self,
                                  rule,
                                  ingress_expected_call=None,
                                  egress_expected_call=None):
        """Run prepare_port_filter() for a port with *rule* as its only SG
        rule and verify the exact ordered sequence of iptables-manager
        calls, splicing *ingress_expected_call* / *egress_expected_call*
        into the expected sequence at the position the driver emits them.
        """
        port = self._fake_port()
        ethertype = rule['ethertype']
        prefix = utils.ip_to_cidr(FAKE_IP[ethertype])
        # Default to the IPv4 filter table and the IPv4 DHCP discovery
        # rule; both are swapped for their IPv6 counterparts below.
        filter_inst = self.v4filter_inst
        dhcp_rule = [mock.call.add_rule(
            'ofake_dev',
            '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
            '--sport 68 --dport 67 -j RETURN',
            comment=None)]
        if ethertype == 'IPv6':
            filter_inst = self.v6filter_inst
            dhcp_rule = [mock.call.add_rule('ofake_dev',
                                            '-s ::/128 -d ff02::/16 '
                                            '-p ipv6-icmp -m icmp6 '
                                            '--icmpv6-type %s -j RETURN' %
                                            icmp6_type,
                                            comment=None) for icmp6_type
                         in constants.ICMPV6_ALLOWED_UNSPEC_ADDR_TYPES]
        sg = [rule]
        port['security_group_rules'] = sg
        self.firewall.prepare_port_filter(port)
        # Expected call sequence: fallback/sg-chain setup, zone-setting
        # PREROUTING rules, then the ingress chain for the port.
        calls = [mock.call.add_chain('sg-fallback'),
                 mock.call.add_rule(
                     'sg-fallback',
                     '-j DROP',
                     comment=ic.UNMATCH_DROP),
                 mock.call.add_chain('sg-chain'),
                 mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                    comment=None),
                 mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                    comment=None),
                 mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                    comment=None),
                 mock.call.add_chain('ifake_dev'),
                 mock.call.add_rule('FORWARD',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $sg-chain', comment=ic.VM_INT_SG),
                 mock.call.add_rule('sg-chain',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $ifake_dev',
                                    comment=ic.SG_TO_VM_SG)
                 ]
        if ethertype == 'IPv6':
            for icmp6_type in firewall.ICMPV6_ALLOWED_INGRESS_TYPES:
                calls.append(
                    mock.call.add_rule('ifake_dev',
                                       '-p ipv6-icmp -m icmp6 --icmpv6-type '
                                       '%s -j RETURN' %
                                       icmp6_type, comment=None))
        calls += [
            mock.call.add_rule(
                'ifake_dev',
                '-m state --state RELATED,ESTABLISHED -j RETURN',
                comment=None
            )
        ]
        # The rule under test (if any) lands between the ESTABLISHED
        # accept and the INVALID drop on the ingress chain.
        if ingress_expected_call:
            calls.append(ingress_expected_call)
        calls += [mock.call.add_rule(
                      'ifake_dev',
                      '-m state --state INVALID -j DROP', comment=None),
                  mock.call.add_rule('ifake_dev',
                                     '-j $sg-fallback', comment=None),
                  mock.call.add_chain('ofake_dev'),
                  mock.call.add_rule('FORWARD',
                                     '-m physdev --physdev-in tapfake_dev '
                                     '--physdev-is-bridged '
                                     '-j $sg-chain', comment=ic.VM_INT_SG),
                  mock.call.add_rule('sg-chain',
                                     '-m physdev --physdev-in tapfake_dev '
                                     '--physdev-is-bridged -j $ofake_dev',
                                     comment=ic.SG_TO_VM_SG),
                  mock.call.add_rule('INPUT',
                                     '-m physdev --physdev-in tapfake_dev '
                                     '--physdev-is-bridged -j $ofake_dev',
                                     comment=ic.INPUT_TO_SG),
                  mock.call.add_chain('sfake_dev'),
                  mock.call.add_rule(
                      'sfake_dev',
                      '-s %s -m mac --mac-source FF:FF:FF:FF:FF:FF -j RETURN'
                      % prefix,
                      comment=ic.PAIR_ALLOW)]
        if ethertype == 'IPv6':
            calls.append(mock.call.add_rule('sfake_dev',
                         '-s fe80::fdff:ffff:feff:ffff/128 -m mac '
                         '--mac-source FF:FF:FF:FF:FF:FF -j RETURN',
                         comment=ic.PAIR_ALLOW))
        calls.append(mock.call.add_rule('sfake_dev', '-j DROP',
                                        comment=ic.PAIR_DROP))
        calls += dhcp_rule
        calls.append(mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                        comment=None))
        if ethertype == 'IPv4':
            calls.append(mock.call.add_rule(
                'ofake_dev',
                '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                comment=None))
            calls.append(mock.call.add_rule(
                'ofake_dev',
                '-p udp -m udp --sport 67 --dport 68 -j DROP',
                comment=None))
        if ethertype == 'IPv6':
            calls.append(mock.call.add_rule('ofake_dev',
                                            '-p ipv6-icmp -m icmp6 '
                                            '--icmpv6-type %s -j DROP' %
                                            constants.ICMPV6_TYPE_RA,
                                            comment=None))
            calls.append(mock.call.add_rule('ofake_dev',
                                            '-p ipv6-icmp -j RETURN',
                                            comment=None))
            calls.append(mock.call.add_rule('ofake_dev', '-p udp -m udp '
                                            '--sport 546 --dport 547 '
                                            '-j RETURN', comment=None))
            calls.append(mock.call.add_rule(
                'ofake_dev',
                '-p udp -m udp --sport 547 --dport 546 -j DROP',
                comment=None))
        calls += [
            mock.call.add_rule(
                'ofake_dev',
                '-m state --state RELATED,ESTABLISHED -j RETURN',
                comment=None),
        ]
        if egress_expected_call:
            calls.append(egress_expected_call)
        calls += [mock.call.add_rule(
                      'ofake_dev',
                      '-m state --state INVALID -j DROP', comment=None),
                  mock.call.add_rule('ofake_dev',
                                     '-j $sg-fallback', comment=None),
                  mock.call.add_rule('sg-chain', '-j ACCEPT')]
        # Compare element-wise first so assertEqual reports the first
        # mismatching call instead of one enormous diff.
        comb = zip(calls, filter_inst.mock_calls)
        for (l, r) in comb:
            self.assertEqual(l, r)
        filter_inst.assert_has_calls(calls)
    def _test_remove_conntrack_entries(self, ethertype, protocol, direction,
                                       ct_zone):
        """Delete an SG rule and verify the resulting ``conntrack -D``
        command (address family, protocol, direction flag and zone).
        Without a conntrack zone no conntrack command may run at all.
        """
        port = self._fake_port()
        port['security_groups'] = 'fake_sg_id'
        self.firewall.filtered_ports[port['device']] = port
        self.firewall.updated_rule_sg_ids = set(['fake_sg_id'])
        self.firewall.sg_rules['fake_sg_id'] = [
            {'direction': direction, 'ethertype': ethertype,
             'protocol': protocol}]
        with mock.patch.dict(self.firewall.ipconntrack._device_zone_map,
                             {port['network_id']: ct_zone}):
            self.firewall.filter_defer_apply_on()
            # Dropping the rule between defer on/off triggers the cleanup.
            self.firewall.sg_rules['fake_sg_id'] = []
            self.firewall.filter_defer_apply_off()
            if not ct_zone:
                self.assertFalse(self.utils_exec.called)
                return
            cmd = ['conntrack', '-D']
            if protocol:
                cmd.extend(['-p', protocol])
            if ethertype == 'IPv4':
                cmd.extend(['-f', 'ipv4'])
                if direction == 'ingress':
                    cmd.extend(['-d', '10.0.0.1'])
                else:
                    cmd.extend(['-s', '10.0.0.1'])
            else:
                cmd.extend(['-f', 'ipv6'])
                if direction == 'ingress':
                    cmd.extend(['-d', 'fe80::1'])
                else:
                    cmd.extend(['-s', 'fe80::1'])
            cmd.extend(['-w', ct_zone])
            calls = [
                mock.call(cmd, run_as_root=True, check_exit_code=True,
                          extra_ok_codes=[1])]
            self.utils_exec.assert_has_calls(calls)
def test_remove_conntrack_entries_for_delete_rule_ipv4(self):
for direction in ['ingress', 'egress']:
for pro in [None, 'tcp', 'icmp', 'udp']:
self._test_remove_conntrack_entries(
'IPv4', pro, direction, ct_zone=10)
def test_remove_conntrack_entries_for_delete_rule_ipv4_no_ct_zone(self):
for direction in ['ingress', 'egress']:
for pro in [None, 'tcp', 'icmp', 'udp']:
self._test_remove_conntrack_entries(
'IPv4', pro, direction, ct_zone=None)
def test_remove_conntrack_entries_for_delete_rule_ipv6(self):
for direction in ['ingress', 'egress']:
for pro in [None, 'tcp', 'icmp', 'udp']:
self._test_remove_conntrack_entries(
'IPv6', pro, direction, ct_zone=10)
def test_remove_conntrack_entries_for_delete_rule_ipv6_no_ct_zone(self):
for direction in ['ingress', 'egress']:
for pro in [None, 'tcp', 'icmp', 'udp']:
self._test_remove_conntrack_entries(
'IPv6', pro, direction, ct_zone=None)
def test_remove_conntrack_entries_for_port_sec_group_change(self):
self._test_remove_conntrack_entries_for_port_sec_group_change(
ct_zone=10)
def test_remove_conntrack_entries_for_port_sec_group_change_no_ct_zone(
self):
self._test_remove_conntrack_entries_for_port_sec_group_change(
ct_zone=None)
def _get_expected_conntrack_calls(self, ips, ct_zone):
expected_calls = []
for ip_item in ips:
proto = ip_item[0]
ip = ip_item[1]
for direction in ['-d', '-s']:
cmd = ['conntrack', '-D', '-f', proto, direction, ip]
if ct_zone:
cmd.extend(['-w', ct_zone])
expected_calls.append(
mock.call(cmd, run_as_root=True, check_exit_code=True,
extra_ok_codes=[1]))
return expected_calls
    def _test_remove_conntrack_entries_for_port_sec_group_change(self,
                                                                 ct_zone):
        """Swap a port's security-group set between defer on/off and
        verify conntrack entries for both of its addresses are deleted
        (only when a conntrack zone is mapped for the network).
        """
        port = self._fake_port()
        port['security_groups'] = ['fake_sg_id']
        self.firewall.filtered_ports[port['device']] = port
        self.firewall.updated_sg_members = set(['tapfake_dev'])
        with mock.patch.dict(self.firewall.ipconntrack._device_zone_map,
                             {port['network_id']: ct_zone}):
            self.firewall.filter_defer_apply_on()
            # Replace the port with a copy carrying a different SG set.
            new_port = copy.deepcopy(port)
            new_port['security_groups'] = ['fake_sg_id2']
            self.firewall.filtered_ports[port['device']] = new_port
            self.firewall.filter_defer_apply_off()
            if not ct_zone:
                self.assertFalse(self.utils_exec.called)
                return
            calls = self._get_expected_conntrack_calls(
                [('ipv4', '10.0.0.1'), ('ipv6', 'fe80::1')], ct_zone)
            self.utils_exec.assert_has_calls(calls)
def test_remove_conntrack_entries_for_sg_member_changed_ipv4(self):
for direction in ['ingress', 'egress']:
self._test_remove_conntrack_entries_sg_member_changed(
'IPv4', direction, ct_zone=10)
def test_remove_conntrack_entries_for_sg_member_changed_ipv4_no_ct_zone(
self):
for direction in ['ingress', 'egress']:
self._test_remove_conntrack_entries_sg_member_changed(
'IPv4', direction, ct_zone=None)
def test_remove_conntrack_entries_for_sg_member_changed_ipv6(self):
for direction in ['ingress', 'egress']:
self._test_remove_conntrack_entries_sg_member_changed(
'IPv6', direction, ct_zone=10)
def test_remove_conntrack_entries_for_sg_member_changed_ipv6_no_ct_zone(
self):
for direction in ['ingress', 'egress']:
self._test_remove_conntrack_entries_sg_member_changed(
'IPv6', direction, ct_zone=None)
def _test_remove_conntrack_entries_sg_member_changed(self, ethertype,
direction, ct_zone):
port = self._fake_port()
port['security_groups'] = ['fake_sg_id']
port['security_group_source_groups'] = ['fake_sg_id2']
port['security_group_rules'] = [{'security_group_id': 'fake_sg_id',
'direction': direction,
'remote_group_id': 'fake_sg_id2',
'ethertype': ethertype}]
self.firewall.filtered_ports = {port['device']: port}
if ethertype == "IPv4":
ethertype = "ipv4"
members_add = {'IPv4': ['10.0.0.2', '10.0.0.3']}
members_after_delete = {'IPv4': ['10.0.0.3']}
else:
ethertype = "ipv6"
members_add = {'IPv6': ['fe80::2', 'fe80::3']}
members_after_delete = {'IPv6': ['fe80::3']}
with mock.patch.dict(self.firewall.ipconntrack._device_zone_map,
{port['network_id']: ct_zone}):
# add ['10.0.0.2', '10.0.0.3'] or ['fe80::2', 'fe80::3']
self.firewall.security_group_updated('sg_member', ['fake_sg_id2'])
self.firewall.update_security_group_members(
'fake_sg_id2', members_add)
# delete '10.0.0.2' or 'fe80::2'
self.firewall.security_group_updated('sg_member', ['fake_sg_id2'])
self.firewall.update_security_group_members(
'fake_sg_id2', members_after_delete)
# check conntrack deletion from '10.0.0.1' to '10.0.0.2' or
# from 'fe80::1' to 'fe80::2'
ips = {"ipv4": ['10.0.0.1', '10.0.0.2'],
"ipv6": ['fe80::1', 'fe80::2']}
calls = []
for direction in ['ingress', 'egress']:
direction = '-d' if direction == 'ingress' else '-s'
remote_ip_direction = '-s' if direction == '-d' else '-d'
conntrack_cmd = ['conntrack', '-D', '-f', ethertype,
direction, ips[ethertype][0]]
if not ct_zone:
continue
conntrack_cmd.extend(['-w', 10])
conntrack_cmd.extend([remote_ip_direction, ips[ethertype][1]])
calls.append(mock.call(conntrack_cmd,
run_as_root=True, check_exit_code=True,
extra_ok_codes=[1]))
self.utils_exec.assert_has_calls(calls)
    def test_user_sg_rules_deduped_before_call_to_iptables_manager(self):
        """Duplicate SG rules must collapse to a single add_rule() call."""
        port = self._fake_port()
        port['security_group_rules'] = [{'ethertype': 'IPv4',
                                         'direction': 'ingress'}] * 2
        self.firewall.prepare_port_filter(port)
        # Join each add_rule() call's positional args into one string and
        # require the resulting list to contain no duplicates.
        rules = [''.join(c[1]) for c in self.v4filter_inst.add_rule.mock_calls]
        self.assertEqual(len(set(rules)), len(rules))
    def test_update_delete_port_filter(self):
        """Exercise the full prepare/update/remove lifecycle on the v4
        filter: initial setup with an ingress rule, a full teardown and
        re-setup with an egress rule on update, then final teardown on
        removal; updates/removals of unknown devices are no-ops.
        """
        port = self._fake_port()
        port['security_group_rules'] = [{'ethertype': 'IPv4',
                                         'direction': 'ingress'}]
        self.firewall.prepare_port_filter(port)
        port['security_group_rules'] = [{'ethertype': 'IPv4',
                                         'direction': 'egress'}]
        self.firewall.update_port_filter(port)
        self.firewall.update_port_filter({'device': 'no-exist-device'})
        self.firewall.remove_port_filter(port)
        self.firewall.remove_port_filter({'device': 'no-exist-device'})
        calls = [mock.call.add_chain('sg-fallback'),
                 mock.call.add_rule(
                     'sg-fallback',
                     '-j DROP',
                     comment=ic.UNMATCH_DROP),
                 mock.call.add_chain('sg-chain'),
                 mock.call.add_rule('PREROUTING', mock.ANY,
                                    comment=None),  # zone set
                 mock.call.add_rule('PREROUTING', mock.ANY,
                                    comment=None),  # zone set
                 mock.call.add_rule('PREROUTING', mock.ANY,
                                    comment=None),  # zone set
                 mock.call.add_chain('ifake_dev'),
                 mock.call.add_rule(
                     'FORWARD',
                     '-m physdev --physdev-out tapfake_dev '
                     '--physdev-is-bridged -j $sg-chain',
                     comment=ic.VM_INT_SG),
                 mock.call.add_rule(
                     'sg-chain',
                     '-m physdev --physdev-out tapfake_dev '
                     '--physdev-is-bridged -j $ifake_dev',
                     comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule('ifake_dev', '-j RETURN',
                                    comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state INVALID -j DROP', comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-j $sg-fallback', comment=None),
                 mock.call.add_chain('ofake_dev'),
                 mock.call.add_rule(
                     'FORWARD',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $sg-chain',
                     comment=ic.VM_INT_SG),
                 mock.call.add_rule(
                     'sg-chain',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $ofake_dev',
                     comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'INPUT',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $ofake_dev',
                     comment=ic.INPUT_TO_SG),
                 mock.call.add_chain('sfake_dev'),
                 mock.call.add_rule(
                     'sfake_dev',
                     '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                     '-j RETURN',
                     comment=ic.PAIR_ALLOW),
                 mock.call.add_rule(
                     'sfake_dev', '-j DROP',
                     comment=ic.PAIR_DROP),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
                     '--sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                    comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 67 --dport 68 -j DROP',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev', '-m state --state INVALID -j DROP',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-j $sg-fallback', comment=None),
                 mock.call.add_rule('sg-chain', '-j ACCEPT'),
                 mock.call.remove_chain('ifake_dev'),
                 mock.call.remove_chain('ofake_dev'),
                 mock.call.remove_chain('sfake_dev'),
                 mock.call.remove_rule('PREROUTING', mock.ANY),  # zone set
                 mock.call.remove_rule('PREROUTING', mock.ANY),  # zone set
                 mock.call.remove_rule('PREROUTING', mock.ANY),  # zone set
                 mock.call.remove_chain('sg-chain'),
                 mock.call.add_chain('sg-chain'),
                 mock.call.add_rule('PREROUTING', mock.ANY,
                                    comment=None),  # zone set
                 mock.call.add_rule('PREROUTING', mock.ANY,
                                    comment=None),  # zone set
                 mock.call.add_rule('PREROUTING', mock.ANY,
                                    comment=None),  # zone set
                 mock.call.add_chain('ifake_dev'),
                 mock.call.add_rule(
                     'FORWARD',
                     '-m physdev --physdev-out tapfake_dev '
                     '--physdev-is-bridged -j $sg-chain',
                     comment=ic.VM_INT_SG),
                 mock.call.add_rule(
                     'sg-chain',
                     '-m physdev --physdev-out tapfake_dev '
                     '--physdev-is-bridged -j $ifake_dev',
                     comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state INVALID -j DROP', comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-j $sg-fallback', comment=None),
                 mock.call.add_chain('ofake_dev'),
                 mock.call.add_rule(
                     'FORWARD',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $sg-chain',
                     comment=ic.VM_INT_SG),
                 mock.call.add_rule(
                     'sg-chain',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $ofake_dev',
                     comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'INPUT',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $ofake_dev',
                     comment=ic.INPUT_TO_SG),
                 mock.call.add_chain('sfake_dev'),
                 mock.call.add_rule(
                     'sfake_dev',
                     '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                     '-j RETURN',
                     comment=ic.PAIR_ALLOW),
                 mock.call.add_rule(
                     'sfake_dev', '-j DROP',
                     comment=ic.PAIR_DROP),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
                     '--sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                    comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 67 --dport 68 -j DROP',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule('ofake_dev', '-j RETURN',
                                    comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state INVALID -j DROP', comment=None),
                 mock.call.add_rule('ofake_dev',
                                    '-j $sg-fallback',
                                    comment=None),
                 mock.call.add_rule('sg-chain', '-j ACCEPT'),
                 mock.call.remove_chain('ifake_dev'),
                 mock.call.remove_chain('ofake_dev'),
                 mock.call.remove_chain('sfake_dev'),
                 mock.call.remove_rule('PREROUTING', mock.ANY),  # zone set
                 mock.call.remove_rule('PREROUTING', mock.ANY),  # zone set
                 mock.call.remove_rule('PREROUTING', mock.ANY),  # zone set
                 mock.call.remove_chain('sg-chain'),
                 mock.call.add_chain('sg-chain')]
        self.v4filter_inst.assert_has_calls(calls)
def test_delete_conntrack_from_delete_port(self):
self._test_delete_conntrack_from_delete_port(ct_zone=10)
def test_delete_conntrack_from_delete_port_no_ct_zone(self):
self._test_delete_conntrack_from_delete_port(ct_zone=None)
    def _test_delete_conntrack_from_delete_port(self, ct_zone):
        """Remove a filtered port and verify conntrack entries for both of
        its addresses are flushed; with no zone, no conntrack call runs.
        """
        port = self._fake_port()
        port['security_groups'] = ['fake_sg_id']
        self.firewall.filtered_ports = {'tapfake_dev': port}
        self.firewall.devices_with_updated_sg_members['fake_sg_id2'
                                                      ] = ['tapfake_dev']
        new_port = copy.deepcopy(port)
        new_port['security_groups'] = ['fake_sg_id2']
        # NOTE(review): 'device' is assigned a list here, unlike the plain
        # string used elsewhere -- confirm this is intentional.
        new_port['device'] = ['tapfake_dev2']
        new_port['fixed_ips'] = ['10.0.0.2', 'fe80::2']
        self.firewall.sg_members['fake_sg_id2'] = {'IPv4': ['10.0.0.2'],
                                                   'IPv6': ['fe80::2']}
        mock.patch.object(self.firewall.ipconntrack, 'get_device_zone',
                          return_value=ct_zone).start()
        self.firewall.remove_port_filter(port)
        if not ct_zone:
            self.assertFalse(self.utils_exec.called)
            return
        calls = self._get_expected_conntrack_calls(
            [('ipv4', '10.0.0.1'), ('ipv6', 'fe80::1')], ct_zone)
        self.utils_exec.assert_has_calls(calls)
    def test_remove_unknown_port(self):
        """Removing a never-prepared port is a silent no-op."""
        port = self._fake_port()
        self.firewall.remove_port_filter(port)
        # checking no exception occurs
        self.assertFalse(self.v4filter_inst.called)
def test_defer_apply(self):
with self.firewall.defer_apply():
pass
self.iptables_inst.assert_has_calls([mock.call.defer_apply_on(),
mock.call.defer_apply_off()])
def test_filter_defer_with_exception(self):
    """Even when the deferred block raises, defer_apply_off still runs."""
    try:
        with self.firewall.defer_apply():
            raise Exception("same exception")
    except Exception:
        # Swallow deliberately; the assertion below is what matters.
        pass
    self.iptables_inst.assert_has_calls([mock.call.defer_apply_on(),
                                         mock.call.defer_apply_off()])
def _mock_chain_applies(self):
    """Replace _setup/_remove_chains_apply with argument-snapshotting
    mocks and return the parent mock for call-order assertions."""
    class CopyingMock(mock.MagicMock):
        """Copies arguments so mutable arguments can be asserted on.

        Copied verbatim from unittest.mock documentation.
        """
        def __call__(self, *args, **kwargs):
            args = copy.deepcopy(args)
            kwargs = copy.deepcopy(kwargs)
            return super(CopyingMock, self).__call__(*args, **kwargs)
    # Need to use CopyingMock because _{setup,remove}_chains_apply are
    # usually called with data that is modified between calls (i.e.,
    # self.firewall.filtered_ports), so a plain mock would record the
    # final mutated state instead of the state at call time.
    chain_applies = CopyingMock()
    self.firewall._setup_chains_apply = chain_applies.setup
    self.firewall._remove_chains_apply = chain_applies.remove
    return chain_applies
def test_mock_chain_applies(self):
    """Sanity-check the CopyingMock helper: each recorded call keeps a
    snapshot of the port dict as it was at call time, not its final
    mutated value."""
    chain_applies = self._mock_chain_applies()
    port_prepare = {'device': 'd1', 'mac_address': 'prepare',
                    'network_id': 'fake_net'}
    port_update = {'device': 'd1', 'mac_address': 'update',
                   'network_id': 'fake_net'}
    self.firewall.prepare_port_filter(port_prepare)
    self.firewall.update_port_filter(port_update)
    self.firewall.remove_port_filter(port_update)
    chain_applies.assert_has_calls([
        mock.call.setup({'d1': port_prepare}, {}),
        mock.call.remove({'d1': port_prepare}, {}),
        mock.call.setup({'d1': port_update}, {}),
        mock.call.remove({'d1': port_update}, {}),
        mock.call.setup({}, {})])
def test_defer_chain_apply_need_pre_defer_copy(self):
    """A port filtered before defer_apply() must be removed using the
    pre-defer snapshot when the deferred block deletes it."""
    chain_applies = self._mock_chain_applies()
    port = self._fake_port()
    device2port = {port['device']: port}
    self.firewall.prepare_port_filter(port)
    with self.firewall.defer_apply():
        self.firewall.remove_port_filter(port)
    chain_applies.assert_has_calls([mock.call.setup(device2port, {}),
                                    mock.call.remove(device2port, {}),
                                    mock.call.setup({}, {})])
def test_defer_chain_apply_coalesce_simple(self):
    """prepare + update + remove inside a single deferred block cancel
    out: only one empty remove/setup pair is applied at the end."""
    chain_applies = self._mock_chain_applies()
    port = self._fake_port()
    with self.firewall.defer_apply():
        self.firewall.prepare_port_filter(port)
        self.firewall.update_port_filter(port)
        self.firewall.remove_port_filter(port)
    chain_applies.assert_has_calls([mock.call.remove({}, {}),
                                    mock.call.setup({}, {})])
def test_defer_chain_apply_coalesce_multiple_ports(self):
    """Multiple ports prepared inside one deferred block are applied in a
    single setup call when the block exits."""
    chain_applies = self._mock_chain_applies()
    port1 = {'device': 'd1', 'mac_address': 'mac1', 'network_id': 'net1'}
    port2 = {'device': 'd2', 'mac_address': 'mac2', 'network_id': 'net1'}
    device2port = {'d1': port1, 'd2': port2}
    with self.firewall.defer_apply():
        self.firewall.prepare_port_filter(port1)
        self.firewall.prepare_port_filter(port2)
    chain_applies.assert_has_calls([mock.call.remove({}, {}),
                                    mock.call.setup(device2port, {})])
def test_ip_spoofing_filter_with_multiple_ips(self):
    """A port with several fixed IPs gets one anti-spoof RETURN rule per
    IPv4 address in its source chain (sfake_dev). 'fe80::1' does not
    appear below because these expectations cover the IPv4 filter table
    only."""
    port = {'device': 'tapfake_dev',
            'mac_address': 'ff:ff:ff:ff:ff:ff',
            'network_id': 'fake_net',
            'fixed_ips': ['10.0.0.1', 'fe80::1', '10.0.0.2']}
    self.firewall.prepare_port_filter(port)
    calls = [mock.call.add_chain('sg-fallback'),
             mock.call.add_rule(
                 'sg-fallback', '-j DROP',
                 comment=ic.UNMATCH_DROP),
             mock.call.add_chain('sg-chain'),
             mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                comment=None),
             mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                comment=None),
             mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                comment=None),
             mock.call.add_chain('ifake_dev'),
             mock.call.add_rule('FORWARD',
                                '-m physdev --physdev-out tapfake_dev '
                                '--physdev-is-bridged '
                                '-j $sg-chain', comment=ic.VM_INT_SG),
             mock.call.add_rule('sg-chain',
                                '-m physdev --physdev-out tapfake_dev '
                                '--physdev-is-bridged '
                                '-j $ifake_dev',
                                comment=ic.SG_TO_VM_SG),
             mock.call.add_rule(
                 'ifake_dev',
                 '-m state --state RELATED,ESTABLISHED -j RETURN',
                 comment=None),
             mock.call.add_rule(
                 'ifake_dev',
                 '-m state --state INVALID -j DROP', comment=None),
             mock.call.add_rule('ifake_dev',
                                '-j $sg-fallback', comment=None),
             mock.call.add_chain('ofake_dev'),
             mock.call.add_rule('FORWARD',
                                '-m physdev --physdev-in tapfake_dev '
                                '--physdev-is-bridged '
                                '-j $sg-chain', comment=ic.VM_INT_SG),
             mock.call.add_rule('sg-chain',
                                '-m physdev --physdev-in tapfake_dev '
                                '--physdev-is-bridged -j $ofake_dev',
                                comment=ic.SG_TO_VM_SG),
             mock.call.add_rule('INPUT',
                                '-m physdev --physdev-in tapfake_dev '
                                '--physdev-is-bridged -j $ofake_dev',
                                comment=ic.INPUT_TO_SG),
             mock.call.add_chain('sfake_dev'),
             # One MAC/IP-pair allow rule per IPv4 fixed IP.
             mock.call.add_rule(
                 'sfake_dev',
                 '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                 '-j RETURN',
                 comment=ic.PAIR_ALLOW),
             mock.call.add_rule(
                 'sfake_dev',
                 '-s 10.0.0.2/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                 '-j RETURN',
                 comment=ic.PAIR_ALLOW),
             mock.call.add_rule(
                 'sfake_dev', '-j DROP',
                 comment=ic.PAIR_DROP),
             mock.call.add_rule(
                 'ofake_dev',
                 '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
                 '--sport 68 --dport 67 -j RETURN',
                 comment=None),
             mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                comment=None),
             mock.call.add_rule(
                 'ofake_dev',
                 '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                 comment=None),
             mock.call.add_rule(
                 'ofake_dev',
                 '-p udp -m udp --sport 67 --dport 68 -j DROP',
                 comment=None),
             mock.call.add_rule(
                 'ofake_dev',
                 '-m state --state RELATED,ESTABLISHED -j RETURN',
                 comment=None),
             mock.call.add_rule(
                 'ofake_dev',
                 '-m state --state INVALID -j DROP', comment=None),
             mock.call.add_rule('ofake_dev',
                                '-j $sg-fallback', comment=None),
             mock.call.add_rule('sg-chain', '-j ACCEPT')]
    self.v4filter_inst.assert_has_calls(calls)
def test_ip_spoofing_no_fixed_ips(self):
    """With no fixed IPs the source chain falls back to a MAC-only allow
    rule (no per-IP pair rules) before the terminal DROP."""
    port = {'device': 'tapfake_dev',
            'mac_address': 'ff:ff:ff:ff:ff:ff',
            'network_id': 'fake_net',
            'fixed_ips': []}
    self.firewall.prepare_port_filter(port)
    calls = [mock.call.add_chain('sg-fallback'),
             mock.call.add_rule(
                 'sg-fallback', '-j DROP',
                 comment=ic.UNMATCH_DROP),
             mock.call.add_chain('sg-chain'),
             mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                comment=None),
             mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                comment=None),
             mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                comment=None),
             mock.call.add_chain('ifake_dev'),
             mock.call.add_rule('FORWARD',
                                '-m physdev --physdev-out tapfake_dev '
                                '--physdev-is-bridged '
                                '-j $sg-chain', comment=ic.VM_INT_SG),
             mock.call.add_rule('sg-chain',
                                '-m physdev --physdev-out tapfake_dev '
                                '--physdev-is-bridged '
                                '-j $ifake_dev',
                                comment=ic.SG_TO_VM_SG),
             mock.call.add_rule(
                 'ifake_dev',
                 '-m state --state RELATED,ESTABLISHED -j RETURN',
                 comment=None),
             mock.call.add_rule(
                 'ifake_dev',
                 '-m state --state INVALID -j DROP', comment=None),
             mock.call.add_rule('ifake_dev', '-j $sg-fallback',
                                comment=None),
             mock.call.add_chain('ofake_dev'),
             mock.call.add_rule('FORWARD',
                                '-m physdev --physdev-in tapfake_dev '
                                '--physdev-is-bridged '
                                '-j $sg-chain', comment=ic.VM_INT_SG),
             mock.call.add_rule('sg-chain',
                                '-m physdev --physdev-in tapfake_dev '
                                '--physdev-is-bridged -j $ofake_dev',
                                comment=ic.SG_TO_VM_SG),
             mock.call.add_rule('INPUT',
                                '-m physdev --physdev-in tapfake_dev '
                                '--physdev-is-bridged -j $ofake_dev',
                                comment=ic.INPUT_TO_SG),
             mock.call.add_chain('sfake_dev'),
             # MAC-only anti-spoof rule: no fixed IPs to pair against.
             mock.call.add_rule(
                 'sfake_dev',
                 '-m mac --mac-source FF:FF:FF:FF:FF:FF -j RETURN',
                 comment=ic.PAIR_ALLOW),
             mock.call.add_rule(
                 'sfake_dev', '-j DROP',
                 comment=ic.PAIR_DROP),
             mock.call.add_rule(
                 'ofake_dev',
                 '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
                 '--sport 68 --dport 67 -j RETURN',
                 comment=None),
             mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                comment=None),
             mock.call.add_rule(
                 'ofake_dev',
                 '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                 comment=None),
             mock.call.add_rule(
                 'ofake_dev',
                 '-p udp -m udp --sport 67 --dport 68 -j DROP',
                 comment=None),
             mock.call.add_rule(
                 'ofake_dev',
                 '-m state --state RELATED,ESTABLISHED -j RETURN',
                 comment=None),
             mock.call.add_rule(
                 'ofake_dev',
                 '-m state --state INVALID -j DROP',
                 comment=None),
             mock.call.add_rule('ofake_dev', '-j $sg-fallback',
                                comment=None),
             mock.call.add_rule('sg-chain', '-j ACCEPT')]
    self.v4filter_inst.assert_has_calls(calls)
class IptablesFirewallEnhancedIpsetTestCase(BaseIptablesFirewallTestCase):
    """Tests for the ipset-accelerated security-group bookkeeping of the
    iptables firewall driver: sg_members/sg_rules tracking, ipset chain
    lifecycle, and remote-security-group rule expansion."""

    def setUp(self):
        super(IptablesFirewallEnhancedIpsetTestCase, self).setUp()
        # Mock the IpsetManager but keep the real name-mangling helper so
        # expected set names (e.g. 'NIPv4fake_sgid') stay realistic.
        self.firewall.ipset = mock.Mock()
        self.firewall.ipset.get_name.side_effect = (
            ipset_manager.IpsetManager.get_name)
        self.firewall.ipset.set_name_exists.return_value = True
        self.firewall.ipset.set_members = mock.Mock(return_value=([], []))

    def _fake_port(self, sg_id=FAKE_SGID):
        # Dual-stack port that is both a member of sg_id and uses it as a
        # remote source group.
        return {'device': 'tapfake_dev',
                'mac_address': 'ff:ff:ff:ff:ff:ff',
                'network_id': 'fake_net',
                'fixed_ips': [FAKE_IP['IPv4'],
                              FAKE_IP['IPv6']],
                'security_groups': [sg_id],
                'security_group_source_groups': [sg_id]}

    def _fake_sg_rule_for_ethertype(self, ethertype, remote_group):
        return {'direction': 'ingress', 'remote_group_id': remote_group,
                'ethertype': ethertype}

    def _fake_sg_rules(self, sg_id=FAKE_SGID, remote_groups=None):
        # One ingress rule per (ethertype, remote group) combination.
        remote_groups = remote_groups or {_IPv4: [FAKE_SGID],
                                          _IPv6: [FAKE_SGID]}
        rules = []
        for ip_version, remote_group_list in remote_groups.items():
            for remote_group in remote_group_list:
                rules.append(self._fake_sg_rule_for_ethertype(ip_version,
                                                              remote_group))
        return {sg_id: rules}

    def _fake_sg_members(self, sg_ids=None):
        return {sg_id: copy.copy(FAKE_IP) for sg_id in (sg_ids or [FAKE_SGID])}

    def test_update_security_group_members(self):
        """Member updates must be pushed to ipset per address family."""
        sg_members = {'IPv4': ['10.0.0.1', '10.0.0.2'], 'IPv6': ['fe80::1']}
        self.firewall.update_security_group_members('fake_sgid', sg_members)
        calls = [
            mock.call.set_members('fake_sgid', 'IPv4',
                                  ['10.0.0.1', '10.0.0.2']),
            mock.call.set_members('fake_sgid', 'IPv6',
                                  ['fe80::1'])
        ]
        self.firewall.ipset.assert_has_calls(calls, any_order=True)

    def _setup_fake_firewall_members_and_rules(self, firewall):
        # Seed pre/post rule and member state with identical contents.
        firewall.sg_rules = self._fake_sg_rules()
        firewall.pre_sg_rules = self._fake_sg_rules()
        firewall.sg_members = self._fake_sg_members()
        firewall.pre_sg_members = firewall.sg_members

    def _prepare_rules_and_members_for_removal(self):
        self._setup_fake_firewall_members_and_rules(self.firewall)
        # OTHER_SGID had members before but is no longer referenced.
        self.firewall.pre_sg_members[OTHER_SGID] = (
            self.firewall.pre_sg_members[FAKE_SGID])

    def test_determine_remote_sgs_to_remove(self):
        self._prepare_rules_and_members_for_removal()
        ports = [self._fake_port()]
        self.assertEqual(
            {_IPv4: set([OTHER_SGID]), _IPv6: set([OTHER_SGID])},
            self.firewall._determine_remote_sgs_to_remove(ports))

    def test_determine_remote_sgs_to_remove_ipv6_unreferenced(self):
        self._prepare_rules_and_members_for_removal()
        ports = [self._fake_port()]
        # OTHER_SGID is still referenced by an IPv4 rule, so only its
        # IPv6 side is eligible for removal.
        self.firewall.sg_rules = self._fake_sg_rules(
            remote_groups={_IPv4: [OTHER_SGID, FAKE_SGID],
                           _IPv6: [FAKE_SGID]})
        self.assertEqual(
            {_IPv4: set(), _IPv6: set([OTHER_SGID])},
            self.firewall._determine_remote_sgs_to_remove(ports))

    def test_get_remote_sg_ids_by_ipversion(self):
        self.firewall.sg_rules = self._fake_sg_rules(
            remote_groups={_IPv4: [FAKE_SGID], _IPv6: [OTHER_SGID]})
        ports = [self._fake_port()]
        self.assertEqual(
            {_IPv4: set([FAKE_SGID]), _IPv6: set([OTHER_SGID])},
            self.firewall._get_remote_sg_ids_sets_by_ipversion(ports))

    def test_get_remote_sg_ids(self):
        # Duplicated remote group ids must be de-duplicated into sets.
        self.firewall.sg_rules = self._fake_sg_rules(
            remote_groups={_IPv4: [FAKE_SGID, FAKE_SGID, FAKE_SGID],
                           _IPv6: [OTHER_SGID, OTHER_SGID, OTHER_SGID]})
        port = self._fake_port()
        self.assertEqual(
            {_IPv4: set([FAKE_SGID]), _IPv6: set([OTHER_SGID])},
            self.firewall._get_remote_sg_ids(port))

    def test_determine_sg_rules_to_remove(self):
        self.firewall.pre_sg_rules = self._fake_sg_rules(sg_id=OTHER_SGID)
        ports = [self._fake_port()]
        self.assertEqual(set([OTHER_SGID]),
                         self.firewall._determine_sg_rules_to_remove(ports))

    def test_get_sg_ids_set_for_ports(self):
        sg_ids = set([FAKE_SGID, OTHER_SGID])
        ports = [self._fake_port(sg_id) for sg_id in sg_ids]
        self.assertEqual(sg_ids,
                         self.firewall._get_sg_ids_set_for_ports(ports))

    def test_remove_sg_members(self):
        self.firewall.sg_members = self._fake_sg_members([FAKE_SGID,
                                                          OTHER_SGID])
        # FAKE_SGID loses both address families -> dropped entirely;
        # OTHER_SGID loses only IPv6 -> must remain.
        remote_sgs_to_remove = {_IPv4: set([FAKE_SGID]),
                                _IPv6: set([FAKE_SGID, OTHER_SGID])}
        self.firewall._remove_sg_members(remote_sgs_to_remove)
        self.assertIn(OTHER_SGID, self.firewall.sg_members)
        self.assertNotIn(FAKE_SGID, self.firewall.sg_members)

    def test_remove_unused_security_group_info_clears_unused_rules(self):
        self._setup_fake_firewall_members_and_rules(self.firewall)
        self.firewall.prepare_port_filter(self._fake_port())
        # create another SG which won't be referenced by any filtered port
        fake_sg_rules = self.firewall.sg_rules['fake_sgid']
        self.firewall.pre_sg_rules[OTHER_SGID] = fake_sg_rules
        self.firewall.sg_rules[OTHER_SGID] = fake_sg_rules
        # call the cleanup function, and check the unused sg_rules are out
        self.firewall._remove_unused_security_group_info()
        self.assertNotIn(OTHER_SGID, self.firewall.sg_rules)

    def test_remove_unused_security_group_info(self):
        # OTHER_SGID has (empty) member lists but no rule references it.
        self.firewall.sg_members = {OTHER_SGID: {_IPv4: [], _IPv6: []}}
        self.firewall.pre_sg_members = self.firewall.sg_members
        self.firewall.sg_rules = self._fake_sg_rules(
            remote_groups={_IPv4: [FAKE_SGID], _IPv6: [FAKE_SGID]})
        self.firewall.pre_sg_rules = self.firewall.sg_rules
        port = self._fake_port()
        self.firewall.filtered_ports['tapfake_dev'] = port
        self.firewall._remove_unused_security_group_info()
        self.assertNotIn(OTHER_SGID, self.firewall.sg_members)

    def test_not_remove_used_security_group_info(self):
        # OTHER_SGID is referenced as a remote group -> must be kept.
        self.firewall.sg_members = {OTHER_SGID: {_IPv4: [], _IPv6: []}}
        self.firewall.pre_sg_members = self.firewall.sg_members
        self.firewall.sg_rules = self._fake_sg_rules(
            remote_groups={_IPv4: [OTHER_SGID], _IPv6: [OTHER_SGID]})
        self.firewall.pre_sg_rules = self.firewall.sg_rules
        port = self._fake_port()
        self.firewall.filtered_ports['tapfake_dev'] = port
        self.firewall._remove_unused_security_group_info()
        self.assertIn(OTHER_SGID, self.firewall.sg_members)

    def test_remove_all_unused_info(self):
        # With no filtered ports left, all cached SG info is purged.
        self._setup_fake_firewall_members_and_rules(self.firewall)
        self.firewall.filtered_ports = {}
        self.firewall._remove_unused_security_group_info()
        self.assertFalse(self.firewall.sg_members)
        self.assertFalse(self.firewall.sg_rules)

    def test_single_fallback_accept_rule(self):
        p1, p2 = self._fake_port(), self._fake_port()
        self.firewall._setup_chains_apply(dict(p1=p1, p2=p2), {})
        v4_adds = self.firewall.iptables.ipv4['filter'].add_rule.mock_calls
        v6_adds = self.firewall.iptables.ipv6['filter'].add_rule.mock_calls
        sg_chain_v4_accept = [call for call in v4_adds
                              if call == mock.call('sg-chain', '-j ACCEPT')]
        sg_chain_v6_accept = [call for call in v6_adds
                              if call == mock.call('sg-chain', '-j ACCEPT')]
        # The terminal ACCEPT is emitted once per table, not once per port.
        self.assertEqual(1, len(sg_chain_v4_accept))
        self.assertEqual(1, len(sg_chain_v6_accept))

    def test_remove_port_filter_with_destroy_ipset_chain(self):
        """When the last member of a group disappears, the backing ipsets
        are destroyed on deferred-apply flush."""
        self.firewall.sg_rules = self._fake_sg_rules()
        port = self._fake_port()
        self.firewall.pre_sg_members = {'fake_sgid': {
            'IPv4': [],
            'IPv6': []}}
        sg_members = {'IPv4': ['10.0.0.1'], 'IPv6': ['fe80::1']}
        self.firewall.update_security_group_members('fake_sgid', sg_members)
        self.firewall.prepare_port_filter(port)
        self.firewall.filter_defer_apply_on()
        self.firewall.sg_members = {'fake_sgid': {
            'IPv4': [],
            'IPv6': []}}
        self.firewall.pre_sg_members = {'fake_sgid': {
            'IPv4': ['10.0.0.1'],
            'IPv6': ['fe80::1']}}
        self.firewall.remove_port_filter(port)
        self.firewall.filter_defer_apply_off()
        calls = [
            mock.call.set_members('fake_sgid', 'IPv4', ['10.0.0.1']),
            mock.call.set_members('fake_sgid', 'IPv6', ['fe80::1']),
            mock.call.get_name('fake_sgid', 'IPv4'),
            mock.call.set_name_exists('NIPv4fake_sgid'),
            mock.call.get_name('fake_sgid', 'IPv6'),
            mock.call.set_name_exists('NIPv6fake_sgid'),
            mock.call.destroy('fake_sgid', 'IPv4'),
            mock.call.destroy('fake_sgid', 'IPv6')]
        self.firewall.ipset.assert_has_calls(calls, any_order=True)

    def test_filter_defer_apply_off_with_sg_only_ipv6_rule(self):
        """Dropping the last IPv4 rule (IPv6 rule remains) destroys only
        the IPv4 ipset on deferred-apply flush."""
        self.firewall.sg_rules = self._fake_sg_rules()
        self.firewall.pre_sg_rules = self._fake_sg_rules()
        self.firewall.ipset_chains = {'IPv4fake_sgid': ['10.0.0.2'],
                                      'IPv6fake_sgid': ['fe80::1']}
        self.firewall.sg_members = {'fake_sgid': {
            'IPv4': ['10.0.0.2'],
            'IPv6': ['fe80::1']}}
        self.firewall.pre_sg_members = {'fake_sgid': {
            'IPv4': ['10.0.0.2'],
            'IPv6': ['fe80::1']}}
        self.firewall.sg_rules['fake_sgid'].remove(
            {'direction': 'ingress', 'remote_group_id': 'fake_sgid',
             'ethertype': 'IPv4'})
        # NOTE(review): dict.update() with no arguments is a no-op here;
        # looks vestigial — confirm intent.
        self.firewall.sg_rules.update()
        self.firewall._defer_apply = True
        port = self._fake_port()
        self.firewall.filtered_ports['tapfake_dev'] = port
        self.firewall._pre_defer_filtered_ports = {}
        self.firewall._pre_defer_unfiltered_ports = {}
        self.firewall.filter_defer_apply_off()
        calls = [mock.call.destroy('fake_sgid', 'IPv4')]
        # Positional True is assert_has_calls' any_order flag.
        self.firewall.ipset.assert_has_calls(calls, True)

    def test_sg_rule_expansion_with_remote_ips(self):
        other_ips = ['10.0.0.2', '10.0.0.3', '10.0.0.4']
        self.firewall.sg_members = {'fake_sgid': {
            'IPv4': [FAKE_IP['IPv4']] + other_ips,
            'IPv6': [FAKE_IP['IPv6']]}}
        port = self._fake_port()
        rule = self._fake_sg_rule_for_ethertype(_IPv4, FAKE_SGID)
        rules = self.firewall._expand_sg_rule_with_remote_ips(
            rule, port, 'ingress')
        # The port's own fixed IP is excluded from the expansion.
        self.assertEqual(list(rules),
                         [dict(list(rule.items()) +
                               [('source_ip_prefix', '%s/32' % ip)])
                          for ip in other_ips])

    def test_build_ipv4v6_mac_ip_list(self):
        # MAC is normalized from dash-separated to unix colon form.
        mac_oth = 'ffff-ff0f-ffff'
        mac_unix = 'FF:FF:FF:0F:FF:FF'
        ipv4 = FAKE_IP['IPv4']
        ipv6 = FAKE_IP['IPv6']
        fake_ipv4_pair = []
        fake_ipv4_pair.append((mac_unix, ipv4))
        fake_ipv6_pair = []
        fake_ipv6_pair.append((mac_unix, ipv6))
        # EUI-64 link-local address derived from the MAC.
        fake_ipv6_pair.append((mac_unix, 'fe80::fdff:ffff:fe0f:ffff'))
        mac_ipv4_pairs = []
        mac_ipv6_pairs = []
        self.firewall._build_ipv4v6_mac_ip_list(mac_oth, ipv4,
                                                mac_ipv4_pairs,
                                                mac_ipv6_pairs)
        self.assertEqual(fake_ipv4_pair, mac_ipv4_pairs)
        self.firewall._build_ipv4v6_mac_ip_list(mac_oth, ipv6,
                                                mac_ipv4_pairs,
                                                mac_ipv6_pairs)
        self.assertEqual(fake_ipv6_pair, mac_ipv6_pairs)
        # ensure that LLA is not added again for another v6 addr
        ipv62 = 'fe81::1'
        self.firewall._build_ipv4v6_mac_ip_list(mac_oth, ipv62,
                                                mac_ipv4_pairs,
                                                mac_ipv6_pairs)
        fake_ipv6_pair.append((mac_unix, ipv62))
        self.assertEqual(fake_ipv6_pair, mac_ipv6_pairs)
class OVSHybridIptablesFirewallTestCase(BaseIptablesFirewallTestCase):
    """Tests for conntrack-zone allocation in the OVS-hybrid driver."""

    def test__populate_initial_zone_map(self):
        self.assertEqual(self._dev_zone_map,
                         self.firewall.ipconntrack._device_zone_map)

    def test__generate_device_zone(self):
        # initial data has 1, 2, and 9 in use.
        # we fill from top up first.
        self.assertEqual(
            10, self.firewall.ipconntrack._generate_device_zone('test'))
        # once it's maxed out, it scans for gaps
        self.firewall.ipconntrack._device_zone_map['someport'] = (
            ip_conntrack.MAX_CONNTRACK_ZONES)
        for i in range(3, 9):
            self.assertEqual(
                i, self.firewall.ipconntrack._generate_device_zone(i))
        # 9 and 10 are taken so next should be 11
        self.assertEqual(
            11, self.firewall.ipconntrack._generate_device_zone('p11'))
        # take out zone 1 and make sure it's selected
        self.firewall.ipconntrack._device_zone_map.pop('e804433b-61')
        self.assertEqual(
            1, self.firewall.ipconntrack._generate_device_zone('p1'))
        # fill it up and then make sure an extra throws an error
        for i in range(1, 65536):
            self.firewall.ipconntrack._device_zone_map['dev-%s' % i] = i
        with testtools.ExpectedException(n_exc.CTZoneExhaustedError):
            self.firewall.ipconntrack._find_open_zone()
        # with it full, try again, this should trigger a cleanup and return 1
        self.assertEqual(
            1, self.firewall.ipconntrack._generate_device_zone('p12'))
        self.assertEqual({'p12': 1},
                         self.firewall.ipconntrack._device_zone_map)

    def test_get_device_zone(self):
        dev = {'device': 'tap1234', 'network_id': '12345678901234567'}
        # initial data has 1, 2, and 9 in use.
        self.assertEqual(10, self.firewall.ipconntrack.get_device_zone(dev))
        # should have been truncated to 11 chars
        self._dev_zone_map.update({'12345678901': 10})
        self.assertEqual(self._dev_zone_map,
                         self.firewall.ipconntrack._device_zone_map)

    def test_multiple_firewall_with_common_conntrack(self):
        # Two driver instances must share one IpConntrackManager.
        self.firewall1 = iptables_firewall.OVSHybridIptablesFirewallDriver()
        self.firewall2 = iptables_firewall.OVSHybridIptablesFirewallDriver()
        self.assertEqual(id(self.firewall1.ipconntrack),
                         id(self.firewall2.ipconntrack))
|
eayunstack/neutron
|
neutron/tests/unit/agent/linux/test_iptables_firewall.py
|
Python
|
apache-2.0
| 97,081
|
[
"FEFF"
] |
2ddbcaa7d3a3c00e5122258039c919aa4dad9b9c43192fd2a40a6e5d372ab977
|
import os
import unittest
import pytest
import deepchem as dc
import numpy as np
from deepchem.models.losses import L2Loss
from deepchem.feat.mol_graphs import ConvMol
try:
    import tensorflow as tf
    from tensorflow.keras.layers import Input, Dense

    class MLP(dc.models.KerasModel):
        """Minimal sigmoid-output MLP fixture for weight-transfer tests.

        Parameters
        ----------
        n_tasks: int
            number of output units.
        feature_dim: int
            size of the input feature vector.
        hidden_layer_size: int
            width of the single hidden layer.
        """

        def __init__(self,
                     n_tasks=1,
                     feature_dim=100,
                     hidden_layer_size=64,
                     **kwargs):
            self.feature_dim = feature_dim
            self.hidden_layer_size = hidden_layer_size
            self.n_tasks = n_tasks
            model, loss, output_types = self._build_graph()
            super(MLP, self).__init__(
                model=model, loss=loss, output_types=output_types, **kwargs)

        def _build_graph(self):
            """Build the Keras graph; returns (model, loss, output_types)."""
            inputs = Input(
                dtype=tf.float32, shape=(self.feature_dim,), name="Input")
            out1 = Dense(units=self.hidden_layer_size, activation='relu')(inputs)
            final = Dense(units=self.n_tasks, activation='sigmoid')(out1)
            outputs = [final]
            output_types = ['prediction']
            loss = dc.models.losses.BinaryCrossEntropy()
            model = tf.keras.Model(inputs=[inputs], outputs=outputs)
            return model, loss, output_types

    has_tensorflow = True
except ImportError:
    # Was a bare ``except:``, which also swallowed KeyboardInterrupt,
    # SystemExit and genuine bugs inside the class body above. Only an
    # import failure should mark TensorFlow as unavailable; the tests
    # below are then skipped via the pytest.mark.tensorflow marker.
    has_tensorflow = False
class TestPretrained(unittest.TestCase):
    """Tests for KerasModel.load_from_pretrained: explicit assignment/value
    maps, automatic matching on subclassed models, and checkpoint-restore
    equivalency."""

    @pytest.mark.tensorflow
    def setUp(self):
        # Tiny linearly separable dataset: label is 1 iff x0 > x1.
        self.feature_dim = 2
        self.hidden_layer_size = 10
        data_points = 10
        X = np.random.randn(data_points, self.feature_dim)
        y = (X[:, 0] > X[:, 1]).astype(np.float32)
        self.dataset = dc.data.NumpyDataset(X, y)

    @pytest.mark.tensorflow
    def test_load_from_pretrained(self):
        """Tests loading pretrained model."""
        source_model = MLP(
            hidden_layer_size=self.hidden_layer_size,
            feature_dim=self.feature_dim,
            batch_size=10)
        source_model.fit(self.dataset, nb_epoch=1000, checkpoint_interval=0)
        # Destination differs in output width (n_tasks=10), so only the
        # non-top variables (all but the last kernel/bias pair) are mapped.
        dest_model = MLP(
            feature_dim=self.feature_dim,
            hidden_layer_size=self.hidden_layer_size,
            n_tasks=10)
        assignment_map = dict()
        value_map = dict()
        dest_vars = dest_model.model.trainable_variables[:-2]
        for idx, dest_var in enumerate(dest_vars):
            source_var = source_model.model.trainable_variables[idx]
            # experimental_ref() gives a hashable handle for the variable.
            assignment_map[source_var.experimental_ref()] = dest_var
            value_map[source_var.experimental_ref()] = source_var.numpy()
        dest_model.load_from_pretrained(
            source_model=source_model,
            assignment_map=assignment_map,
            value_map=value_map)
        # Every mapped destination variable must equal the source value.
        for source_var, dest_var in assignment_map.items():
            source_val = source_var.deref().numpy()
            dest_val = dest_var.numpy()
            np.testing.assert_array_almost_equal(source_val, dest_val)

    @pytest.mark.tensorflow
    def test_load_pretrained_subclassed_model(self):
        """Weight transfer between two identically shaped GraphConvModels
        with include_top=False and automatic (None) maps."""
        from rdkit import Chem
        bi_tasks = ['a', 'b']
        y = np.ones((3, 2))
        smiles = ['C', 'CC', 'CCC']
        mols = [Chem.MolFromSmiles(smile) for smile in smiles]
        featurizer = dc.feat.ConvMolFeaturizer()
        X = featurizer.featurize(mols)
        dataset = dc.data.NumpyDataset(X, y, ids=smiles)
        source_model = dc.models.GraphConvModel(
            n_tasks=len(bi_tasks),
            graph_conv_layers=[128, 128],
            dense_layer_size=512,
            dropout=0,
            mode='regression',
            learning_rate=0.001,
            batch_size=8,
            model_dir="model")
        source_model.fit(dataset)
        dest_model = dc.models.GraphConvModel(
            n_tasks=len(bi_tasks),
            graph_conv_layers=[128, 128],
            dense_layer_size=512,
            dropout=0,
            mode='regression',
            learning_rate=0.001,
            batch_size=8)
        # Subclassed models need a concrete input batch so the destination
        # graph can be built before weights are copied.
        X_b, y_b, w_b, ids_b = next(
            dataset.iterbatches(batch_size=8, deterministic=True,
                                pad_batches=True))
        multiConvMol = ConvMol.agglomerate_mols(X_b)
        n_samples = np.array(X_b.shape[0])
        inputs = [
            multiConvMol.get_atom_features(), multiConvMol.deg_slice,
            np.array(multiConvMol.membership), n_samples
        ]
        for i in range(1, len(multiConvMol.get_deg_adjacency_lists())):
            inputs.append(multiConvMol.get_deg_adjacency_lists()[i])
        dest_model.load_from_pretrained(
            source_model=source_model,
            assignment_map=None,
            value_map=None,
            include_top=False,
            inputs=inputs)
        # All non-top variables must have been copied over exactly.
        source_vars = source_model.model.trainable_variables[:-2]
        dest_vars = dest_model.model.trainable_variables[:-2]
        assert len(source_vars) == len(dest_vars)
        for source_var, dest_var in zip(*(source_vars, dest_vars)):
            source_val = source_var.numpy()
            dest_val = dest_var.numpy()
            np.testing.assert_array_almost_equal(source_val, dest_val)

    @pytest.mark.tensorflow
    def test_restore_equivalency(self):
        """Test for restore based pretrained model loading."""
        source_model = MLP(
            feature_dim=self.feature_dim,
            hidden_layer_size=self.hidden_layer_size)
        source_model.fit(self.dataset, nb_epoch=1000)
        dest_model = MLP(
            feature_dim=self.feature_dim,
            hidden_layer_size=self.hidden_layer_size)
        # With no maps and include_top=True this path is equivalent to a
        # checkpoint restore from source_model's model_dir.
        dest_model.load_from_pretrained(
            source_model=source_model,
            assignment_map=None,
            value_map=None,
            model_dir=None,
            include_top=True)
        predictions = np.squeeze(dest_model.predict_on_batch(self.dataset.X))
        np.testing.assert_array_almost_equal(self.dataset.y,
                                             np.round(predictions))
|
deepchem/deepchem
|
deepchem/models/tests/test_pretrained_keras.py
|
Python
|
mit
| 5,350
|
[
"RDKit"
] |
21771535979108881c229bbb91e9c36a4f59f1ec185ad2b9703f119104a93132
|
from warnings import warn
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
from shapely.geometry import box
from shapely.geometry.base import BaseGeometry
from .array import GeometryArray, GeometryDtype
def is_geometry_type(data):
    """
    Check if the data is of geometry dtype.

    Does not include object array of shapely scalars.
    """
    # GeometryArray, GeoSeries and Series[GeometryArray] all expose a
    # GeometryDtype via ``.dtype``; anything else (including object arrays
    # of shapely geometries) does not count as geometry-typed.
    dtype = getattr(data, "dtype", None)
    return isinstance(dtype, GeometryDtype)
def _delegate_binary_method(op, this, other, align, *args, **kwargs):
    # type: (str, GeoSeries, GeoSeries) -> GeoSeries/Series
    """Dispatch binary operation ``op`` to the underlying GeometryArray.

    ``other`` may be another GeoSeries/GeoDataFrame (optionally aligned on
    index first) or a single shapely geometry applied element-wise.
    Returns the raw result data together with the index to attach it to.
    """
    this = this.geometry
    if isinstance(other, GeoPandasBase):
        if align and not this.index.equals(other.index):
            # Alignment introduces missing values for non-matching labels;
            # warn so users notice an (often unintended) index mismatch.
            warn("The indices of the two GeoSeries are different.")
            this, other = this.align(other.geometry)
        else:
            other = other.geometry
        a_this = GeometryArray(this.values)
        other = GeometryArray(other.values)
    elif isinstance(other, BaseGeometry):
        # Scalar geometry: GeometryArray ops broadcast it element-wise.
        a_this = GeometryArray(this.values)
    else:
        raise TypeError(type(this), type(other))
    data = getattr(a_this, op)(other, *args, **kwargs)
    return data, this.index
def _binary_geo(op, this, other, align):
    # type: (str, GeoSeries, GeoSeries) -> GeoSeries
    """Binary operation on GeoSeries objects that returns a GeoSeries"""
    from .geoseries import GeoSeries

    result, result_index = _delegate_binary_method(op, this, other, align)
    # Wrap the raw geometry data back up, preserving the left CRS.
    return GeoSeries(result.data, index=result_index, crs=this.crs)
def _binary_op(op, this, other, align, *args, **kwargs):
    # type: (str, GeoSeries, GeoSeries, args/kwargs) -> Series[bool/float]
    """Binary operation on GeoSeries objects that returns a Series"""
    values, result_index = _delegate_binary_method(
        op, this, other, align, *args, **kwargs)
    # Non-geometry results (bool/float arrays) become a plain Series.
    return Series(values, index=result_index)
def _delegate_property(op, this):
    # type: (str, GeoSeries) -> GeoSeries/Series
    """Read property ``op`` from the underlying GeometryArray and wrap the
    result as a GeoSeries (geometry result) or Series (scalar result)."""
    arr = GeometryArray(this.geometry.values)
    result = getattr(arr, op)
    if not isinstance(result, GeometryArray):
        return Series(result, index=this.index)
    from .geoseries import GeoSeries

    return GeoSeries(result.data, index=this.index, crs=this.crs)
def _delegate_geo_method(op, this, *args, **kwargs):
    # type: (str, GeoSeries) -> GeoSeries
    """Unary operation that returns a GeoSeries"""
    from .geoseries import GeoSeries

    arr = GeometryArray(this.geometry.values)
    result = getattr(arr, op)(*args, **kwargs)
    return GeoSeries(result.data, index=this.index, crs=this.crs)
class GeoPandasBase(object):
@property
def area(self) -> Series:
    """Returns a ``Series`` containing the area of each geometry in the
    ``GeoSeries`` expressed in the units of the CRS.

    Examples
    --------

    >>> from shapely.geometry import Polygon, LineString, Point
    >>> s = geopandas.GeoSeries(
    ...     [
    ...         Polygon([(0, 0), (1, 1), (0, 1)]),
    ...         Polygon([(10, 0), (10, 5), (0, 0)]),
    ...         Polygon([(0, 0), (2, 2), (2, 0)]),
    ...         LineString([(0, 0), (1, 1), (0, 1)]),
    ...         Point(0, 1)
    ...     ]
    ... )
    >>> s
    0    POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
    1    POLYGON ((10.00000 0.00000, 10.00000 5.00000, ...
    2    POLYGON ((0.00000 0.00000, 2.00000 2.00000, 2....
    3    LINESTRING (0.00000 0.00000, 1.00000 1.00000, ...
    4                              POINT (0.00000 1.00000)
    dtype: geometry

    >>> s.area
    0     0.5
    1    25.0
    2     2.0
    3     0.0
    4     0.0
    dtype: float64

    See also
    --------
    GeoSeries.length : measure length

    Notes
    -----
    Area may be invalid for a geographic CRS using degrees as units;
    use :meth:`GeoSeries.to_crs` to project geometries to a planar
    CRS before using this function.

    Every operation in GeoPandas is planar, i.e. the potential third
    dimension is not taken into account.
    """
    return _delegate_property("area", self)
@property
def crs(self):
    """
    The Coordinate Reference System (CRS) represented as a ``pyproj.CRS``
    object.

    Returns None if the CRS is not set.

    :getter: Returns a ``pyproj.CRS`` or None. When setting, the value
        can be anything accepted by
        :meth:`pyproj.CRS.from_user_input() <pyproj.crs.CRS.from_user_input>`,
        such as an authority string (eg "EPSG:4326") or a WKT string.

    Examples
    --------

    >>> s.crs  # doctest: +SKIP
    <Geographic 2D CRS: EPSG:4326>
    Name: WGS 84
    Axis Info [ellipsoidal]:
    - Lat[north]: Geodetic latitude (degree)
    - Lon[east]: Geodetic longitude (degree)
    Area of Use:
    - name: World
    - bounds: (-180.0, -90.0, 180.0, 90.0)
    Datum: World Geodetic System 1984
    - Ellipsoid: WGS 84
    - Prime Meridian: Greenwich

    See also
    --------
    GeoSeries.set_crs : assign CRS
    GeoSeries.to_crs : re-project to another CRS
    """
    # The CRS metadata lives on the underlying GeometryArray.
    return self.geometry.values.crs
@crs.setter
def crs(self, value):
    """Sets the value of the crs"""
    # Delegates to the GeometryArray; accepts any pyproj user input.
    self.geometry.values.crs = value
@property
def geom_type(self) -> Series:
    """
    Returns a ``Series`` of strings specifying the `Geometry Type` of each
    object.

    Examples
    --------
    >>> from shapely.geometry import Point, Polygon, LineString
    >>> d = {'geometry': [Point(2, 1), Polygon([(0, 0), (1, 1), (1, 0)]),
    ... LineString([(0, 0), (1, 1)])]}
    >>> gdf = geopandas.GeoDataFrame(d, crs="EPSG:4326")
    >>> gdf.geom_type
    0         Point
    1       Polygon
    2    LineString
    dtype: object
    """
    return _delegate_property("geom_type", self)
@property
def type(self) -> Series:
    """Return the geometry type of each geometry in the GeoSeries"""
    # Plain alias for :attr:`geom_type`.
    return self.geom_type
@property
def length(self) -> Series:
    """Returns a ``Series`` containing the length of each geometry
    expressed in the units of the CRS.

    In the case of a (Multi)Polygon it measures the length
    of its exterior (i.e. perimeter).

    Examples
    --------

    >>> from shapely.geometry import Polygon, LineString, MultiLineString, Point, \
    GeometryCollection
    >>> s = geopandas.GeoSeries(
    ...     [
    ...         LineString([(0, 0), (1, 1), (0, 1)]),
    ...         LineString([(10, 0), (10, 5), (0, 0)]),
    ...         MultiLineString([((0, 0), (1, 0)), ((-1, 0), (1, 0))]),
    ...         Polygon([(0, 0), (1, 1), (0, 1)]),
    ...         Point(0, 1),
    ...         GeometryCollection([Point(1, 0), LineString([(10, 0), (10, 5), (0,\
    0)])])
    ...     ]
    ... )
    >>> s
    0    LINESTRING (0.00000 0.00000, 1.00000 1.00000, ...
    1    LINESTRING (10.00000 0.00000, 10.00000 5.00000...
    2    MULTILINESTRING ((0.00000 0.00000, 1.00000 0.0...
    3    POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
    4                              POINT (0.00000 1.00000)
    5    GEOMETRYCOLLECTION (POINT (1.00000 0.00000), L...
    dtype: geometry

    >>> s.length
    0     2.414214
    1    16.180340
    2     3.000000
    3     3.414214
    4     0.000000
    5    16.180340
    dtype: float64

    See also
    --------
    GeoSeries.area : measure area of a polygon

    Notes
    -----
    Length may be invalid for a geographic CRS using degrees as units;
    use :meth:`GeoSeries.to_crs` to project geometries to a planar
    CRS before using this function.

    Every operation in GeoPandas is planar, i.e. the potential third
    dimension is not taken into account.
    """
    return _delegate_property("length", self)
@property
def is_valid(self) -> Series:
    """Returns a ``Series`` of ``dtype('bool')`` with value ``True`` for
    geometries that are valid.

    Examples
    --------

    An example with one invalid polygon (a bowtie geometry crossing itself)
    and one missing geometry:

    >>> from shapely.geometry import Polygon
    >>> s = geopandas.GeoSeries(
    ...     [
    ...         Polygon([(0, 0), (1, 1), (0, 1)]),
    ...         Polygon([(0,0), (1, 1), (1, 0), (0, 1)]),  # bowtie geometry
    ...         Polygon([(0, 0), (2, 2), (2, 0)]),
    ...         None
    ...     ]
    ... )
    >>> s
    0    POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
    1    POLYGON ((0.00000 0.00000, 1.00000 1.00000, 1....
    2    POLYGON ((0.00000 0.00000, 2.00000 2.00000, 2....
    3                                                 None
    dtype: geometry

    >>> s.is_valid
    0     True
    1    False
    2     True
    3    False
    dtype: bool
    """
    return _delegate_property("is_valid", self)
@property
def is_empty(self):
"""
Returns a ``Series`` of ``dtype('bool')`` with value ``True`` for
empty geometries.
Examples
--------
An example of a GeoDataFrame with one empty point, one point and one missing
value:
>>> from shapely.geometry import Point
>>> d = {'geometry': [Point(), Point(2, 1), None]}
>>> gdf = geopandas.GeoDataFrame(d, crs="EPSG:4326")
>>> gdf
geometry
0 GEOMETRYCOLLECTION EMPTY
1 POINT (2.00000 1.00000)
2 None
>>> gdf.is_empty
0 True
1 False
2 False
dtype: bool
See Also
--------
GeoSeries.isna : detect missing values
"""
return _delegate_property("is_empty", self)
@property
def is_simple(self):
"""Returns a ``Series`` of ``dtype('bool')`` with value ``True`` for
geometries that do not cross themselves.
This is meaningful only for `LineStrings` and `LinearRings`.
Examples
--------
>>> from shapely.geometry import LineString
>>> s = geopandas.GeoSeries(
... [
... LineString([(0, 0), (1, 1), (1, -1), (0, 1)]),
... LineString([(0, 0), (1, 1), (1, -1)]),
... ]
... )
>>> s
0 LINESTRING (0.00000 0.00000, 1.00000 1.00000, ...
1 LINESTRING (0.00000 0.00000, 1.00000 1.00000, ...
dtype: geometry
>>> s.is_simple
0 False
1 True
dtype: bool
"""
return _delegate_property("is_simple", self)
@property
def is_ring(self):
"""Returns a ``Series`` of ``dtype('bool')`` with value ``True`` for
features that are closed.
When constructing a LinearRing, the sequence of coordinates may be
explicitly closed by passing identical values in the first and last indices.
Otherwise, the sequence will be implicitly closed by copying the first tuple
to the last index.
Examples
--------
>>> from shapely.geometry import LineString, LinearRing
>>> s = geopandas.GeoSeries(
... [
... LineString([(0, 0), (1, 1), (1, -1)]),
... LineString([(0, 0), (1, 1), (1, -1), (0, 0)]),
... LinearRing([(0, 0), (1, 1), (1, -1)]),
... ]
... )
>>> s
0 LINESTRING (0.00000 0.00000, 1.00000 1.00000, ...
1 LINESTRING (0.00000 0.00000, 1.00000 1.00000, ...
2 LINEARRING (0.00000 0.00000, 1.00000 1.00000, ...
dtype: geometry
>>> s.is_ring
0 False
1 True
2 True
dtype: bool
"""
return _delegate_property("is_ring", self)
@property
def has_z(self):
"""Returns a ``Series`` of ``dtype('bool')`` with value ``True`` for
features that have a z-component.
Notes
------
Every operation in GeoPandas is planar, i.e. the potential third
dimension is not taken into account.
Examples
--------
>>> from shapely.geometry import Point
>>> s = geopandas.GeoSeries(
... [
... Point(0, 1),
... Point(0, 1, 2),
... ]
... )
>>> s
0 POINT (0.00000 1.00000)
1 POINT Z (0.00000 1.00000 2.00000)
dtype: geometry
>>> s.has_z
0 False
1 True
dtype: bool
"""
return _delegate_property("has_z", self)
#
# Unary operations that return a GeoSeries
#
@property
def boundary(self):
"""Returns a ``GeoSeries`` of lower dimensional objects representing
each geometries's set-theoretic `boundary`.
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (1, 1), (0, 1)]),
... LineString([(0, 0), (1, 1), (1, 0)]),
... Point(0, 0),
... ]
... )
>>> s
0 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
1 LINESTRING (0.00000 0.00000, 1.00000 1.00000, ...
2 POINT (0.00000 0.00000)
dtype: geometry
>>> s.boundary
0 LINESTRING (0.00000 0.00000, 1.00000 1.00000, ...
1 MULTIPOINT (0.00000 0.00000, 1.00000 0.00000)
2 GEOMETRYCOLLECTION EMPTY
dtype: geometry
See also
--------
GeoSeries.exterior : outer boundary (without interior rings)
"""
return _delegate_property("boundary", self)
@property
def centroid(self):
"""Returns a ``GeoSeries`` of points representing the centroid of each
geometry.
Note that centroid does not have to be on or within original geometry.
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (1, 1), (0, 1)]),
... LineString([(0, 0), (1, 1), (1, 0)]),
... Point(0, 0),
... ]
... )
>>> s
0 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
1 LINESTRING (0.00000 0.00000, 1.00000 1.00000, ...
2 POINT (0.00000 0.00000)
dtype: geometry
>>> s.centroid
0 POINT (0.33333 0.66667)
1 POINT (0.70711 0.50000)
2 POINT (0.00000 0.00000)
dtype: geometry
See also
--------
GeoSeries.representative_point : point guaranteed to be within each geometry
"""
return _delegate_property("centroid", self)
@property
def convex_hull(self):
"""Returns a ``GeoSeries`` of geometries representing the convex hull
of each geometry.
The convex hull of a geometry is the smallest convex `Polygon`
containing all the points in each geometry, unless the number of points
in the geometric object is less than three. For two points, the convex
hull collapses to a `LineString`; for 1, a `Point`.
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point, MultiPoint
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (1, 1), (0, 1)]),
... LineString([(0, 0), (1, 1), (1, 0)]),
... MultiPoint([(0, 0), (1, 1), (0, 1), (1, 0), (0.5, 0.5)]),
... MultiPoint([(0, 0), (1, 1)]),
... Point(0, 0),
... ]
... )
>>> s
0 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
1 LINESTRING (0.00000 0.00000, 1.00000 1.00000, ...
2 MULTIPOINT (0.00000 0.00000, 1.00000 1.00000, ...
3 MULTIPOINT (0.00000 0.00000, 1.00000 1.00000)
4 POINT (0.00000 0.00000)
dtype: geometry
>>> s.convex_hull
0 POLYGON ((0.00000 0.00000, 0.00000 1.00000, 1....
1 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 1....
2 POLYGON ((0.00000 0.00000, 0.00000 1.00000, 1....
3 LINESTRING (0.00000 0.00000, 1.00000 1.00000)
4 POINT (0.00000 0.00000)
dtype: geometry
See also
--------
GeoSeries.envelope : bounding rectangle geometry
"""
return _delegate_property("convex_hull", self)
@property
def envelope(self):
"""Returns a ``GeoSeries`` of geometries representing the envelope of
each geometry.
The envelope of a geometry is the bounding rectangle. That is, the
point or smallest rectangular polygon (with sides parallel to the
coordinate axes) that contains the geometry.
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point, MultiPoint
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (1, 1), (0, 1)]),
... LineString([(0, 0), (1, 1), (1, 0)]),
... MultiPoint([(0, 0), (1, 1)]),
... Point(0, 0),
... ]
... )
>>> s
0 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
1 LINESTRING (0.00000 0.00000, 1.00000 1.00000, ...
2 MULTIPOINT (0.00000 0.00000, 1.00000 1.00000)
3 POINT (0.00000 0.00000)
dtype: geometry
>>> s.envelope
0 POLYGON ((0.00000 0.00000, 1.00000 0.00000, 1....
1 POLYGON ((0.00000 0.00000, 1.00000 0.00000, 1....
2 POLYGON ((0.00000 0.00000, 1.00000 0.00000, 1....
3 POINT (0.00000 0.00000)
dtype: geometry
See also
--------
GeoSeries.convex_hull : convex hull geometry
"""
return _delegate_property("envelope", self)
@property
def exterior(self):
"""Returns a ``GeoSeries`` of LinearRings representing the outer
boundary of each polygon in the GeoSeries.
Applies to GeoSeries containing only Polygons. Returns ``None``` for
other geometry types.
Examples
--------
>>> from shapely.geometry import Polygon, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (1, 1), (0, 1)]),
... Polygon([(1, 0), (2, 1), (0, 0)]),
... Point(0, 1)
... ]
... )
>>> s
0 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
1 POLYGON ((1.00000 0.00000, 2.00000 1.00000, 0....
2 POINT (0.00000 1.00000)
dtype: geometry
>>> s.exterior
0 LINEARRING (0.00000 0.00000, 1.00000 1.00000, ...
1 LINEARRING (1.00000 0.00000, 2.00000 1.00000, ...
2 None
dtype: geometry
See also
--------
GeoSeries.boundary : complete set-theoretic boundary
GeoSeries.interiors : list of inner rings of each polygon
"""
# TODO: return empty geometry for non-polygons
return _delegate_property("exterior", self)
@property
def interiors(self):
"""Returns a ``Series`` of List representing the
inner rings of each polygon in the GeoSeries.
Applies to GeoSeries containing only Polygons.
Returns
----------
inner_rings: Series of List
Inner rings of each polygon in the GeoSeries.
Examples
--------
>>> from shapely.geometry import Polygon
>>> s = geopandas.GeoSeries(
... [
... Polygon(
... [(0, 0), (0, 5), (5, 5), (5, 0)],
... [[(1, 1), (2, 1), (1, 2)], [(1, 4), (2, 4), (2, 3)]],
... ),
... Polygon([(1, 0), (2, 1), (0, 0)]),
... ]
... )
>>> s
0 POLYGON ((0.00000 0.00000, 0.00000 5.00000, 5....
1 POLYGON ((1.00000 0.00000, 2.00000 1.00000, 0....
dtype: geometry
>>> s.interiors
0 [LINEARRING (1 1, 2 1, 1 2, 1 1), LINEARRING (...
1 []
dtype: object
See also
--------
GeoSeries.exterior : outer boundary
"""
return _delegate_property("interiors", self)
def representative_point(self):
"""Returns a ``GeoSeries`` of (cheaply computed) points that are
guaranteed to be within each geometry.
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (1, 1), (0, 1)]),
... LineString([(0, 0), (1, 1), (1, 0)]),
... Point(0, 0),
... ]
... )
>>> s
0 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
1 LINESTRING (0.00000 0.00000, 1.00000 1.00000, ...
2 POINT (0.00000 0.00000)
dtype: geometry
>>> s.representative_point()
0 POINT (0.25000 0.50000)
1 POINT (1.00000 1.00000)
2 POINT (0.00000 0.00000)
dtype: geometry
See also
--------
GeoSeries.centroid : geometric centroid
"""
return _delegate_geo_method("representative_point", self)
#
# Reduction operations that return a Shapely geometry
#
@property
def cascaded_union(self):
"""Deprecated: use `unary_union` instead"""
warn(
"The 'cascaded_union' attribute is deprecated, use 'unary_union' instead",
FutureWarning,
stacklevel=2,
)
return self.geometry.values.unary_union()
@property
def unary_union(self):
"""Returns a geometry containing the union of all geometries in the
``GeoSeries``.
Examples
--------
>>> from shapely.geometry import box
>>> s = geopandas.GeoSeries([box(0,0,1,1), box(0,0,2,2)])
>>> s
0 POLYGON ((1.00000 0.00000, 1.00000 1.00000, 0....
1 POLYGON ((2.00000 0.00000, 2.00000 2.00000, 0....
dtype: geometry
>>> union = s.unary_union
>>> print(union)
POLYGON ((0 1, 0 2, 2 2, 2 0, 1 0, 0 0, 0 1))
"""
return self.geometry.values.unary_union()
#
# Binary operations that return a pandas Series
#
def contains(self, other, align=True):
"""Returns a ``Series`` of ``dtype('bool')`` with value ``True`` for
each aligned geometry that contains `other`.
An object is said to contain `other` if at least one point of `other` lies in
the interior and no points of `other` lie in the exterior of the object.
(Therefore, any given polygon does not contain its own boundary – there is not
any point that lies in the interior.)
If either object is empty, this operation returns ``False``.
This is the inverse of :meth:`within` in the sense that the expression
``a.contains(b) == b.within(a)`` always evaluates to ``True``.
The operation works on a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
Parameters
----------
other : GeoSeries or geometric object
The GeoSeries (elementwise) or geometric object to test if it
is contained.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
Series (bool)
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (1, 1), (0, 1)]),
... LineString([(0, 0), (0, 2)]),
... LineString([(0, 0), (0, 1)]),
... Point(0, 1),
... ],
... index=range(0, 4),
... )
>>> s2 = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (2, 2), (0, 2)]),
... Polygon([(0, 0), (1, 2), (0, 2)]),
... LineString([(0, 0), (0, 2)]),
... Point(0, 1),
... ],
... index=range(1, 5),
... )
>>> s
0 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
1 LINESTRING (0.00000 0.00000, 0.00000 2.00000)
2 LINESTRING (0.00000 0.00000, 0.00000 1.00000)
3 POINT (0.00000 1.00000)
dtype: geometry
>>> s2
1 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
2 POLYGON ((0.00000 0.00000, 1.00000 2.00000, 0....
3 LINESTRING (0.00000 0.00000, 0.00000 2.00000)
4 POINT (0.00000 1.00000)
dtype: geometry
We can check if each geometry of GeoSeries contains a single
geometry:
.. image:: ../../../_static/binary_op-03.svg
:align: center
>>> point = Point(0, 1)
>>> s.contains(point)
0 False
1 True
2 False
3 True
dtype: bool
We can also check two GeoSeries against each other, row by row.
The GeoSeries above have different indices. We can either align both GeoSeries
based on index values and compare elements with the same index using
``align=True`` or ignore index and compare elements based on their matching
order using ``align=False``:
.. image:: ../../../_static/binary_op-02.svg
>>> s2.contains(s, align=True)
0 False
1 False
2 False
3 True
4 False
dtype: bool
>>> s2.contains(s, align=False)
1 True
2 False
3 True
4 True
dtype: bool
Notes
-----
This method works in a row-wise manner. It does not check if an element
of one GeoSeries ``contains`` *any* element of the other one.
See also
--------
GeoSeries.within
"""
return _binary_op("contains", self, other, align)
def geom_equals(self, other, align=True):
"""Returns a ``Series`` of ``dtype('bool')`` with value ``True`` for
each aligned geometry equal to `other`.
An object is said to be equal to `other` if its set-theoretic
`boundary`, `interior`, and `exterior` coincides with those of the
other.
The operation works on a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
Parameters
----------
other : GeoSeries or geometric object
The GeoSeries (elementwise) or geometric object to test for
equality.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
Series (bool)
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (2, 2), (0, 2)]),
... Polygon([(0, 0), (1, 2), (0, 2)]),
... LineString([(0, 0), (0, 2)]),
... Point(0, 1),
... ],
... )
>>> s2 = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (2, 2), (0, 2)]),
... Polygon([(0, 0), (1, 2), (0, 2)]),
... Point(0, 1),
... LineString([(0, 0), (0, 2)]),
... ],
... index=range(1, 5),
... )
>>> s
0 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
1 POLYGON ((0.00000 0.00000, 1.00000 2.00000, 0....
2 LINESTRING (0.00000 0.00000, 0.00000 2.00000)
3 POINT (0.00000 1.00000)
dtype: geometry
>>> s2
1 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
2 POLYGON ((0.00000 0.00000, 1.00000 2.00000, 0....
3 POINT (0.00000 1.00000)
4 LINESTRING (0.00000 0.00000, 0.00000 2.00000)
dtype: geometry
We can check if each geometry of GeoSeries contains a single
geometry:
.. image:: ../../../_static/binary_op-03.svg
:align: center
>>> polygon = Polygon([(0, 0), (2, 2), (0, 2)])
>>> s.geom_equals(polygon)
0 True
1 False
2 False
3 False
dtype: bool
We can also check two GeoSeries against each other, row by row.
The GeoSeries above have different indices. We can either align both GeoSeries
based on index values and compare elements with the same index using
``align=True`` or ignore index and compare elements based on their matching
order using ``align=False``:
.. image:: ../../../_static/binary_op-02.svg
>>> s.geom_equals(s2)
0 False
1 False
2 False
3 True
4 False
dtype: bool
>>> s.geom_equals(s2, align=False)
0 True
1 True
2 False
3 False
dtype: bool
Notes
-----
This method works in a row-wise manner. It does not check if an element
of one GeoSeries is equal to *any* element of the other one.
See also
--------
GeoSeries.geom_almost_equals
GeoSeries.geom_equals_exact
"""
return _binary_op("geom_equals", self, other, align)
def geom_almost_equals(self, other, decimal=6, align=True):
"""Returns a ``Series`` of ``dtype('bool')`` with value ``True`` if
each aligned geometry is approximately equal to `other`.
Approximate equality is tested at all points to the specified `decimal`
place precision.
The operation works on a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
Parameters
----------
other : GeoSeries or geometric object
The GeoSeries (elementwise) or geometric object to compare to.
decimal : int
Decimal place presion used when testing for approximate equality.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
Series (bool)
Examples
--------
>>> from shapely.geometry import Point
>>> s = geopandas.GeoSeries(
... [
... Point(0, 1.1),
... Point(0, 1.01),
... Point(0, 1.001),
... ],
... )
>>> s
0 POINT (0.00000 1.10000)
1 POINT (0.00000 1.01000)
2 POINT (0.00000 1.00100)
dtype: geometry
>>> s.geom_almost_equals(Point(0, 1), decimal=2)
0 False
1 False
2 True
dtype: bool
>>> s.geom_almost_equals(Point(0, 1), decimal=1)
0 False
1 True
2 True
dtype: bool
Notes
-----
This method works in a row-wise manner. It does not check if an element
of one GeoSeries is equal to *any* element of the other one.
See also
--------
GeoSeries.geom_equals
GeoSeries.geom_equals_exact
"""
return _binary_op(
"geom_almost_equals", self, other, decimal=decimal, align=align
)
def geom_equals_exact(self, other, tolerance, align=True):
"""Return True for all geometries that equal aligned *other* to a given
tolerance, else False.
The operation works on a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
Parameters
----------
other : GeoSeries or geometric object
The GeoSeries (elementwise) or geometric object to compare to.
tolerance : float
Decimal place presion used when testing for approximate equality.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
Series (bool)
Examples
--------
>>> from shapely.geometry import Point
>>> s = geopandas.GeoSeries(
... [
... Point(0, 1.1),
... Point(0, 1.0),
... Point(0, 1.2),
... ]
... )
>>> s
0 POINT (0.00000 1.10000)
1 POINT (0.00000 1.00000)
2 POINT (0.00000 1.20000)
dtype: geometry
>>> s.geom_equals_exact(Point(0, 1), tolerance=0.1)
0 False
1 True
2 False
dtype: bool
>>> s.geom_equals_exact(Point(0, 1), tolerance=0.15)
0 True
1 True
2 False
dtype: bool
Notes
-----
This method works in a row-wise manner. It does not check if an element
of one GeoSeries is equal to *any* element of the other one.
See also
--------
GeoSeries.geom_equals
GeoSeries.geom_almost_equals
"""
return _binary_op(
"geom_equals_exact", self, other, tolerance=tolerance, align=align
)
def crosses(self, other, align=True):
"""Returns a ``Series`` of ``dtype('bool')`` with value ``True`` for
each aligned geometry that cross `other`.
An object is said to cross `other` if its `interior` intersects the
`interior` of the other but does not contain it, and the dimension of
the intersection is less than the dimension of the one or the other.
The operation works on a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
Parameters
----------
other : GeoSeries or geometric object
The GeoSeries (elementwise) or geometric object to test if is
crossed.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
Series (bool)
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (2, 2), (0, 2)]),
... LineString([(0, 0), (2, 2)]),
... LineString([(2, 0), (0, 2)]),
... Point(0, 1),
... ],
... )
>>> s2 = geopandas.GeoSeries(
... [
... LineString([(1, 0), (1, 3)]),
... LineString([(2, 0), (0, 2)]),
... Point(1, 1),
... Point(0, 1),
... ],
... index=range(1, 5),
... )
>>> s
0 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
1 LINESTRING (0.00000 0.00000, 2.00000 2.00000)
2 LINESTRING (2.00000 0.00000, 0.00000 2.00000)
3 POINT (0.00000 1.00000)
dtype: geometry
>>> s2
1 LINESTRING (1.00000 0.00000, 1.00000 3.00000)
2 LINESTRING (2.00000 0.00000, 0.00000 2.00000)
3 POINT (1.00000 1.00000)
4 POINT (0.00000 1.00000)
dtype: geometry
We can check if each geometry of GeoSeries crosses a single
geometry:
.. image:: ../../../_static/binary_op-03.svg
:align: center
>>> line = LineString([(-1, 1), (3, 1)])
>>> s.crosses(line)
0 True
1 True
2 True
3 False
dtype: bool
We can also check two GeoSeries against each other, row by row.
The GeoSeries above have different indices. We can either align both GeoSeries
based on index values and compare elements with the same index using
``align=True`` or ignore index and compare elements based on their matching
order using ``align=False``:
.. image:: ../../../_static/binary_op-02.svg
>>> s.crosses(s2, align=True)
0 False
1 True
2 False
3 False
4 False
dtype: bool
>>> s.crosses(s2, align=False)
0 True
1 True
2 False
3 False
dtype: bool
Notice that a line does not cross a point that it contains.
Notes
-----
This method works in a row-wise manner. It does not check if an element
of one GeoSeries ``crosses`` *any* element of the other one.
See also
--------
GeoSeries.disjoint
GeoSeries.intersects
"""
return _binary_op("crosses", self, other, align)
def disjoint(self, other, align=True):
"""Returns a ``Series`` of ``dtype('bool')`` with value ``True`` for
each aligned geometry disjoint to `other`.
An object is said to be disjoint to `other` if its `boundary` and
`interior` does not intersect at all with those of the other.
The operation works on a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
Parameters
----------
other : GeoSeries or geometric object
The GeoSeries (elementwise) or geometric object to test if is
disjoint.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
Series (bool)
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (2, 2), (0, 2)]),
... LineString([(0, 0), (2, 2)]),
... LineString([(2, 0), (0, 2)]),
... Point(0, 1),
... ],
... )
>>> s2 = geopandas.GeoSeries(
... [
... Polygon([(-1, 0), (-1, 2), (0, -2)]),
... LineString([(0, 0), (0, 1)]),
... Point(1, 1),
... Point(0, 0),
... ],
... )
>>> s
0 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
1 LINESTRING (0.00000 0.00000, 2.00000 2.00000)
2 LINESTRING (2.00000 0.00000, 0.00000 2.00000)
3 POINT (0.00000 1.00000)
dtype: geometry
>>> s2
0 POLYGON ((-1.00000 0.00000, -1.00000 2.00000, ...
1 LINESTRING (0.00000 0.00000, 0.00000 1.00000)
2 POINT (1.00000 1.00000)
3 POINT (0.00000 0.00000)
dtype: geometry
We can check each geometry of GeoSeries to a single
geometry:
.. image:: ../../../_static/binary_op-03.svg
:align: center
>>> line = LineString([(0, 0), (2, 0)])
>>> s.disjoint(line)
0 False
1 False
2 False
3 True
dtype: bool
We can also check two GeoSeries against each other, row by row.
We can either align both GeoSeries
based on index values and compare elements with the same index using
``align=True`` or ignore index and compare elements based on their matching
order using ``align=False``:
.. image:: ../../../_static/binary_op-02.svg
>>> s.disjoint(s2)
0 True
1 False
2 False
3 True
dtype: bool
Notes
-----
This method works in a row-wise manner. It does not check if an element
of one GeoSeries is equal to *any* element of the other one.
See also
--------
GeoSeries.intersects
GeoSeries.touches
"""
return _binary_op("disjoint", self, other, align)
def intersects(self, other, align=True):
"""Returns a ``Series`` of ``dtype('bool')`` with value ``True`` for
each aligned geometry that intersects `other`.
An object is said to intersect `other` if its `boundary` and `interior`
intersects in any way with those of the other.
The operation works on a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
Parameters
----------
other : GeoSeries or geometric object
The GeoSeries (elementwise) or geometric object to test if is
intersected.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
Series (bool)
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (2, 2), (0, 2)]),
... LineString([(0, 0), (2, 2)]),
... LineString([(2, 0), (0, 2)]),
... Point(0, 1),
... ],
... )
>>> s2 = geopandas.GeoSeries(
... [
... LineString([(1, 0), (1, 3)]),
... LineString([(2, 0), (0, 2)]),
... Point(1, 1),
... Point(0, 1),
... ],
... index=range(1, 5),
... )
>>> s
0 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
1 LINESTRING (0.00000 0.00000, 2.00000 2.00000)
2 LINESTRING (2.00000 0.00000, 0.00000 2.00000)
3 POINT (0.00000 1.00000)
dtype: geometry
>>> s2
1 LINESTRING (1.00000 0.00000, 1.00000 3.00000)
2 LINESTRING (2.00000 0.00000, 0.00000 2.00000)
3 POINT (1.00000 1.00000)
4 POINT (0.00000 1.00000)
dtype: geometry
We can check if each geometry of GeoSeries crosses a single
geometry:
.. image:: ../../../_static/binary_op-03.svg
:align: center
>>> line = LineString([(-1, 1), (3, 1)])
>>> s.intersects(line)
0 True
1 True
2 True
3 True
dtype: bool
We can also check two GeoSeries against each other, row by row.
The GeoSeries above have different indices. We can either align both GeoSeries
based on index values and compare elements with the same index using
``align=True`` or ignore index and compare elements based on their matching
order using ``align=False``:
.. image:: ../../../_static/binary_op-02.svg
>>> s.intersects(s2, align=True)
0 False
1 True
2 True
3 False
4 False
dtype: bool
>>> s.intersects(s2, align=False)
0 True
1 True
2 True
3 True
dtype: bool
Notes
-----
This method works in a row-wise manner. It does not check if an element
of one GeoSeries ``crosses`` *any* element of the other one.
See also
--------
GeoSeries.disjoint
GeoSeries.crosses
GeoSeries.touches
GeoSeries.intersection
"""
return _binary_op("intersects", self, other, align)
def overlaps(self, other, align=True):
"""Returns True for all aligned geometries that overlap *other*, else False.
Geometries overlaps if they have more than one but not all
points in common, have the same dimension, and the intersection of the
interiors of the geometries has the same dimension as the geometries
themselves.
The operation works on a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
Parameters
----------
other : GeoSeries or geometric object
The GeoSeries (elementwise) or geometric object to test if
overlaps.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
Series (bool)
Examples
--------
>>> from shapely.geometry import Polygon, LineString, MultiPoint, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (2, 2), (0, 2)]),
... Polygon([(0, 0), (2, 2), (0, 2)]),
... LineString([(0, 0), (2, 2)]),
... MultiPoint([(0, 0), (0, 1)]),
... ],
... )
>>> s2 = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (2, 0), (0, 2)]),
... LineString([(0, 1), (1, 1)]),
... LineString([(1, 1), (3, 3)]),
... Point(0, 1),
... ],
... index=range(1, 5),
... )
>>> s
0 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
1 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
2 LINESTRING (0.00000 0.00000, 2.00000 2.00000)
3 MULTIPOINT (0.00000 0.00000, 0.00000 1.00000)
dtype: geometry
>>> s2
1 POLYGON ((0.00000 0.00000, 2.00000 0.00000, 0....
2 LINESTRING (0.00000 1.00000, 1.00000 1.00000)
3 LINESTRING (1.00000 1.00000, 3.00000 3.00000)
4 POINT (0.00000 1.00000)
dtype: geometry
We can check if each geometry of GeoSeries overlaps a single
geometry:
.. image:: ../../../_static/binary_op-03.svg
:align: center
>>> polygon = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
>>> s.overlaps(polygon)
0 True
1 True
2 False
3 False
dtype: bool
We can also check two GeoSeries against each other, row by row.
The GeoSeries above have different indices. We can either align both GeoSeries
based on index values and compare elements with the same index using
``align=True`` or ignore index and compare elements based on their matching
order using ``align=False``:
.. image:: ../../../_static/binary_op-02.svg
>>> s.overlaps(s2)
0 False
1 True
2 False
3 False
4 False
dtype: bool
>>> s.overlaps(s2, align=False)
0 True
1 False
2 True
3 False
dtype: bool
Notes
-----
This method works in a row-wise manner. It does not check if an element
of one GeoSeries ``overlaps`` *any* element of the other one.
See also
--------
GeoSeries.crosses
GeoSeries.intersects
"""
return _binary_op("overlaps", self, other, align)
def touches(self, other, align=True):
"""Returns a ``Series`` of ``dtype('bool')`` with value ``True`` for
each aligned geometry that touches `other`.
An object is said to touch `other` if it has at least one point in
common with `other` and its interior does not intersect with any part
of the other. Overlapping features therefore do not touch.
The operation works on a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
Parameters
----------
other : GeoSeries or geometric object
The GeoSeries (elementwise) or geometric object to test if is
touched.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
Series (bool)
Examples
--------
>>> from shapely.geometry import Polygon, LineString, MultiPoint, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (2, 2), (0, 2)]),
... Polygon([(0, 0), (2, 2), (0, 2)]),
... LineString([(0, 0), (2, 2)]),
... MultiPoint([(0, 0), (0, 1)]),
... ],
... )
>>> s2 = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (-2, 0), (0, -2)]),
... LineString([(0, 1), (1, 1)]),
... LineString([(1, 1), (3, 0)]),
... Point(0, 1),
... ],
... index=range(1, 5),
... )
>>> s
0 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
1 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
2 LINESTRING (0.00000 0.00000, 2.00000 2.00000)
3 MULTIPOINT (0.00000 0.00000, 0.00000 1.00000)
dtype: geometry
>>> s2
1 POLYGON ((0.00000 0.00000, -2.00000 0.00000, 0...
2 LINESTRING (0.00000 1.00000, 1.00000 1.00000)
3 LINESTRING (1.00000 1.00000, 3.00000 0.00000)
4 POINT (0.00000 1.00000)
dtype: geometry
We can check if each geometry of GeoSeries touches a single
geometry:
.. image:: ../../../_static/binary_op-03.svg
:align: center
>>> line = LineString([(0, 0), (-1, -2)])
>>> s.touches(line)
0 True
1 True
2 True
3 True
dtype: bool
We can also check two GeoSeries against each other, row by row.
The GeoSeries above have different indices. We can either align both GeoSeries
based on index values and compare elements with the same index using
``align=True`` or ignore index and compare elements based on their matching
order using ``align=False``:
.. image:: ../../../_static/binary_op-02.svg
>>> s.touches(s2, align=True)
0 False
1 True
2 True
3 False
4 False
dtype: bool
>>> s.touches(s2, align=False)
0 True
1 False
2 True
3 False
dtype: bool
Notes
-----
This method works in a row-wise manner. It does not check if an element
of one GeoSeries ``touches`` *any* element of the other one.
See also
--------
GeoSeries.overlaps
GeoSeries.intersects
"""
return _binary_op("touches", self, other, align)
def within(self, other, align=True):
    """Return a ``Series`` of ``dtype('bool')`` with value ``True`` for
    each aligned geometry that is within `other`.

    An object is within `other` if at least one of its points lies in the
    interior of `other` and none of its points lie in the exterior of
    `other`. If either geometry is empty, the result is ``False``. This is
    the inverse of :meth:`contains`: ``a.within(b) == b.contains(a)``
    always holds.

    The comparison is performed element against element, row-wise.

    Parameters
    ----------
    other : GeoSeries or geometric object
        The GeoSeries (elementwise) or geometric object to test if each
        geometry is within.
    align : bool (default True)
        If True, the two GeoSeries are aligned on their indices before
        comparison; if False, elements are paired by position.

    Returns
    -------
    Series (bool)

    Notes
    -----
    This is strictly a row-wise check; it does not test whether an element
    of one GeoSeries is within *any* element of the other one.

    See also
    --------
    GeoSeries.contains
    """
    # Delegate to the shared binary-predicate dispatcher.
    result = _binary_op("within", self, other, align)
    return result
def covers(self, other, align=True):
    """Return a ``Series`` of ``dtype('bool')`` with value ``True`` for
    each aligned geometry that entirely covers `other`.

    An object A covers an object B if no point of B lies in the exterior
    of A. If either geometry is empty, the result is ``False``.

    The comparison is performed element against element, row-wise. See
    https://lin-ear-th-inking.blogspot.com/2007/06/subtleties-of-ogc-covers-spatial.html
    for background on how *covers* differs from *contains*.

    Parameters
    ----------
    other : Geoseries or geometric object
        The Geoseries (elementwise) or geometric object to check is being
        covered.
    align : bool (default True)
        If True, the two GeoSeries are aligned on their indices before
        comparison; if False, elements are paired by position.

    Returns
    -------
    Series (bool)

    Notes
    -----
    This is strictly a row-wise check; it does not test whether an element
    of one GeoSeries covers *any* element of the other one.

    See also
    --------
    GeoSeries.covered_by
    GeoSeries.overlaps
    """
    # Route through the common binary-predicate machinery.
    op_name = "covers"
    return _binary_op(op_name, self, other, align)
def covered_by(self, other, align=True):
    """Return a ``Series`` of ``dtype('bool')`` with value ``True`` for
    each aligned geometry that is entirely covered by `other`.

    An object A covers an object B if no point of B lies in the exterior
    of A; this method asks the question in the opposite direction of
    :meth:`covers`.

    The comparison is performed element against element, row-wise. See
    https://lin-ear-th-inking.blogspot.com/2007/06/subtleties-of-ogc-covers-spatial.html
    for background on the *covers* family of predicates.

    Parameters
    ----------
    other : Geoseries or geometric object
        The Geoseries (elementwise) or geometric object to check is being
        covered.
    align : bool (default True)
        If True, the two GeoSeries are aligned on their indices before
        comparison; if False, elements are paired by position.

    Returns
    -------
    Series (bool)

    Notes
    -----
    This is strictly a row-wise check; it does not test whether an element
    of one GeoSeries is covered by *any* element of the other one.

    See also
    --------
    GeoSeries.covers
    GeoSeries.overlaps
    """
    # Route through the common binary-predicate machinery.
    op_name = "covered_by"
    return _binary_op(op_name, self, other, align)
def distance(self, other, align=True):
    """Return a ``Series`` containing the distance to aligned `other`.

    The computation is performed element against element, row-wise.

    Parameters
    ----------
    other : Geoseries or geometric object
        The Geoseries (elementwise) or geometric object to find the
        distance to.
    align : bool (default True)
        If True, the two GeoSeries are aligned on their indices before
        the distance is computed (unmatched indices yield NaN); if False,
        elements are paired by position.

    Returns
    -------
    Series (float)
    """
    # Distances share the same dispatch path as the boolean predicates.
    distances = _binary_op("distance", self, other, align)
    return distances
#
# Binary operations that return a GeoSeries
#
def difference(self, other, align=True):
    """Return a ``GeoSeries`` of the points in each aligned geometry that
    are not in `other`.

    The operation is performed element against element, row-wise.

    Parameters
    ----------
    other : Geoseries or geometric object
        The Geoseries (elementwise) or geometric object to find the
        difference to.
    align : bool (default True)
        If True, the two GeoSeries are aligned on their indices before
        the operation (unmatched indices yield None); if False, elements
        are paired by position.

    Returns
    -------
    GeoSeries

    See Also
    --------
    GeoSeries.symmetric_difference
    GeoSeries.union
    GeoSeries.intersection
    """
    # Set-theoretic operations return geometries, so they go through
    # the geometry-producing dispatcher rather than _binary_op.
    return _binary_geo("difference", self, other, align)
def symmetric_difference(self, other, align=True):
    """Return a ``GeoSeries`` of the symmetric difference of points in
    each aligned geometry with `other`.

    For each geometry, the symmetric difference consists of the points in
    the geometry that are not in `other`, together with the points in
    `other` that are not in the geometry.

    The operation is performed element against element, row-wise.

    Parameters
    ----------
    other : Geoseries or geometric object
        The Geoseries (elementwise) or geometric object to find the
        symmetric difference to.
    align : bool (default True)
        If True, the two GeoSeries are aligned on their indices before
        the operation (unmatched indices yield None); if False, elements
        are paired by position.

    Returns
    -------
    GeoSeries

    See Also
    --------
    GeoSeries.difference
    GeoSeries.union
    GeoSeries.intersection
    """
    # Geometry-producing operation: dispatch through _binary_geo.
    op_name = "symmetric_difference"
    return _binary_geo(op_name, self, other, align)
def union(self, other, align=True):
    """Return a ``GeoSeries`` of the union of points in each aligned
    geometry with `other`.

    The operation is performed element against element, row-wise.

    Parameters
    ----------
    other : Geoseries or geometric object
        The Geoseries (elementwise) or geometric object to find the union
        with.
    align : bool (default True)
        If True, the two GeoSeries are aligned on their indices before
        the operation (unmatched indices yield None); if False, elements
        are paired by position.

    Returns
    -------
    GeoSeries

    See Also
    --------
    GeoSeries.symmetric_difference
    GeoSeries.difference
    GeoSeries.intersection
    """
    # Geometry-producing operation: dispatch through _binary_geo.
    merged = _binary_geo("union", self, other, align)
    return merged
def intersection(self, other, align=True):
    """Return a ``GeoSeries`` of the intersection of points in each
    aligned geometry with `other`.

    The operation is performed element against element, row-wise.

    Parameters
    ----------
    other : Geoseries or geometric object
        The Geoseries (elementwise) or geometric object to find the
        intersection with.
    align : bool (default True)
        If True, the two GeoSeries are aligned on their indices before
        the operation (unmatched indices yield None); if False, elements
        are paired by position.

    Returns
    -------
    GeoSeries

    See Also
    --------
    GeoSeries.difference
    GeoSeries.symmetric_difference
    GeoSeries.union
    """
    # Geometry-producing operation: dispatch through _binary_geo.
    common = _binary_geo("intersection", self, other, align)
    return common
#
# Other operations
#
@property
def bounds(self):
    """Return a ``DataFrame`` with columns ``minx``, ``miny``, ``maxx``,
    ``maxy`` containing the bounds of each geometry.

    See ``GeoSeries.total_bounds`` for the limits of the entire series.

    Returns
    -------
    DataFrame
        One row per geometry, indexed like the series, with float columns
        ``minx``, ``miny``, ``maxx``, ``maxy``.
    """
    # Compute per-geometry bounding boxes on the underlying geometry
    # array, then wrap them in a frame aligned with this object's index.
    coords = GeometryArray(self.geometry.values).bounds
    col_names = ["minx", "miny", "maxx", "maxy"]
    return DataFrame(coords, columns=col_names, index=self.index)
@property
def total_bounds(self):
    """Return a tuple containing ``minx``, ``miny``, ``maxx``, ``maxy``
    values for the bounds of the series as a whole.

    See ``GeoSeries.bounds`` for the bounds of the individual geometries
    contained in the series.

    Returns
    -------
    numpy.ndarray
        Array of four floats: ``(minx, miny, maxx, maxy)``.
    """
    # Aggregate bounds are computed on the underlying geometry array.
    geom_array = GeometryArray(self.geometry.values)
    return geom_array.total_bounds
@property
def sindex(self):
    """Generate the spatial index.

    Creates an R-tree spatial index based on ``pygeos.STRtree`` or
    ``rtree.index.Index``, depending on availability. Note that the
    spatial index may not be fully initialized until the first use.

    Returns
    -------
    The spatial index object of the underlying geometry array; it
    supports ``query`` (single geometry) and ``query_bulk`` (array of
    geometries), optionally filtered with a ``predicate`` argument.
    """
    # The index lives on (and is cached by) the geometry array itself.
    geom_values = self.geometry.values
    return geom_values.sindex
@property
def has_sindex(self):
    """Check the existence of the spatial index without generating it.

    Use the ``.sindex`` attribute on a GeoDataFrame or GeoSeries to
    generate a spatial index if it does not yet exist, which may take
    considerable time based on the underlying index implementation.
    Note that the underlying spatial index may not be fully initialized
    until the first use.

    Returns
    -------
    bool
        ``True`` if the spatial index has been generated, ``False``
        otherwise.
    """
    # Cheap flag lookup on the geometry array; never builds the index.
    return self.geometry.values.has_sindex
def buffer(self, distance, resolution=16, **kwargs):
    """Return a ``GeoSeries`` of geometries representing all points
    within a given ``distance`` of each geometric object.

    See http://shapely.readthedocs.io/en/latest/manual.html#object.buffer
    for details; ``**kwargs`` accept further specification such as
    ``join_style`` and ``cap_style``.

    Parameters
    ----------
    distance : float, np.array, pd.Series
        The radius of the buffer. If np.array or pd.Series are used then
        it must have same length as the GeoSeries. A pd.Series must also
        share this object's index.
    resolution : int (optional, default 16)
        The resolution of the buffer around each vertex.

    Returns
    -------
    GeoSeries
    """
    # NOTE: docstring to be revisited for pygeos once shapely 2.0 lands.
    is_series = isinstance(distance, pd.Series)
    if is_series and not self.index.equals(distance.index):
        # A per-row distance Series must line up with our own index;
        # silently reindexing could pair distances with wrong geometries.
        raise ValueError(
            "Index values of distance sequence does "
            "not match index values of the GeoSeries"
        )
    if is_series:
        distance = np.asarray(distance)
    return _delegate_geo_method(
        "buffer", self, distance, resolution=resolution, **kwargs
    )
def simplify(self, *args, **kwargs):
    """Return a ``GeoSeries`` containing a simplified representation of
    each geometry.

    The algorithm (Douglas-Peucker) recursively splits the original line
    into smaller parts and connects these parts' endpoints by a straight
    line. Then, it removes all points whose distance to the straight line
    is smaller than `tolerance`. It does not move any points and it
    always preserves endpoints of the original line or polygon.

    See http://shapely.readthedocs.io/en/latest/manual.html#object.simplify
    for details.

    Parameters
    ----------
    tolerance : float
        All parts of a simplified geometry will be no more than
        `tolerance` distance from the original. It has the same units as
        the coordinate reference system of the GeoSeries.
    preserve_topology : bool (default True)
        False uses a quicker algorithm, but may produce self-intersecting
        or otherwise invalid geometries.

    Returns
    -------
    GeoSeries

    Notes
    -----
    Invalid geometric objects may result from simplification that does
    not preserve topology, and simplification may be sensitive to the
    order of coordinates.
    """
    # Arguments are passed straight through to the geometry method.
    simplified = _delegate_geo_method("simplify", self, *args, **kwargs)
    return simplified
def relate(self, other, align=True):
    """Return the DE-9IM intersection matrices for the geometries.

    The operation is performed element against element, row-wise.

    Parameters
    ----------
    other : BaseGeometry or GeoSeries
        The other geometry to compute the DE-9IM intersection matrices
        from.
    align : bool (default True)
        If True, the two GeoSeries are aligned on their indices before
        the operation (unmatched indices yield None); if False, elements
        are paired by position.

    Returns
    -------
    spatial_relations : Series of strings
        The DE-9IM intersection matrices (e.g. ``"212F11FF2"``) that
        describe the spatial relation with the other geometry.
    """
    # DE-9IM strings come back through the same binary dispatcher used
    # by the boolean predicates.
    matrices = _binary_op("relate", self, other, align)
    return matrices
def project(self, other, normalized=False, align=True):
    """Return the distance along each geometry nearest to *other*.

    The operation works on a 1-to-1 row-wise manner: each geometry is
    paired with the corresponding element of *other*. The project
    method is the inverse of interpolate.

    Parameters
    ----------
    other : BaseGeometry or GeoSeries
        The *other* geometry to computed projected point from.
    normalized : boolean
        If normalized is True, return the distance normalized to
        the length of the object.
    align : bool (default True)
        If True, automatically aligns GeoSeries based on their indices
        (non-matching indices yield NaN). If False, the order of
        elements is preserved.

    Returns
    -------
    Series

    See also
    --------
    GeoSeries.interpolate
    """
    # Delegate to the shared element-wise binary operation helper.
    result = _binary_op(
        "project", self, other, normalized=normalized, align=align
    )
    return result
def interpolate(self, distance, normalized=False):
    """Return a point at the specified distance along each geometry.

    Parameters
    ----------
    distance : float or Series of floats
        Distance(s) along the geometries at which a point should be
        returned. If np.array or pd.Series are used then it must have
        same length as the GeoSeries.
    normalized : boolean
        If normalized is True, distance will be interpreted as a fraction
        of the geometric object's length.
    """
    if isinstance(distance, pd.Series):
        # A Series of distances must share the GeoSeries' index;
        # otherwise values would be paired with the wrong geometries.
        if not self.index.equals(distance.index):
            raise ValueError(
                "Index values of distance sequence does "
                "not match index values of the GeoSeries"
            )
        distance = np.asarray(distance)
    return _delegate_geo_method(
        "interpolate", self, distance, normalized=normalized
    )
def affine_transform(self, matrix):
    """Return a ``GeoSeries`` with geometries transformed by an affine matrix.

    See http://shapely.readthedocs.io/en/stable/manual.html#shapely.affinity.affine_transform
    for details.

    Parameters
    ----------
    matrix: List or tuple
        6 or 12 items for 2D or 3D transformations respectively.
        For 2D affine transformations the 6 parameter matrix is
        ``[a, b, d, e, xoff, yoff]``; for 3D affine transformations
        the 12 parameter matrix is
        ``[a, b, c, d, e, f, g, h, i, xoff, yoff, zoff]``.
    """  # noqa (E501 link is longer than max line length)
    # Shapely's affinity helper does the per-geometry work.
    return _delegate_geo_method("affine_transform", self, matrix)
def translate(self, xoff=0.0, yoff=0.0, zoff=0.0):
    """Returns a ``GeoSeries`` with translated geometries.

    See http://shapely.readthedocs.io/en/latest/manual.html#shapely.affinity.translate
    for details.

    Parameters
    ----------
    xoff, yoff, zoff : float, float, float
        Amount of offset along the x, y, and z dimensions respectively.
    """  # noqa (E501 link is longer than max line length)
    offsets = (xoff, yoff, zoff)
    return _delegate_geo_method("translate", self, *offsets)
def rotate(self, angle, origin="center", use_radians=False):
    """Returns a ``GeoSeries`` with rotated geometries.

    See http://shapely.readthedocs.io/en/latest/manual.html#shapely.affinity.rotate
    for details.

    Parameters
    ----------
    angle : float
        The angle of rotation, in degrees (default) or radians when
        ``use_radians=True``. Positive angles are counter-clockwise,
        negative are clockwise rotations.
    origin : string, Point, or tuple (x, y)
        The point of origin: the keyword 'center' for the bounding box
        center (default), 'centroid' for the geometry's centroid, a
        Point object, or a coordinate tuple (x, y).
    use_radians : boolean
        Whether to interpret the angle of rotation as degrees or radians
    """
    kwargs = {"origin": origin, "use_radians": use_radians}
    return _delegate_geo_method("rotate", self, angle, **kwargs)
def scale(self, xfact=1.0, yfact=1.0, zfact=1.0, origin="center"):
    """Returns a ``GeoSeries`` with scaled geometries.

    The geometries can be scaled by different factors along each
    dimension. Negative scale factors will mirror or reflect coordinates.

    See http://shapely.readthedocs.io/en/latest/manual.html#shapely.affinity.scale
    for details.

    Parameters
    ----------
    xfact, yfact, zfact : float, float, float
        Scaling factors for the x, y, and z dimensions respectively.
    origin : string, Point, or tuple
        The point of origin: the keyword 'center' for the 2D bounding
        box center (default), 'centroid' for the geometry's 2D centroid,
        a Point object, or a coordinate tuple (x, y, z).
    """
    factors = (xfact, yfact, zfact)
    return _delegate_geo_method("scale", self, *factors, origin=origin)
def skew(self, xs=0.0, ys=0.0, origin="center", use_radians=False):
    """Returns a ``GeoSeries`` with skewed geometries.

    The geometries are sheared by angles along the x and y dimensions.

    See http://shapely.readthedocs.io/en/latest/manual.html#shapely.affinity.skew
    for details.

    Parameters
    ----------
    xs, ys : float, float
        The shear angle(s) for the x and y axes respectively. These can
        be specified in either degrees (default) or radians by setting
        use_radians=True.
    origin : string, Point, or tuple (x, y)
        The point of origin: the keyword 'center' for the bounding box
        center (default), 'centroid' for the geometry's centroid, a
        Point object, or a coordinate tuple (x, y).
    use_radians : boolean
        Whether to interpret the shear angle(s) as degrees or radians
    """
    kwargs = {"origin": origin, "use_radians": use_radians}
    return _delegate_geo_method("skew", self, xs, ys, **kwargs)
@property
def cx(self):
    """
    Coordinate based indexer to select by intersection with bounding box.

    Format of input should be ``.cx[xmin:xmax, ymin:ymax]``. Any of
    ``xmin``, ``xmax``, ``ymin``, and ``ymax`` can be provided, but input
    must include a comma separating x and y slices. That is, ``.cx[:, :]``
    will return the full series/frame, but ``.cx[:]`` is not implemented.
    """
    # The actual bounding-box selection logic lives in the indexer object.
    indexer = _CoordinateIndexer(self)
    return indexer
def equals(self, other):
    """
    Test whether two objects contain the same elements.

    This function allows two GeoSeries or GeoDataFrames to be compared
    against each other to see if they have the same shape and elements.
    Missing values in the same location are considered equal. The
    row/column index do not need to have the same type (as long as the
    values are still considered equal), but the dtypes of the respective
    columns must be the same.

    Parameters
    ----------
    other : GeoSeries or GeoDataFrame
        The other GeoSeries or GeoDataFrame to be compared with the first.

    Returns
    -------
    bool
        True if all elements are the same in both objects, False
        otherwise.
    """
    # Overridden because pandas uses `self._constructor` in its
    # isinstance check, which breaks for GeoPandas objects
    # (https://github.com/geopandas/geopandas/issues/1420); compare the
    # underlying data directly instead.
    if isinstance(other, type(self)):
        return self._data.equals(other._data)
    return False
class _CoordinateIndexer(object):
    """Implements the ``cx`` indexer; see docstring of GeoPandasBase.cx."""

    def __init__(self, obj):
        self.obj = obj

    def __getitem__(self, key):
        obj = self.obj
        xs, ys = key
        # A scalar coordinate is treated as a degenerate [v:v] slice so a
        # single value works for either axis.
        if not isinstance(xs, slice):
            xs = slice(xs, xs)
        if not isinstance(ys, slice):
            ys = slice(ys, ys)
        # don't know how to handle step; should this raise?
        if xs.step is not None or ys.step is not None:
            warn("Ignoring step - full interval is used.")
        # Open-ended slice sides fall back to the total bounds of the data.
        if xs.start is None or xs.stop is None or ys.start is None or ys.stop is None:
            xmin, ymin, xmax, ymax = obj.total_bounds
        bbox = box(
            xmin if xs.start is None else xs.start,
            ymin if ys.start is None else ys.start,
            xmax if xs.stop is None else xs.stop,
            ymax if ys.stop is None else ys.stop,
        )
        return obj[obj.intersects(bbox)]
|
jdmcbr/geopandas
|
geopandas/base.py
|
Python
|
bsd-3-clause
| 112,668
|
[
"Bowtie"
] |
fb41086cf21d980691f471aa3d00838a311044720bdb01a7521e7c2ded590dd1
|
# -*- coding: utf-8 -*-
###############################################################################
#
# This source file is part of the tomviz project.
#
# Copyright Kitware, Inc.
#
# This source code is released under the New BSD License, (the "License").
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
import math
import numpy as np
import vtk.numpy_interface.dataset_adapter as dsa
import vtk.util.numpy_support as np_s
def get_scalars(dataobject):
    """Return the active point scalars of *dataobject* as a wrapped VTK array
    with POINT association."""
    wrapped = dsa.WrapDataObject(dataobject)
    # Use the currently active (first) scalars array.
    raw = wrapped.PointData.GetScalars()
    array = dsa.vtkDataArrayToVTKArray(raw, wrapped)
    array.Association = dsa.ArrayAssociation.POINT
    return array
def is_numpy_vtk_type(newscalars):
    """Return True if the array's dtype is known/supported by VTK's
    NumPy bridge, False otherwise."""
    try:
        np_s.get_vtk_array_type(newscalars.dtype)
    except TypeError:
        # dtype has no VTK equivalent
        return False
    return True
def set_scalars(dataobject, newscalars):
    """Replace the active point scalars of *dataobject* with *newscalars*,
    preserving the original array name."""
    wrapped = dsa.WrapDataObject(dataobject)
    previous = wrapped.PointData.GetScalars()
    name = previous.GetName()
    del previous
    if not is_numpy_vtk_type(newscalars):
        # Fall back to float32 for dtypes VTK cannot represent.
        newscalars = newscalars.astype(np.float32)
    wrapped.PointData.append(newscalars, name)
    wrapped.PointData.SetActiveScalars(name)
def get_array(dataobject, order='F'):
    """Return the scalars of *dataobject* reshaped to 3D.

    With order='F' the shape follows VTK's i,j,k (Fortran) indexing;
    otherwise the dimensions are reversed for C-order k,j,i indexing.
    """
    flat = get_scalars(dataobject)
    dims = dataobject.GetDimensions()
    if order != 'F':
        # C order stores the fastest-varying index last.
        dims = dims[::-1]
    return np.reshape(flat, dims, order=order)
def set_array(dataobject, newarray, minextent=None, isFortran=True):
    """Replace the point scalars of *dataobject* with *newarray*.

    The array is flattened to a Fortran-order (i,j,k) buffer, since that
    is how VTK stores points, and the dataobject's extent is updated when
    the new shape or starting index differs from the current one.

    minextent: starting index for the extent; defaults to the object's
    current starting extent.
    isFortran: whether *newarray* uses Fortran-order (i,j,k) indexing;
    if False, C-order (k,j,i) indexing is assumed.
    """
    # Set the extent if needed, i.e. if the minextent is not the same as
    # the data object starting index, or if the newarray shape is not the same
    # as the size of the dataobject.
    # isFortran indicates whether the NumPy array has Fortran-order indexing,
    # i.e. i,j,k indexing. If isFortran is False, then the NumPy array uses
    # C-order indexing, i.e. k,j,i indexing.
    if isFortran is False:
        # Flatten according to array.flags
        arr = newarray.ravel(order='A')
        if newarray.flags.f_contiguous:
            vtkshape = newarray.shape
        else:
            vtkshape = newarray.shape[::-1]
    elif np.isfortran(newarray):
        arr = newarray.reshape(-1, order='F')
        vtkshape = newarray.shape
    else:
        # Neither flag says Fortran: copy into Fortran layout first.
        print('Warning, array does not have Fortran order, making deep copy '
              'and fixing...')
        vtkshape = newarray.shape
        tmp = np.asfortranarray(newarray)
        arr = tmp.reshape(-1, order='F')
        print('...done.')
    if not is_numpy_vtk_type(arr):
        # Fall back to float32 for dtypes VTK cannot represent.
        arr = arr.astype(np.float32)
    if minextent is None:
        minextent = dataobject.GetExtent()[::2]
    sameindex = list(minextent) == list(dataobject.GetExtent()[::2])
    sameshape = list(vtkshape) == list(dataobject.GetDimensions())
    if not sameindex or not sameshape:
        extent = 6*[0]
        extent[::2] = minextent
        extent[1::2] = \
            [x + y - 1 for (x, y) in zip(minextent, vtkshape)]
        dataobject.SetExtent(extent)
    # Now replace the scalars array with the new array.
    # NOTE(review): `vtkarray` is constructed here but never attached below —
    # the NumPy array `arr` is appended instead. Looks like dead code; confirm
    # whether the explicit numpy_to_vtk conversion is still needed.
    vtkarray = np_s.numpy_to_vtk(arr)
    vtkarray.Association = dsa.ArrayAssociation.POINT
    do = dsa.WrapDataObject(dataobject)
    oldscalars = do.PointData.GetScalars()
    arrayname = "Scalars"
    if oldscalars is not None:
        arrayname = oldscalars.GetName()
    del oldscalars
    do.PointData.append(arr, arrayname)
    do.PointData.SetActiveScalars(arrayname)
def get_tilt_angles(dataobject):
    """Return the 'tilt_angles' field-data array of *dataobject* as a wrapped
    VTK array with FIELD association."""
    wrapped = dsa.WrapDataObject(dataobject)
    raw = wrapped.FieldData.GetArray('tilt_angles')
    angles = dsa.vtkDataArrayToVTKArray(raw, wrapped)
    angles.Association = dsa.ArrayAssociation.FIELD
    return angles
def set_tilt_angles(dataobject, newarray):
    """Replace the 'tilt_angles' field-data array of *dataobject* with
    *newarray* (stored as doubles)."""
    from vtk import VTK_DOUBLE
    # A deep copy avoids having to keep the numpy array around; it is more
    # expensive, but tilt_angles is not expected to be a big array.
    angles = np_s.numpy_to_vtk(newarray, deep=1, array_type=VTK_DOUBLE)
    angles.Association = dsa.ArrayAssociation.FIELD
    angles.SetName('tilt_angles')
    wrapped = dsa.WrapDataObject(dataobject)
    wrapped.FieldData.RemoveArray('tilt_angles')
    wrapped.FieldData.AddArray(angles)
def get_coordinate_arrays(dataset):
    """Returns a triple of Numpy arrays containing x, y, and z coordinates for
    each point in the dataset. This can be used to evaluate a function at each
    point, for instance.
    """
    assert dataset.IsA("vtkImageData"), "Dataset must be a vtkImageData"

    spacing = dataset.GetSpacing()
    origin = dataset.GetOrigin()
    dims = dataset.GetDimensions()

    # Per-axis coordinate values: origin + spacing * index.
    coords = [
        [origin[axis] + spacing[axis] * i for i in range(dims[axis])]
        for axis in range(3)
    ]

    # The funny ordering is to match VTK's convention for point storage
    yy, xx, zz = np.meshgrid(coords[1], coords[0], coords[2])
    return (xx, yy, zz)
def connected_components(dataset, background_value=0, progress_callback=None):
    """Label connected components in *dataset*, writing the labels back into
    the dataset's scalars.

    Labels are compacted to 1..N and ordered so the LARGEST component gets the
    HIGHEST label value (0 stays background). background_value selects which
    input value counts as background; progress_callback, if given, is called
    with a 0..1 fraction and may return True to abort.

    Raises an exception for float-typed images (integral types only) and
    re-raises ITK errors with a diagnostic message.
    """
    try:
        import itk
        import itkTypes
        import vtk
        from tomviz import itkutils
    except Exception as exc:
        print("Could not import necessary module(s)")
        print(exc)
        # Bug fix: previously execution continued after a failed import and
        # crashed later with a confusing NameError; propagate the failure.
        raise

    scalarType = dataset.GetScalarType()
    if scalarType == vtk.VTK_FLOAT or scalarType == vtk.VTK_DOUBLE:
        raise Exception(
            "Connected Components works only on images with integral types.")

    # Add a try/except around the ITK portion. ITK exceptions are
    # passed up to the Python layer, so we can at least report what
    # went wrong with the script, e.g,, unsupported image type.
    try:
        # Get the ITK image. The input is assumed to have an integral type.
        # Take care of casting to an unsigned short image so we can store up
        # to 65,535 connected components (the number of connected components
        # is limited to the maximum representable number in the voxel type
        # of the input image in the ConnectedComponentsFilter).
        itk_image = itkutils.convert_vtk_to_itk_image(dataset, itkTypes.US)
        itk_image_type = type(itk_image)

        # ConnectedComponentImageFilter
        connected_filter = itk.ConnectedComponentImageFilter[
            itk_image_type, itk_image_type].New()
        connected_filter.SetBackgroundValue(background_value)
        connected_filter.SetInput(itk_image)

        if progress_callback is not None:
            def connected_progress_func():
                progress = connected_filter.GetProgress()
                # First half of the overall progress range.
                abort = progress_callback(progress * 0.5)
                connected_filter.SetAbortGenerateData(abort)

            connected_observer = itk.PyCommand.New()
            connected_observer.SetCommandCallable(connected_progress_func)
            connected_filter.AddObserver(itk.ProgressEvent(),
                                         connected_observer)

        # Relabel filter. This will compress the label numbers to a
        # contiguous range between 1 and n where n is the number of
        # labels. It will also sort the components from largest to
        # smallest, where the largest component has label 1, the
        # second largest has label 2, and so on...
        relabel_filter = itk.RelabelComponentImageFilter[
            itk_image_type, itk_image_type].New()
        relabel_filter.SetInput(connected_filter.GetOutput())
        relabel_filter.SortByObjectSizeOn()

        if progress_callback is not None:
            def relabel_progress_func():
                progress = relabel_filter.GetProgress()
                # Second half of the overall progress range.
                abort = progress_callback(progress * 0.5 + 0.5)
                relabel_filter.SetAbortGenerateData(abort)

            relabel_observer = itk.PyCommand.New()
            relabel_observer.SetCommandCallable(relabel_progress_func)
            relabel_filter.AddObserver(itk.ProgressEvent(), relabel_observer)

        try:
            relabel_filter.Update()
        except RuntimeError:
            # Aborted via the progress callback; leave the dataset untouched.
            return

        itk_image_data = relabel_filter.GetOutput()
        label_buffer = itk.PyBuffer[
            itk_image_type].GetArrayFromImage(itk_image_data)

        # Flip the labels so that the largest component has the highest label
        # value, e.g., the labeling ordering by size goes from [1, 2, ... N] to
        # [N, N-1, N-2, ..., 1]. Note that zero is the background value, so we
        # do not want to change it.
        minimum = 1  # Minimum label is always 1, background is 0
        maximum = np.max(label_buffer)

        # Memory-efficient in-place flip of the non-background labels.
        gt_zero = label_buffer > 0
        label_buffer[gt_zero] = minimum - label_buffer[gt_zero] + maximum

        set_array(dataset, label_buffer, isFortran=False)
    except Exception as exc:
        print("Problem encountered while running ConnectedComponents")
        raise exc
def label_object_principal_axes(dataset, label_value):
    """Return ``(evecs, center)`` from a PCA over the coordinates of all
    voxels whose label equals *label_value*.

    evecs holds the principal axes as columns, sorted by decreasing
    eigenvalue; center is the mean coordinate of the selected voxels.
    Raises AssertionError if no voxel carries the label.
    """
    from tomviz import utils
    labels = utils.get_array(dataset)
    num_voxels = np.sum(labels == label_value)
    xx, yy, zz = utils.get_coordinate_arrays(dataset)

    selection = labels == label_value
    assert np.any(selection), \
        "No voxels with label %d in label map" % label_value

    # Gather the (x, y, z) coordinates of the selected voxels.
    data = np.zeros((num_voxels, 3))
    data[:, 0] = xx[selection]
    data[:, 1] = yy[selection]
    data[:, 2] = zz[selection]

    # PCA: eigen-decomposition of the coordinate covariance matrix.
    # (Unused locals `m, n = data.shape` and the redundant local numpy
    # import from the original were removed.)
    from scipy import linalg as la
    center = data.mean(axis=0)
    data -= center
    R = np.cov(data, rowvar=False)
    evals, evecs = la.eigh(R)
    # Sort principal axes by decreasing variance.
    idx = np.argsort(evals)[::-1]
    evecs = evecs[:, idx]
    return (evecs, center)
def make_dataset(x, y, z, dataset, generate_data_function, **kwargs):
    """Fill *dataset* (a vtkImageData) with scalars of shape (x, y, z)
    produced by *generate_data_function*, which writes into a preallocated
    Fortran-order array."""
    from vtk import VTK_DOUBLE
    data = np.zeros((x, y, z), order='F')
    generate_data_function(data, **kwargs)
    dataset.SetOrigin(0, 0, 0)
    dataset.SetSpacing(1, 1, 1)
    dataset.SetExtent(0, x - 1, 0, y - 1, 0, z - 1)
    # VTK expects a flat buffer in i,j,k (Fortran) order.
    flat = data.reshape(-1, order='F')
    scalars = np_s.numpy_to_vtk(flat, deep=1, array_type=VTK_DOUBLE)
    scalars.SetName("generated_scalars")
    dataset.GetPointData().SetScalars(scalars)
def mark_as_volume(dataobject):
    """Tag *dataobject*'s field data as a volume (source-type value 0)."""
    from vtk import vtkTypeInt8Array
    field_data = dataobject.GetFieldData()
    marker = field_data.GetArray("tomviz_data_source_type")
    if marker is None:
        # First call: create the single-value marker array.
        marker = vtkTypeInt8Array()
        marker.SetNumberOfComponents(1)
        marker.SetNumberOfTuples(1)
        marker.SetName("tomviz_data_source_type")
        field_data.AddArray(marker)
    marker.SetTuple1(0, 0)
def mark_as_tiltseries(dataobject):
    """Tag *dataobject*'s field data as a tilt series (source-type value 1)."""
    from vtk import vtkTypeInt8Array
    field_data = dataobject.GetFieldData()
    marker = field_data.GetArray("tomviz_data_source_type")
    if marker is None:
        # First call: create the single-value marker array.
        marker = vtkTypeInt8Array()
        marker.SetNumberOfComponents(1)
        marker.SetNumberOfTuples(1)
        marker.SetName("tomviz_data_source_type")
        field_data.AddArray(marker)
    marker.SetTuple1(0, 1)
def make_spreadsheet(column_names, table):
    """Build a vtkTable holding the contents of *table* (a 2D numpy.ndarray)
    with the given list of column names.

    Returns None (with a warning) if the number of columns does not match the
    number of names.
    """
    num_rows = table.shape[0]
    if (table.shape[1] != len(column_names)):
        print('Warning: table number of columns differs from number of '
              'column names')
        return

    from vtk import vtkTable, vtkFloatArray
    vtk_table = vtkTable()
    for col_index, col_name in enumerate(column_names):
        column = vtkFloatArray()
        column.SetName(col_name)
        column.SetNumberOfComponents(1)
        column.SetNumberOfTuples(num_rows)
        vtk_table.AddColumn(column)
        for row_index in range(num_rows):
            column.InsertValue(row_index, table[row_index, col_index])

    return vtk_table
def zoom_shape(input, zoom):
    """Returns the shape of the output array for scipy.ndimage.interpolation.zoom

    :param input: The input array
    :type input: ndarray
    :param zoom: The zoom factor (scalar or per-axis sequence)
    :type zoom: ndarray
    """
    if isinstance(zoom, (int, float)):
        # Broadcast a scalar zoom factor across every axis.
        zoom = [zoom] * input.ndim
    return tuple(
        int(round(dim * factor)) for dim, factor in zip(input.shape, zoom)
    )
def _minmax(coor, minc, maxc):
if coor[0] < minc[0]:
minc[0] = coor[0]
if coor[0] > maxc[0]:
maxc[0] = coor[0]
if coor[1] < minc[1]:
minc[1] = coor[1]
if coor[1] > maxc[1]:
maxc[1] = coor[1]
return minc, maxc
def rotate_shape(input, angle, axes):
    """
    Returns the shape of the output array for scipy.ndimage.interpolation.rotate

    derived from: https://github.com/scipy/scipy/blob/v0.16.1/scipy/ndimage/ \
    interpolation.py #L578. We are duplicating the code here so we can generate
    an array of the right shape and array order to pass into the rotate
    function.

    :param input: The input array
    :type: ndarray
    :param angle: The rotation angle in degrees.
    :type: float
    :param axes: The two axes that define the plane of rotation.
    :type: tuple of 2 ints
    """
    axes = list(axes)
    rank = input.ndim
    # Normalize negative axis indices.
    if axes[0] < 0:
        axes[0] += rank
    if axes[1] < 0:
        axes[1] += rank
    if axes[0] < 0 or axes[1] < 0 or axes[0] > rank or axes[1] > rank:
        raise RuntimeError('invalid rotation plane specified')
    if axes[0] > axes[1]:
        axes = axes[1], axes[0]

    radians = np.pi / 180 * angle
    m11 = math.cos(radians)
    m12 = math.sin(radians)
    m21 = -math.sin(radians)
    m22 = math.cos(radians)

    iy = input.shape[axes[0]]
    ix = input.shape[axes[1]]
    # Inverse rotation matrix, used to map input corners into output space.
    # (The forward `matrix` and the `offset` computation from the scipy
    # original were dead code here — they do not affect the shape — and
    # have been removed.)
    mtrx = np.array([[m11, -m21],
                     [-m12, m22]], dtype=np.float64)

    # Bounding box of the rotated corners; the origin corner maps to
    # [0, 0], which is the initial value of both bounds.
    minc = [0, 0]
    maxc = [0, 0]
    for corner in ([0, ix], [iy, 0], [iy, ix]):
        coor = np.dot(mtrx, corner)
        for k in (0, 1):
            if coor[k] < minc[k]:
                minc[k] = coor[k]
            if coor[k] > maxc[k]:
                maxc[k] = coor[k]
    oy = int(maxc[0] - minc[0] + 0.5)
    ox = int(maxc[1] - minc[1] + 0.5)

    output_shape = list(input.shape)
    output_shape[axes[0]] = oy
    output_shape[axes[1]] = ox
    return output_shape
|
cjh1/tomviz
|
tomviz/python/tomviz/utils.py
|
Python
|
bsd-3-clause
| 15,400
|
[
"VTK"
] |
134c0ed8dcfc6aeaef34c4aafee44a00b0e31fc12ae7c71be2aadb19eac63a38
|
from itertools import chain
from optparse import make_option
from django.core import serializers
from django.core.management.base import BaseCommand, CommandError
from catmaid.control.tracing import check_tracing_setup
from catmaid.models import Class, ClassInstance, ClassInstanceClassInstance, \
Relation, Connector, Project, Treenode, TreenodeConnector
class Exporter():
def __init__(self, project, options):
self.project = project
self.options = options
self.export_treenodes = options['export_treenodes']
self.export_connectors = options['export_connectors']
self.export_annotations = options['export_annotations']
self.export_tags = options['export_tags']
self.required_annotations = options['required_annotations']
self.target_file = 'export_pid_%s.json' % project.id
self.show_traceback = True
self.format = 'json'
self.indent = 2
self.to_serialize = []
self.seen = {}
def collect_data(self):
self.to_serialize = []
classes = dict(Class.objects.filter(
project=self.project).values_list('class_name', 'id'))
relations = dict(Relation.objects.filter(
project=self.project).values_list('relation_name', 'id'))
if not check_tracing_setup(self.project.id, classes, relations):
raise ValueError("Project with ID %s is no tracing project." % self.project.id)
skeleton_id_constraints = None
entities = ClassInstance.objects.filter(project=self.project,
class_column__in=[classes['neuron']])
skeleton_links = ClassInstanceClassInstance.objects.filter(
project_id=self.project.id, relation=relations['model_of'],
class_instance_a__class_column=classes['skeleton'])
skeletons = ClassInstance.objects.filter(project=self.project,
class_column__in=[classes['skeleton']])
if self.required_annotations:
# Get mapping from annotations to IDs
a_to_id = dict(ClassInstance.objects.filter(
project=self.project, class_column=classes['annotation'],
name__in=self.required_annotations).values_list('name', 'id'))
print("Found entities with the following annotations: %s" % \
", ".join(a_to_id.keys()))
entities = ClassInstance.objects.filter(project=self.project,
class_column=classes['neuron'],
cici_via_a__relation_id=relations['annotated_with'],
cici_via_a__class_instance_b_id__in=a_to_id.values())
# Get the corresponding skeleton IDs
skeleton_links = ClassInstanceClassInstance.objects.filter(
project_id=self.project.id, relation=relations['model_of'],
class_instance_a__class_column=classes['skeleton'],
class_instance_b__in=entities)
skeleton_id_constraints = set(skeleton_links.values_list(
'class_instance_a', flat=True))
skeletons = ClassInstance.objects.filter(project=self.project,
id__in=skeleton_id_constraints)
print("Will export %s entities" % entities.count())
# Export classes and relations
self.to_serialize.append(Class.objects.filter(project=self.project))
self.to_serialize.append(Relation.objects.filter(project=self.project))
# Export skeleton-neuron links
self.to_serialize.append(entities)
self.to_serialize.append(skeleton_links)
self.to_serialize.append(skeletons)
if skeleton_id_constraints:
# Export treenodes
if self.export_treenodes:
treenodes = Treenode.objects.filter(
project=self.project,
skeleton_id__in=skeleton_id_constraints)
self.to_serialize.append(treenodes)
exported_tids = set(treenodes.values_list('id', flat=True))
print("Exporting %s treenodes" % len(exported_tids))
# Export connectors and connector links
if self.export_connectors:
connector_links = TreenodeConnector.objects.filter(
project=self.project, skeleton_id__in=skeleton_id_constraints).values_list('id', 'connector', 'treenode')
# Add matching connecots
connector_ids = set(c for _,c,_ in connector_links)
self.to_serialize.append(Connector.objects.filter(
id__in=connector_ids))
print("Exporting %s connectors" % len(connector_ids))
# Add matching connector links
self.to_serialize.append(TreenodeConnector.objects.filter(
id__in=[l for l,_,_ in connector_links]))
# Add addition placeholde treenodes
connector_tids = set(TreenodeConnector.objects \
.filter(project=self.project, connector__in=connector_ids) \
.exclude(skeleton_id__in=skeleton_id_constraints) \
.values_list('treenode', flat=True))
extra_tids = connector_tids - exported_tids
print("Exporting %s placeholder nodes" % len(extra_tids))
self.to_serialize.append(Treenode.objects.filter(id__in=extra_tids))
# Add additional skeletons and neuron-skeleton links
extra_skids = set(Treenode.objects.filter(id__in=extra_tids,
project=self.project).values_list('skeleton_id', flat=True))
self.to_serialize.append(ClassInstance.objects.filter(id__in=extra_skids))
extra_links = ClassInstanceClassInstance.objects \
.filter(project=self.project,
class_instance_a__in=extra_skids,
relation=relations['model_of'])
self.to_serialize.append(extra_links)
extra_nids = extra_links.values_list('class_instance_b', flat=True)
self.to_serialize.append(ClassInstance.objects.filter(
project=self.project, id__in=extra_nids))
# Export annotations and annotation-neuron links, liked to selected
# entities.
if self.export_annotations and 'annotated_with' in relations:
annotation_links = ClassInstanceClassInstance.objects.filter(
project_id=self.project.id, relation=relations['annotated_with'],
class_instance_a__in=entities)
annotations = ClassInstance.objects.filter(project_id=self.project.id,
cici_via_b__in=annotation_links)
self.to_serialize.append(annotations)
self.to_serialize.append(annotation_links)
# TODO: Export reviews
else:
# Export treenodes
if self.export_treenodes:
if skeleton_id_constraints:
pass
else:
self.to_serialize.append(Treenode.objects.filter(
project=self.project))
# Export connectors and connector links
if self.export_connectors:
self.to_serialize.append(Connector.objects.filter(
project=self.project))
self.to_serialize.append(TreenodeConnector.objects.filter(
project=self.project))
# Export annotations and annotation-neuron links
if self.export_annotations and 'annotated_with' in relations:
annotation_links = ClassInstanceClassInstance.objects.filter(
project_id=self.project.id, relation=relations['annotated_with'],
class_instance_a__in=entities)
annotations = ClassInstance.objects.filter(project_id=self.project.id,
cici_via_b__in=annotation_links)
self.to_serialize.append(annotations)
self.to_serialize.append(annotation_links)
# TODO: Export reviews
    def export(self):
        """Serialize all collected objects and write them to ``self.target_file``.

        Runs ``collect_data()`` to populate ``self.to_serialize`` (a list of
        querysets), flattens them into a single object list and writes it with
        the Django serializer selected by ``self.format``. On failure a
        ``CommandError`` is raised, unless ``self.show_traceback`` is set, in
        which case the original exception propagates for easier debugging.
        """
        try:
            self.collect_data()
            # Flatten the list of querysets into one sequence of model objects.
            data = list(chain(*self.to_serialize))
            CurrentSerializer = serializers.get_serializer(self.format)
            serializer = CurrentSerializer()
            with open(self.target_file, "w") as out:
                serializer.serialize(data, indent=self.indent, stream=out)
        except Exception, e:
            # With --traceback the raw exception is more useful than the
            # wrapped CommandError.
            if self.show_traceback:
                raise
            raise CommandError("Unable to serialize database: %s" % e)
class Command(BaseCommand):
    """Export data of a CATMAID project. Call e.g. like

        ./manage.py catmaid_export_data --source 1 --required-annotation "Kenyon cells"
    """
    help = "Export CATMAID data into a JSON representation"

    def add_arguments(self, parser):
        """Register the command line options understood by this command."""
        parser.add_argument('--source', default=None,
            help='The ID of the source project')
        parser.add_argument('--treenodes', dest='export_treenodes', default=True,
            action='store_true', help='Export treenodes from source')
        parser.add_argument('--notreenodes', dest='export_treenodes',
            action='store_false', help='Don\'t export treenodes from source')
        parser.add_argument('--connectors', dest='export_connectors', default=True,
            action='store_true', help='Export connectors from source')
        parser.add_argument('--noconnectors', dest='export_connectors',
            action='store_false', help='Don\'t export connectors from source')
        parser.add_argument('--annotations', dest='export_annotations', default=True,
            action='store_true', help='Export annotations from source')
        parser.add_argument('--noannotations', dest='export_annotations',
            action='store_false', help='Don\'t export annotations from source')
        parser.add_argument('--tags', dest='export_tags', default=True,
            action='store_true', help='Export tags from source')
        parser.add_argument('--notags', dest='export_tags',
            action='store_false', help='Don\'t export tags from source')
        parser.add_argument('--required-annotation', dest='required_annotations',
            action='append', help='Name a required annotation for exported skeletons.')
        parser.add_argument('--connector-placeholders', dest='connector_placeholders',
            action='store_true', help='Should placeholder nodes be exported')

    def ask_for_project(self, title):
        """Return a valid project object, asking the user repeatedly until a
        valid selection was made.

        :param title: role of the project (e.g. "source"), used in the prompt.
        """
        def ask():
            print("Please enter the number for the %s project:" % title)
            projects = Project.objects.all()
            for n, p in enumerate(projects):
                print("%s: %s" % (n, p))
            selection = raw_input("Selection: ")
            try:
                return projects[int(selection)]
            # BUG FIX: the original read ``except ValueError, IndexError:``
            # which (in Python 2) catches only ValueError and binds it to the
            # name IndexError, shadowing the builtin. Catch both types.
            except (ValueError, IndexError):
                return None

        while True:
            p = ask()
            if p:
                return p

    def handle(self, *args, **options):
        """Entry point: summarize the selected options, resolve the source
        project and run the export."""
        # Give some information about the export
        will_export = []
        wont_export = []
        for t in ('treenodes', 'connectors', 'annotations', 'tags'):
            if options['export_' + t]:
                will_export.append(t)
            else:
                wont_export.append(t)

        if will_export:
            print("Will export: " + ", ".join(will_export))
        else:
            print("Nothing selected for export")
            return

        if wont_export:
            print("Won't export: " + ", ".join(wont_export))

        # Read source project, asking the user interactively if it wasn't
        # given on the command line.
        if not options['source']:
            source = self.ask_for_project('source')
        else:
            source = Project.objects.get(pk=options['source'])

        # Proceed with export
        if options['required_annotations']:
            print("Needed annotations for exported skeletons: " +
                  ", ".join(options['required_annotations']))

        exporter = Exporter(source, options)
        exporter.export()
        print("Finished export, result written to: %s" % exporter.target_file)
|
catsop/CATMAID
|
django/applications/catmaid/management/commands/catmaid_export_data.py
|
Python
|
gpl-3.0
| 12,506
|
[
"NEURON"
] |
2baf2713ee42b6db639c3c2bf2db292ab153b74e2ab025e50ff978e46ff0fde5
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# <HTTPretty - HTTP client mock for Python>
# Copyright (C) <2011> Gabriel Falcao <gabriel@nacaolivre.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import ast
import os
import re
from setuptools import setup, find_packages
class VersionFinder(ast.NodeVisitor):
    """AST visitor that records the value assigned to ``__version__``."""

    def __init__(self):
        # Remains None when no ``__version__ = '...'`` assignment is found.
        self.version = None

    def visit_Assign(self, node):
        """Capture simple ``__version__ = <string>`` assignments.

        Only plain-name targets are considered: attribute, subscript or
        tuple targets have no ``.id`` and previously raised AttributeError.
        """
        target = node.targets[0]
        if isinstance(target, ast.Name) and target.id == '__version__':
            # Python 3.8+ parses string literals as ast.Constant (``.value``);
            # older interpreters use ast.Str (``.s``).
            self.version = getattr(node.value, 'value', None)
            if self.version is None:
                self.version = node.value.s
def read_version():
    """Extract the package version from httpretty/__init__.py without
    importing (and thereby executing) the package."""
    visitor = VersionFinder()
    source = local_file('httpretty', '__init__.py')
    visitor.visit(ast.parse(source))
    return visitor.version
def parse_requirements(path):
    """Rudimentary parser for pip-style requirements files.

    Separates regular packages from links so they can be passed to the
    ``install_requires`` and ``dependency_links`` parameters of ``setup()``.

    :param path: requirements file path, relative to this script.
    :returns: a ``(packages, links)`` tuple of lists.
    :raises RuntimeError: when the file cannot be read.
    """
    try:
        requirements = map(str.strip, local_file(path).splitlines())
    except IOError:
        # BUG FIX: report the file that actually failed; the message used to
        # hard-code `requirements.txt' even for other paths.
        raise RuntimeError("Couldn't find the `%s' file :(" % path)

    links = []
    pkgs = []
    for req in requirements:
        if not req:
            continue
        if 'http:' in req or 'https:' in req:
            links.append(req)
            # Turn the `#egg=name-version` fragment into a pinned requirement.
            name, version = re.findall(r"\#egg=([^\-]+)-(.+$)", req)[0]
            pkgs.append('{0}=={1}'.format(name, version))
        else:
            pkgs.append(req)

    return pkgs, links
def local_file(*f):
    """Return the contents of a file located relative to this script.

    Path components in *f* are joined onto this script's directory.
    """
    # Use a context manager so the handle is closed deterministically; the
    # original lambda leaked the open file until garbage collection.
    with open(os.path.join(os.path.dirname(__file__), *f)) as fd:
        return fd.read()
# Resolve runtime dependencies (and any non-PyPI links) up front so they can
# be handed to setup() below.
install_requires, dependency_links = \
    parse_requirements('requirements.txt')
# BUG FIX: parse_requirements() returns a (packages, links) tuple; the setup()
# call used to pass the whole tuple to tests_require, which expects only the
# list of requirement strings.
tests_require, _test_links = \
    parse_requirements('test-requirements.txt')

setup(name='httpretty',
      version=read_version(),
      description='HTTP client mock for Python',
      long_description=local_file('README.rst'),
      author='Gabriel Falcao',
      author_email='gabriel@nacaolivre.org',
      url='http://github.com/gabrielfalcao/httpretty',
      zip_safe=False,
      packages=find_packages(exclude=['*tests*']),
      tests_require=tests_require,
      install_requires=install_requires,
      dependency_links=dependency_links,
      license='MIT',
      test_suite='nose.collector',
      classifiers=["Intended Audience :: Developers",
                   "License :: OSI Approved :: MIT License",
                   "Topic :: Software Development :: Testing"],
      )
|
letolab/HTTPretty
|
setup.py
|
Python
|
mit
| 3,428
|
[
"VisIt"
] |
39d1ee85669d8f1d9314031599767fc430ab38fae99e24c07b61b133c6984592
|
# Copyright (C) 2012,2013,2015,2016
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*********************
espressopp.VerletList
*********************
.. function:: espressopp.VerletList(system, cutoff, exclusionlist)
:param system:
:param cutoff:
:param exclusionlist: (default: [])
:type system:
:type cutoff:
:type exclusionlist:
.. function:: espressopp.VerletList.exclude(exclusionlist)
:param exclusionlist:
:type exclusionlist:
:rtype:
.. function:: espressopp.VerletList.getAllPairs()
:rtype:
.. function:: espressopp.VerletList.localSize()
:rtype:
.. function:: espressopp.VerletList.totalSize()
:rtype:
"""
from espressopp import pmi
import _espressopp
import espressopp
from espressopp.esutil import cxxinit
class VerletListLocal(_espressopp.VerletList):
    """Worker-local Verlet pair list with optional pair exclusions."""

    def __init__(self, system, cutoff, exclusionlist=None):
        """Create the underlying C++ Verlet list.

        :param exclusionlist: optional iterable of (pid1, pid2) particle-id
            pairs that must never appear in the list. The previous default
            was a shared mutable ``[]``; ``None`` avoids that anti-pattern
            while keeping identical behaviour for every caller.
        """
        if pmi.workerIsActive():
            if not exclusionlist:
                # rebuild list in constructor
                cxxinit(self, _espressopp.VerletList, system, cutoff, True)
            else:
                # do not rebuild list in constructor
                cxxinit(self, _espressopp.VerletList, system, cutoff, False)
                # add exclusions
                for pair in exclusionlist:
                    pid1, pid2 = pair
                    self.cxxclass.exclude(self, pid1, pid2)
                # now rebuild list with exclusions
                self.cxxclass.rebuild(self)

    def totalSize(self):
        """Return the number of pairs summed over all workers."""
        if pmi.workerIsActive():
            return self.cxxclass.totalSize(self)

    def localSize(self):
        """Return the number of pairs stored on this worker."""
        if pmi.workerIsActive():
            return self.cxxclass.localSize(self)

    def exclude(self, exclusionlist):
        """
        Each processor takes the broadcasted exclusion list
        and adds it to its list.
        """
        if pmi.workerIsActive():
            for pair in exclusionlist:
                pid1, pid2 = pair
                self.cxxclass.exclude(self, pid1, pid2)
            # rebuild list with exclusions
            self.cxxclass.rebuild(self)

    def getAllPairs(self):
        """Return all pairs stored on this worker (1-based C++ indexing)."""
        if pmi.workerIsActive():
            pairs = []
            npairs = self.localSize()
            for i in xrange(npairs):
                pair = self.cxxclass.getPair(self, i + 1)
                pairs.append(pair)
            return pairs
# On the controller rank, expose a PMI proxy class that forwards calls to the
# per-worker VerletListLocal instances.
if pmi.isController:
    class VerletList(object):
        """Controller-side proxy for :class:`VerletListLocal`."""
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            # worker-side implementation class
            cls = 'espressopp.VerletListLocal',
            pmiproperty = [ 'builds' ],
            # methods invoked on all workers; totalSize reduces over workers
            pmicall = [ 'totalSize', 'exclude', 'connect', 'disconnect', 'getVerletCutoff' ],
            # methods whose per-worker return values are gathered
            pmiinvoke = [ 'getAllPairs' ]
        )
|
kkreis/espressopp
|
src/VerletList.py
|
Python
|
gpl-3.0
| 3,541
|
[
"ESPResSo"
] |
85c28637d774883358c09a0befffca2b4d7079aede72a0161a56deffe8ec45cd
|
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# Brian Cheung
# Wei LI <kuantkid@gmail.com>
# License: BSD
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array, deprecated
from ..utils.extmath import norm
from ..metrics.pairwise import rbf_kernel
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
               random_state=None):
    """Search for a partition matrix (clustering) which is closest to the
    eigenvector embedding.

    Parameters
    ----------
    vectors : array-like, shape: (n_samples, n_clusters)
        The embedding space of the samples.

    copy : boolean, optional, default: True
        Whether to copy vectors, or perform in-place normalization.

    max_svd_restarts : int, optional, default: 30
        Maximum number of attempts to restart SVD if convergence fails

    n_iter_max : int, optional, default: 20
        Maximum number of iterations to attempt in rotation and partition
        matrix search if machine precision convergence is not reached

    random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        of the rotation matrix

    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.

    References
    ----------

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf

    Notes
    -----

    The eigenvector embedding is used to iteratively search for the
    closest discrete partition.  First, the eigenvector embedding is
    normalized to the space of partition matrices. An optimal discrete
    partition matrix closest to this normalized embedding multiplied by
    an initial rotation is calculated.  Fixing this discrete partition
    matrix, an optimal rotation matrix is calculated.  These two
    calculations are performed until convergence.  The discrete partition
    matrix is returned as the clustering solution.  Used in spectral
    clustering, this method tends to be faster and more robust to random
    initialization than k-means.
    """
    from scipy.sparse import csc_matrix
    from scipy.linalg import LinAlgError

    random_state = check_random_state(random_state)

    vectors = as_float_array(vectors, copy=copy)

    eps = np.finfo(float).eps
    n_samples, n_components = vectors.shape

    # Normalize the eigenvectors to an equal length of a vector of ones.
    # Reorient the eigenvectors to point in the negative direction with respect
    # to the first element.  This may have to do with constraining the
    # eigenvectors to lie in a specific quadrant to make the discretization
    # search easier.
    norm_ones = np.sqrt(n_samples)
    for i in range(vectors.shape[1]):
        vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
            * norm_ones
        if vectors[0, i] != 0:
            vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])

    # Normalize the rows of the eigenvectors.  Samples should lie on the unit
    # hypersphere centered at the origin.  This transforms the samples in the
    # embedding space to the space of partition matrices.
    vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]

    svd_restarts = 0
    has_converged = False

    # If there is an exception we try to randomize and rerun SVD again
    # do this max_svd_restarts times.
    while (svd_restarts < max_svd_restarts) and not has_converged:

        # Initialize first column of rotation matrix with a row of the
        # eigenvectors
        rotation = np.zeros((n_components, n_components))
        rotation[:, 0] = vectors[random_state.randint(n_samples), :].T

        # To initialize the rest of the rotation matrix, find the rows
        # of the eigenvectors that are as orthogonal to each other as
        # possible
        c = np.zeros(n_samples)
        for j in range(1, n_components):
            # Accumulate c to ensure row is as orthogonal as possible to
            # previous picks as well as current one
            c += np.abs(np.dot(vectors, rotation[:, j - 1]))
            rotation[:, j] = vectors[c.argmin(), :].T

        last_objective_value = 0.0
        n_iter = 0

        while not has_converged:
            n_iter += 1

            t_discrete = np.dot(vectors, rotation)

            labels = t_discrete.argmax(axis=1)
            vectors_discrete = csc_matrix(
                (np.ones(len(labels)), (np.arange(0, n_samples), labels)),
                shape=(n_samples, n_components))

            t_svd = vectors_discrete.T * vectors

            try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                # BUG FIX: the restart counter was previously incremented on
                # every *successful* SVD, so a persistently failing SVD never
                # advanced svd_restarts and could loop forever.  Count the
                # restart when the failure actually happens.
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break

            ncut_value = 2.0 * (n_samples - S.sum())
            if ((abs(ncut_value - last_objective_value) < eps) or
                    (n_iter > n_iter_max)):
                has_converged = True
            else:
                # otherwise calculate rotation and continue
                last_objective_value = ncut_value
                rotation = np.dot(Vh.T, U.T)

    if not has_converged:
        raise LinAlgError('SVD did not converge')
    return labels
def spectral_clustering(affinity, n_clusters=8, n_components=None,
                        eigen_solver=None, random_state=None, n_init=10,
                        k=None, eigen_tol=0.0,
                        assign_labels='kmeans',
                        mode=None):
    """Apply clustering to a projection to the normalized laplacian.

    In practice Spectral Clustering is very useful when the structure of
    the individual clusters is highly non-convex or more generally when
    a measure of the center and spread of the cluster is not a suitable
    description of the complete cluster. For instance when clusters are
    nested circles on the 2D plan.

    If affinity is the adjacency matrix of a graph, this method can be
    used to find normalized graph cuts.

    Parameters
    -----------
    affinity: array-like or sparse matrix, shape: (n_samples, n_samples)
        The affinity matrix describing the relationship of the samples to
        embed. **Must be symmetric**.

        Possible examples:

        - adjacency matrix of a graph,
        - heat kernel of the pairwise distance matrix of the samples,
        - symmetric k-nearest neighbours connectivity matrix of the samples.

    n_clusters: integer, optional
        Number of clusters to extract.

    n_components: integer, optional, default is k
        Number of eigen vectors to use for the spectral embedding

    eigen_solver: {None, 'arpack' or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities

    random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization
        of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
        and by the K-Means initialization.

    n_init: int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    eigen_tol : float, optional, default: 0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        The strategy to use to assign labels in the embedding
        space.  There are two ways to assign labels after the laplacian
        embedding.  k-means can be applied and is a popular choice. But it can
        also be sensitive to initialization. Discretization is another
        approach which is less sensitive to random initialization. See
        the 'Multiclass spectral clustering' paper referenced below for
        more details on the discretization approach.

    Returns
    -------
    labels: array of integers, shape: n_samples
        The labels of the clusters.

    References
    ----------

    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324

    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf

    Notes
    ------
    The graph should contain only one connected component, elsewhere
    the results make little sense.

    This algorithm solves the normalized cut for k=2: it is a
    normalized spectral clustering.
    """
    # Validate the label assignment strategy first so the caller gets a clear
    # error before any expensive work is done.
    if assign_labels not in ('kmeans', 'discretize'):
        raise ValueError("The 'assign_labels' parameter should be "
                         "'kmeans' or 'discretize', but '%s' was given"
                         % assign_labels)

    # Backwards-compatible handling of the deprecated 'k' and 'mode' aliases.
    if k is not None:
        warnings.warn("'k' was renamed to n_clusters and will "
                      "be removed in 0.15.",
                      DeprecationWarning)
        n_clusters = k
    if mode is not None:
        warnings.warn("'mode' was renamed to eigen_solver "
                      "and will be removed in 0.15.",
                      DeprecationWarning)
        eigen_solver = mode

    random_state = check_random_state(random_state)
    n_components = n_clusters if n_components is None else n_components
    maps = spectral_embedding(affinity, n_components=n_components,
                              eigen_solver=eigen_solver,
                              random_state=random_state,
                              eigen_tol=eigen_tol, drop_first=False)

    if assign_labels == 'kmeans':
        _, labels, _ = k_means(maps, n_clusters, random_state=random_state,
                               n_init=n_init)
    else:
        labels = discretize(maps, random_state=random_state)

    return labels
class SpectralClustering(BaseEstimator, ClusterMixin):
    """Apply clustering to a projection to the normalized laplacian.

    In practice Spectral Clustering is very useful when the structure of
    the individual clusters is highly non-convex or more generally when
    a measure of the center and spread of the cluster is not a suitable
    description of the complete cluster. For instance when clusters are
    nested circles on the 2D plan.

    If affinity is the adjacency matrix of a graph, this method can be
    used to find normalized graph cuts.

    When calling ``fit``, an affinity matrix is constructed using either the
    Gaussian (aka RBF) kernel of the euclidean distanced ``d(X, X)``::

            np.exp(-gamma * d(X,X) ** 2)

    or a k-nearest neighbors connectivity matrix.

    Alternatively, using ``precomputed``, a user-provided affinity
    matrix can be used.

    Parameters
    -----------
    n_clusters : integer, optional
        The dimension of the projection subspace.

    affinity: string, 'nearest_neighbors', 'rbf' or 'precomputed'

    gamma: float
        Scaling factor of Gaussian (rbf) affinity kernel. Ignored for
        ``affinity='nearest_neighbors'``.

    n_neighbors: integer
        Number of neighbors to use when constructing the affinity matrix using
        the nearest neighbors method. Ignored for ``affinity='rbf'``.

    eigen_solver: {None, 'arpack' or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization
        of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
        and by the K-Means initialization.

    n_init : int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    eigen_tol : float, optional, default: 0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        The strategy to use to assign labels in the embedding
        space. There are two ways to assign labels after the laplacian
        embedding.  k-means can be applied and is a popular choice. But it can
        also be sensitive to initialization. Discretization is another approach
        which is less sensitive to random initialization.

    Attributes
    ----------
    `affinity_matrix_` : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only if after calling
        ``fit``.

    `labels_` :
        Labels of each point

    Notes
    -----
    If you have an affinity matrix, such as a distance matrix,
    for which 0 means identical elements, and high values means
    very dissimilar elements, it can be transformed in a
    similarity matrix that is well suited for the algorithm by
    applying the Gaussian (RBF, heat) kernel::

        np.exp(- X ** 2 / (2. * delta ** 2))

    Another alternative is to take a symmetric version of the k
    nearest neighbors connectivity matrix of the points.

    If the pyamg package is installed, it is used: this greatly
    speeds up computation.

    References
    ----------

    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324

    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
    """

    def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
                 n_init=10, gamma=1., affinity='rbf', n_neighbors=10, k=None,
                 eigen_tol=0.0, assign_labels='kmeans', mode=None):
        # Deprecated aliases: 'k' overrides n_clusters and 'mode' overrides
        # eigen_solver, mirroring the spectral_clustering() function.
        if k is not None:
            warnings.warn("'k' was renamed to n_clusters and "
                          "will be removed in 0.15.",
                          DeprecationWarning)
            n_clusters = k
        if mode is not None:
            warnings.warn("'mode' was renamed to eigen_solver and "
                          "will be removed in 0.15.",
                          DeprecationWarning)
            eigen_solver = mode
        self.n_clusters = n_clusters
        self.eigen_solver = eigen_solver
        self.random_state = random_state
        self.n_init = n_init
        self.gamma = gamma
        self.affinity = affinity
        self.n_neighbors = n_neighbors
        self.eigen_tol = eigen_tol
        self.assign_labels = assign_labels

    def fit(self, X):
        """Creates an affinity matrix for X using the selected affinity,
        then applies spectral clustering to this affinity matrix.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            OR, if affinity==`precomputed`, a precomputed affinity
            matrix of shape (n_samples, n_samples)
        """
        # A square X with a non-'precomputed' affinity may indicate a caller
        # relying on the old API where fit() took an affinity matrix directly.
        if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
            warnings.warn("The spectral clustering API has changed. ``fit``"
                          "now constructs an affinity matrix from data. To use"
                          " a custom affinity matrix, "
                          "set ``affinity=precomputed``.")

        # Build the affinity matrix according to the configured strategy.
        if self.affinity == 'rbf':
            self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma)
        elif self.affinity == 'nearest_neighbors':
            connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors)
            # Symmetrize the kNN graph, as required by spectral_clustering.
            self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
        elif self.affinity == 'precomputed':
            self.affinity_matrix_ = X
        else:
            raise ValueError("Invalid 'affinity'. Expected 'rbf', "
                             "'nearest_neighbors' or 'precomputed', got '%s'."
                             % self.affinity)

        random_state = check_random_state(self.random_state)
        self.labels_ = spectral_clustering(self.affinity_matrix_,
                                           n_clusters=self.n_clusters,
                                           eigen_solver=self.eigen_solver,
                                           random_state=random_state,
                                           n_init=self.n_init,
                                           eigen_tol=self.eigen_tol,
                                           assign_labels=self.assign_labels)
        return self

    @property
    def _pairwise(self):
        # Tells sklearn's cross-validation utilities that X is a square
        # sample-by-sample matrix when a precomputed affinity is used.
        return self.affinity == "precomputed"

    @property
    @deprecated("'mode' was renamed to eigen_solver and will be removed in"
                " 0.15.")
    def mode(self):
        # Deprecated read-only alias for eigen_solver.
        return self.eigen_solver

    @property
    @deprecated("'k' was renamed to n_clusters and will be removed in"
                " 0.15.")
    def k(self):
        # Deprecated read-only alias for n_clusters.
        return self.n_clusters
|
florian-f/sklearn
|
sklearn/cluster/spectral.py
|
Python
|
bsd-3-clause
| 17,870
|
[
"Brian",
"Gaussian"
] |
04f2ddb724b046e4c06d9fe5feeb42983c38ea41063fd45a942f1012d9f7753a
|
#!/usr/local/bin/python
# Script: dumpsort.py
# Purpose: sort the snapshots in a LAMMPS dump file by atom ID
# Syntax: dumpsort.py oldfile N newfile
# oldfile = old LAMMPS dump file in native LAMMPS format
# N = column # for atom ID (usually 1)
# newfile = new sorted LAMMPS dump file
# Author: Steve Plimpton (Sandia), sjplimp at sandia.gov
import sys,os
path = os.environ["LAMMPS_PYTHON_TOOLS"]
sys.path.append(path)
from dump import dump
# Abort early with a usage message when the argument count is wrong.
if len(sys.argv) != 4:
    raise StandardError, "Syntax: dumpsort.py oldfile N newfile"

oldfile = sys.argv[1]       # input LAMMPS dump file (native format)
ncolumn = int(sys.argv[2])  # 1-based column number holding the atom ID
newfile = sys.argv[3]       # output file for the sorted snapshots

# Read the dump, declare which column is the atom ID, sort every snapshot
# by that ID and write the result to the new file.
d = dump(oldfile)
d.map(ncolumn,"id")
d.sort()
d.write(newfile)
|
val-github/lammps-dev
|
tools/python/dumpsort.py
|
Python
|
gpl-2.0
| 697
|
[
"LAMMPS"
] |
64a7ed67d0db2ad3ee32ece32888950a2eeab52c883b27224d84f4da7692d87b
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Implementation of Neural Net (NN) functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.distribute import distribution_strategy_context as ds
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import candidate_sampling_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gen_array_ops # pylint: disable=unused-import
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import util as losses_util
from tensorflow.python.platform import device_context
from tensorflow.python.util import dispatch
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.deprecation import deprecated_argument_lookup
from tensorflow.python.util.tf_export import tf_export
@tf_export("nn.log_poisson_loss")
@dispatch.add_dispatch_support
def log_poisson_loss(targets, log_input, compute_full_loss=False, name=None):
  """Computes log Poisson loss given `log_input`.

  The loss is the negative log-likelihood of `targets` under a Poisson
  distribution whose log-rate is `log_input`. With `c = log_input` and
  `z = targets`:

      exp(c) - z * c [+ z * log(z) - z + 0.5 * log(2 * pi * z)]

  The bracketed term is Stirling's Approximation of `log(z!)`. It is
  constant with respect to `log_input`, so dropping it (the default) does
  not affect optimization — only absolute loss values and relative loss
  comparisons. Pass `compute_full_loss=True` to include it.

  Args:
    targets: A `Tensor` of the same type and shape as `log_input`.
    log_input: A `Tensor` of type `float32` or `float64`.
    compute_full_loss: whether to compute the full loss. If false, a constant
      term is dropped in favor of more efficient optimization.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same shape as `log_input` with the componentwise
    logistic losses.

  Raises:
    ValueError: If `log_input` and `targets` do not have the same shape.
  """
  with ops.name_scope(name, "log_poisson_loss", [log_input, targets]) as name:
    log_input = ops.convert_to_tensor(log_input, name="log_input")
    targets = ops.convert_to_tensor(targets, name="targets")
    try:
      targets.get_shape().assert_is_compatible_with(log_input.get_shape())
    except ValueError:
      raise ValueError(
          "log_input and targets must have the same shape (%s vs %s)" %
          (log_input.get_shape(), targets.get_shape()))

    # Base loss: exp(c) - z * c.
    loss = math_ops.exp(log_input) - log_input * targets
    if not compute_full_loss:
      return loss

    # Constants are created with the targets' dtype so they match.
    half = constant_op.constant(0.5, dtype=targets.dtype)
    twice_pi = constant_op.constant(2 * math.pi, dtype=targets.dtype)
    stirling = (targets * math_ops.log(targets)) - targets + (
        half * math_ops.log(twice_pi * targets))
    zeros = array_ops.zeros_like(targets, dtype=targets.dtype)
    ones = array_ops.ones_like(targets, dtype=targets.dtype)
    # log(z!) is ~0 for z in [0, 1], so skip the approximation there (it
    # would also produce log(0) at z == 0).
    in_unit_interval = math_ops.logical_and(targets >= zeros, targets <= ones)
    return loss + array_ops.where(in_unit_interval, zeros, stirling)
@tf_export(v1=["nn.sigmoid_cross_entropy_with_logits"])
@dispatch.add_dispatch_support
def sigmoid_cross_entropy_with_logits(  # pylint: disable=invalid-name
    _sentinel=None,
    labels=None,
    logits=None,
    name=None):
  """See sigmoid_cross_entropy_with_logits_v2."""
  # pylint: disable=protected-access
  nn_ops._ensure_xent_args("sigmoid_cross_entropy_with_logits", _sentinel,
                           labels, logits)
  # pylint: enable=protected-access

  with ops.name_scope(name, "logistic_loss", [logits, labels]) as name:
    logits = ops.convert_to_tensor(logits, name="logits")
    labels = ops.convert_to_tensor(labels, name="labels")
    try:
      labels.get_shape().assert_is_compatible_with(logits.get_shape())
    except ValueError:
      raise ValueError("logits and labels must have the same shape (%s vs %s)" %
                       (logits.get_shape(), labels.get_shape()))

    # The naive logistic loss is x - x * z + log(1 + exp(-x)), which
    # overflows in exp(-x) for x < 0. The equivalent, numerically stable
    # form used here is:
    #     max(x, 0) - x * z + log(1 + exp(-abs(x)))
    # max and abs are expressed via where() so gradients at zero are
    # well-defined.
    zero = array_ops.zeros_like(logits, dtype=logits.dtype)
    is_positive = (logits >= zero)
    max_logits_zero = array_ops.where(is_positive, logits, zero)
    neg_abs_logits = array_ops.where(is_positive, -logits, logits)  # pylint: disable=invalid-unary-operand-type
    return math_ops.add(
        max_logits_zero - logits * labels,
        math_ops.log1p(math_ops.exp(neg_abs_logits)),
        name=name)
# Note: intentionally calling this v2 to not allow existing code with indirect
# imports to ignore the sentinel behavior.
@tf_export("nn.sigmoid_cross_entropy_with_logits", v1=[])
@dispatch.add_dispatch_support
def sigmoid_cross_entropy_with_logits_v2(  # pylint: disable=invalid-name
    labels=None,
    logits=None,
    name=None):
  r"""Computes sigmoid cross entropy given `logits`.
  Measures the probability error in tasks with two outcomes in which each
  outcome is independent and need not have a fully certain label. For instance,
  one could perform a regression where the probability of an event happening is
  known and used as a label. This loss may also be used for binary
  classification, where labels are either zero or one.
  For brevity, let `x = logits`, `z = labels`. The logistic loss is
        z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
      = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
      = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))
      = (1 - z) * x + log(1 + exp(-x))
      = x - x * z + log(1 + exp(-x))
  For x < 0, to avoid overflow in exp(-x), we reformulate the above
        x - x * z + log(1 + exp(-x))
      = log(exp(x)) - x * z + log(1 + exp(-x))
      = - x * z + log(1 + exp(x))
  Hence, to ensure stability and avoid overflow, the implementation uses this
  equivalent formulation
      max(x, 0) - x * z + log(1 + exp(-abs(x)))
  `logits` and `labels` must have the same type and shape.
  >>> logits = tf.constant([1., -1., 0., 1., -1., 0., 0.])
  >>> labels = tf.constant([0., 0., 0., 1., 1., 1., 0.5])
  >>> tf.nn.sigmoid_cross_entropy_with_logits(
  ...     labels=labels, logits=logits).numpy()
  array([1.3132617, 0.3132617, 0.6931472, 0.3132617, 1.3132617, 0.6931472,
         0.6931472], dtype=float32)
  Compared to the losses which handle multiple outcomes,
  `tf.nn.softmax_cross_entropy_with_logits` for general multi-class
  classification and `tf.nn.sparse_softmax_cross_entropy_with_logits` for more
  efficient multi-class classification with hard labels,
  `sigmoid_cross_entropy_with_logits` is a slight simplification for binary
  classification:
        sigmoid(x) = softmax([x, 0])[0]
  $$\frac{1}{1 + e^{-x}} = \frac{e^x}{e^x + e^0}$$
  While `sigmoid_cross_entropy_with_logits` works for soft binary labels
  (probabilities between 0 and 1), it can also be used for binary classification
  where the labels are hard. There is an equivalence between all three symbols
  in this case, with a probability 0 indicating the second class or 1 indicating
  the first class:
  >>> sigmoid_logits = tf.constant([1., -1., 0.])
  >>> softmax_logits = tf.stack([sigmoid_logits, tf.zeros_like(sigmoid_logits)],
  ...                           axis=-1)
  >>> soft_binary_labels = tf.constant([1., 1., 0.])
  >>> soft_multiclass_labels = tf.stack(
  ...     [soft_binary_labels, 1. - soft_binary_labels], axis=-1)
  >>> hard_labels = tf.constant([0, 0, 1])
  >>> tf.nn.sparse_softmax_cross_entropy_with_logits(
  ...     labels=hard_labels, logits=softmax_logits).numpy()
  array([0.31326166, 1.3132616 , 0.6931472 ], dtype=float32)
  >>> tf.nn.softmax_cross_entropy_with_logits(
  ...     labels=soft_multiclass_labels, logits=softmax_logits).numpy()
  array([0.31326166, 1.3132616, 0.6931472], dtype=float32)
  >>> tf.nn.sigmoid_cross_entropy_with_logits(
  ...     labels=soft_binary_labels, logits=sigmoid_logits).numpy()
  array([0.31326166, 1.3132616, 0.6931472], dtype=float32)
  Args:
    labels: A `Tensor` of the same type and shape as `logits`. Between 0 and 1,
      inclusive.
    logits: A `Tensor` of type `float32` or `float64`. Any real number.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of the same shape as `logits` with the componentwise
    logistic losses.
  Raises:
    ValueError: If `logits` and `labels` do not have the same shape.
  """
  # Delegate to the v1 implementation; passing everything by keyword skips the
  # v1 `_sentinel` argument, so its positional-use check never triggers here.
  return sigmoid_cross_entropy_with_logits(
      logits=logits, labels=labels, name=name)
# Keep the v1 endpoint's help text in sync with the v2 function: the v1
# function's own docstring is only a cross-reference, so expose the full
# documentation on both.
sigmoid_cross_entropy_with_logits.__doc__ = (
    sigmoid_cross_entropy_with_logits_v2.__doc__)
@tf_export("nn.weighted_cross_entropy_with_logits", v1=[])
@dispatch.add_dispatch_support
def weighted_cross_entropy_with_logits_v2(labels, logits, pos_weight,
                                          name=None):
  """Computes a weighted cross entropy.
  This is like `sigmoid_cross_entropy_with_logits()` except that `pos_weight`,
  allows one to trade off recall and precision by up- or down-weighting the
  cost of a positive error relative to a negative error.
  The usual cross-entropy cost is defined as:
      labels * -log(sigmoid(logits)) +
          (1 - labels) * -log(1 - sigmoid(logits))
  A value `pos_weight > 1` decreases the false negative count, hence increasing
  the recall.
  Conversely setting `pos_weight < 1` decreases the false positive count and
  increases the precision.
  This can be seen from the fact that `pos_weight` is introduced as a
  multiplicative coefficient for the positive labels term
  in the loss expression:
      labels * -log(sigmoid(logits)) * pos_weight +
          (1 - labels) * -log(1 - sigmoid(logits))
  For brevity, let `x = logits`, `z = labels`, `q = pos_weight`.
  The loss is:
        qz * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      = qz * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
      = qz * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
      = qz * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))
      = (1 - z) * x + (qz +  1 - z) * log(1 + exp(-x))
      = (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(-x))
  Setting `l = (1 + (q - 1) * z)`, to ensure stability and avoid overflow,
  the implementation uses
      (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0))
  `logits` and `labels` must have the same type and shape.
  >>> labels = tf.constant([1., 0.5, 0.])
  >>> logits = tf.constant([1.5, -0.1, -10.])
  >>> tf.nn.weighted_cross_entropy_with_logits(
  ...     labels=labels, logits=logits, pos_weight=tf.constant(1.5)).numpy()
  array([3.0211994e-01, 8.8049585e-01, 4.5776367e-05], dtype=float32)
  >>> tf.nn.weighted_cross_entropy_with_logits(
  ...     labels=labels, logits=logits, pos_weight=tf.constant(0.5)).numpy()
  array([1.00706644e-01, 5.08297503e-01, 4.57763672e-05], dtype=float32)
  Args:
    labels: A `Tensor` of the same type and shape as `logits`, with values
      between 0 and 1 inclusive.
    logits: A `Tensor` of type `float32` or `float64`, any real numbers.
    pos_weight: A coefficient to use on the positive examples, typically a
      scalar but otherwise broadcastable to the shape of `logits`. Its value
      should be non-negative.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of the same shape as `logits` with the componentwise
    weighted logistic losses.
  Raises:
    ValueError: If `logits` and `labels` do not have the same shape.
  """
  with ops.name_scope(name, "logistic_loss", [logits, labels]) as name:
    logits = ops.convert_to_tensor(logits, name="logits")
    labels = ops.convert_to_tensor(labels, name="labels")
    try:
      labels.get_shape().assert_is_compatible_with(logits.get_shape())
    except ValueError:
      raise ValueError("logits and labels must have the same shape (%s vs %s)" %
                       (logits.get_shape(), labels.get_shape()))
    # The logistic loss formula from above is
    #   (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(-x))
    # For x < 0, a more numerically stable formula is
    #   (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(x)) - l * x
    # To avoid branching, we use the combined version
    #   (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0))
    # `log_weight` is the l = 1 + (q - 1) * z factor from the derivation;
    # it up-weights the log term wherever labels are positive.
    log_weight = 1 + (pos_weight - 1) * labels
    # `nn_ops.relu(-logits)` implements the max(-x, 0) overflow guard.
    return math_ops.add(
        (1 - labels) * logits,
        log_weight * (math_ops.log1p(math_ops.exp(-math_ops.abs(logits))) +
                      nn_ops.relu(-logits)),  # pylint: disable=invalid-unary-operand-type
        name=name)
@tf_export(v1=["nn.weighted_cross_entropy_with_logits"])
@dispatch.add_dispatch_support
@deprecated_args(None, "targets is deprecated, use labels instead", "targets")
def weighted_cross_entropy_with_logits(labels=None,
                                       logits=None,
                                       pos_weight=None,
                                       name=None,
                                       targets=None):
  """Computes a weighted cross entropy.

  V1-compatible endpoint for `weighted_cross_entropy_with_logits_v2`: it only
  adds the deprecated `targets` alias for `labels` and otherwise delegates.

  This is like `sigmoid_cross_entropy_with_logits()` except that `pos_weight`
  multiplies the positive-label term of the loss, letting one trade off recall
  and precision (`pos_weight > 1` favors recall, `pos_weight < 1` precision).
  With `x = logits`, `z = labels` and `q = pos_weight`, the elementwise loss

      (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(-x))

  is evaluated, via the numerically stable form

      (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0))

  where `l = (1 + (q - 1) * z)`. See
  `weighted_cross_entropy_with_logits_v2` for the full derivation.
  `logits` and `labels` must have the same type and shape.

  Args:
    labels: A `Tensor` of the same type and shape as `logits`.
    logits: A `Tensor` of type `float32` or `float64`.
    pos_weight: A coefficient to use on the positive examples.
    name: A name for the operation (optional).
    targets: Deprecated alias for labels.

  Returns:
    A `Tensor` of the same shape as `logits` with the componentwise
    weighted logistic losses.

  Raises:
    ValueError: If `logits` and `labels` do not have the same shape.
  """
  # Resolve the deprecated alias first, then hand off to the v2 implementation.
  labels = deprecated_argument_lookup("labels", labels, "targets", targets)
  return weighted_cross_entropy_with_logits_v2(
      labels=labels, logits=logits, pos_weight=pos_weight, name=name)
@tf_export("nn.compute_average_loss")
@dispatch.add_dispatch_support
def compute_average_loss(per_example_loss,
                         sample_weight=None,
                         global_batch_size=None):
  """Scales per-example losses with sample_weights and computes their average.

  Usage with distribution strategy and custom training loop:

  ```python
  with strategy.scope():
    def compute_loss(labels, predictions, sample_weight=None):
      # If you are using a `Loss` class instead, set reduction to `NONE` so that
      # we can do the reduction afterwards and divide by global batch size.
      per_example_loss = tf.keras.losses.sparse_categorical_crossentropy(
          labels, predictions)
      # Compute loss that is scaled by sample_weight and by global batch size.
      return tf.nn.compute_average_loss(
          per_example_loss,
          sample_weight=sample_weight,
          global_batch_size=GLOBAL_BATCH_SIZE)
  ```

  Args:
    per_example_loss: Per-example loss.
    sample_weight: Optional weighting for each example.
    global_batch_size: Optional global batch size value. Defaults to (size of
      first dimension of `losses`) * (number of replicas).

  Returns:
    Scalar loss value.
  """  # pylint: disable=g-doc-exception
  per_example_loss = ops.convert_to_tensor(per_example_loss)
  loss_dtype = per_example_loss.dtype
  with losses_util.check_per_example_loss_rank(per_example_loss):
    if sample_weight is not None:
      sample_weight = ops.convert_to_tensor(sample_weight)
      per_example_loss = losses_util.scale_losses_by_sample_weight(
          per_example_loss, sample_weight)
    # Weighting may change the dtype; cast back so the result keeps the input's.
    per_example_loss = math_ops.cast(per_example_loss, loss_dtype)

    if global_batch_size is None:
      if ds.has_strategy() and ds.in_cross_replica_context():
        raise RuntimeError(
            "You are calling `compute_average_loss` in cross replica context, "
            "while it was expected to be called in replica context.")
      # Infer the global batch size: local batch (first dim) times replicas.
      replica_count = ds.get_strategy().num_replicas_in_sync
      local_batch_size = array_ops.shape_v2(per_example_loss)[0]
      global_batch_size = local_batch_size * replica_count

    divisor = math_ops.cast(global_batch_size, loss_dtype)
    return math_ops.reduce_sum(per_example_loss) / divisor
@tf_export("nn.scale_regularization_loss")
@dispatch.add_dispatch_support
def scale_regularization_loss(regularization_loss):
  """Scales the sum of the given regularization losses by number of replicas.

  Usage with distribution strategy and custom training loop:

  ```python
  with strategy.scope():
    def compute_loss(self, label, predictions):
      per_example_loss = tf.keras.losses.sparse_categorical_crossentropy(
          labels, predictions)
      # Compute loss that is scaled by sample_weight and by global batch size.
      loss = tf.nn.compute_average_loss(
          per_example_loss,
          sample_weight=sample_weight,
          global_batch_size=GLOBAL_BATCH_SIZE)
      # Add scaled regularization losses.
      loss += tf.nn.scale_regularization_loss(tf.nn.l2_loss(weights))
      return loss
  ```

  Args:
    regularization_loss: Regularization loss.

  Returns:
    Scalar loss value.
  """  # pylint: disable=g-doc-exception
  # Must run in replica context: each replica adds the full regularization
  # term, so dividing by the replica count keeps the aggregate sum correct.
  if ds.has_strategy() and ds.in_cross_replica_context():
    raise RuntimeError(
        "You are calling `scale_regularization_loss` in cross replica context, "
        "while it was expected to be called in replica context.")

  replica_count = ds.get_strategy().num_replicas_in_sync
  return math_ops.reduce_sum(regularization_loss) / replica_count
@tf_export(v1=["nn.relu_layer"])
@dispatch.add_dispatch_support
def relu_layer(x, weights, biases, name=None):
  """Computes Relu(x * weight + biases).

  Args:
    x: a 2D tensor.  Dimensions typically: batch, in_units
    weights: a 2D tensor.  Dimensions typically: in_units, out_units
    biases: a 1D tensor.  Dimensions: out_units
    name: A name for the operation (optional).  If not specified
      "nn_relu_layer" is used.

  Returns:
    A 2-D Tensor computing relu(matmul(x, weights) + biases).
    Dimensions typically: batch, out_units.
  """
  with ops.name_scope(name, "relu_layer", [x, weights, biases]) as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    # Linear transform, then bias, then the rectifier.
    matmul_out = math_ops.matmul(x, weights)
    preactivation = nn_ops.bias_add(matmul_out, biases)
    return nn_ops.relu(preactivation, name=name)
@tf_export("nn.silu", "nn.swish")
@dispatch.add_dispatch_support
@custom_gradient.custom_gradient
def swish(features):
  # pylint: disable=g-doc-args
  """Computes the SiLU or Swish activation function: `x * sigmoid(x)`.
  The SiLU activation function was introduced in "Gaussian Error Linear Units
  (GELUs)" [Hendrycks et al. 2016](https://arxiv.org/abs/1606.08415) and
  "Sigmoid-Weighted Linear Units for Neural Network Function Approximation in
  Reinforcement Learning"
  [Elfwing et al. 2017](https://arxiv.org/abs/1702.03118) and was independently
  discovered (and called swish) in "Searching for Activation Functions"
  [Ramachandran et al. 2017](https://arxiv.org/abs/1710.05941)
  Args:
    features: A `Tensor` representing preactivation values.
  Returns:
    The activation value.
  """
  # pylint: enable=g-doc-args
  features = ops.convert_to_tensor(features, name="features")
  def grad(dy):
    """Gradient for the Swish activation function"""
    # Naively, x * tf.nn.sigmoid(x) requires keeping both x and sigmoid(x)
    # around for backprop, effectively doubling the tensor's memory consumption.
    # We use a control dependency here so that sigmoid(features) is re-computed
    # during backprop (the control dep prevents it being de-duped with the
    # forward pass) and we can free the sigmoid(features) expression immediately
    # after use during the forward pass.
    with ops.control_dependencies([dy]):
      sigmoid_features = math_ops.sigmoid(features)
    # Analytic derivative: d/dx [x * sigmoid(x)]
    #   = sigmoid(x) * (1 + x * (1 - sigmoid(x))).
    activation_grad = (
        sigmoid_features * (1.0 + features * (1.0 - sigmoid_features)))
    return dy * activation_grad
  return features * math_ops.sigmoid(features), grad
# pylint: disable=redefined-builtin
@tf_export("linalg.normalize")
@dispatch.add_dispatch_support
def normalize(tensor, ord="euclidean", axis=None, name=None):
  """Normalizes `tensor` along dimension `axis` using specified norm.

  This uses `tf.linalg.norm` to compute the norm along `axis`. It can compute
  several different vector norms (the 1-norm, the Euclidean or 2-norm, the
  inf-norm, and in general the p-norm for p > 0) and matrix norms (Frobenius,
  1-norm, 2-norm and inf-norm).

  Args:
    tensor: `Tensor` of types `float32`, `float64`, `complex64`, `complex128`
    ord: Order of the norm. Supported values are `'fro'`, `'euclidean'`, `1`,
      `2`, `np.inf` and any positive real number yielding the corresponding
      p-norm. Default is `'euclidean'` which is equivalent to Frobenius norm if
      `tensor` is a matrix and equivalent to 2-norm for vectors.
      Some restrictions apply: a) The Frobenius norm `'fro'` is not defined for
      vectors, b) If axis is a 2-tuple (matrix norm), only `'euclidean'`,
      '`fro'`, `1`, `2`, `np.inf` are supported. See the description of `axis`
      on how to compute norms for a batch of vectors or matrices stored in a
      tensor.
    axis: If `axis` is `None` (the default), the input is considered a vector
      and a single vector norm is computed over the entire set of values in the
      tensor, i.e. `norm(tensor, ord=ord)` is equivalent to
      `norm(reshape(tensor, [-1]), ord=ord)`. If `axis` is a Python integer, the
      input is considered a batch of vectors, and `axis` determines the axis in
      `tensor` over which to compute vector norms. If `axis` is a 2-tuple of
      Python integers it is considered a batch of matrices and `axis` determines
      the axes in `tensor` over which to compute a matrix norm.
      Negative indices are supported. Example: If you are passing a tensor that
      can be either a matrix or a batch of matrices at runtime, pass
      `axis=[-2,-1]` instead of `axis=None` to make sure that matrix norms are
      computed.
    name: The name of the op.

  Returns:
    normalized: A normalized `Tensor` with the same shape as `tensor`.
    norm: The computed norms with the same shape and dtype `tensor` but the
      final axis is 1 instead. Same as running
      `tf.cast(tf.linalg.norm(tensor, ord, axis keepdims=True), tensor.dtype)`.

  Raises:
    ValueError: If `ord` or `axis` is invalid.
  """
  with ops.name_scope(name, "normalize", [tensor]) as name:
    tensor = ops.convert_to_tensor(tensor)
    # keepdims=True keeps the reduced axes so the division broadcasts; the
    # cast restores the input dtype (norms of complex tensors are real).
    norm = math_ops.cast(
        linalg_ops.norm(tensor, ord, axis, keepdims=True), tensor.dtype)
    return tensor / norm, norm
@tf_export("math.l2_normalize", "linalg.l2_normalize", "nn.l2_normalize",
           v1=["math.l2_normalize", "linalg.l2_normalize", "nn.l2_normalize"])
@dispatch.add_dispatch_support
@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def l2_normalize(x, axis=None, epsilon=1e-12, name=None, dim=None):
  """Normalizes along dimension `axis` using an L2 norm.
  For a 1-D tensor with `axis = 0`, computes
      output = x / sqrt(max(sum(x**2), epsilon))
  For `x` with more dimensions, independently normalizes each 1-D slice along
  dimension `axis`.
  1-D tensor example:
  >>> x = tf.constant([3.0, 4.0])
  >>> tf.math.l2_normalize(x).numpy()
  array([0.6, 0.8], dtype=float32)
  2-D tensor example:
  >>> x = tf.constant([[3.0], [4.0]])
  >>> tf.math.l2_normalize(x, 0).numpy()
  array([[0.6],
       [0.8]], dtype=float32)
  >>> x = tf.constant([[3.0], [4.0]])
  >>> tf.math.l2_normalize(x, 1).numpy()
  array([[1.],
       [1.]], dtype=float32)
  Args:
    x: A `Tensor`.
    axis: Dimension along which to normalize.  A scalar or a vector of
      integers.
    epsilon: A lower bound value for the norm. Will use `sqrt(epsilon)` as the
      divisor if `norm < sqrt(epsilon)`.
    name: A name for this operation (optional).
    dim: Deprecated, do not use.
  Returns:
    A `Tensor` with the same shape as `x`.
  """
  axis = deprecated_argument_lookup("axis", axis, "dim", dim)
  with ops.name_scope(name, "l2_normalize", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype.is_complex:
      # For complex x the squared magnitude is re(x)^2 + im(x)^2; the norm is
      # real-valued, so real and imaginary parts are scaled by the same
      # real inverse-norm and recombined into a complex result.
      square_real = math_ops.square(math_ops.real(x))
      square_imag = math_ops.square(math_ops.imag(x))
      square_sum = math_ops.real(
          math_ops.reduce_sum(square_real + square_imag, axis, keepdims=True))
      x_inv_norm = math_ops.rsqrt(math_ops.maximum(square_sum, epsilon))
      norm_real = math_ops.multiply(math_ops.real(x), x_inv_norm)
      norm_imag = math_ops.multiply(math_ops.imag(x), x_inv_norm)
      return math_ops.complex(norm_real, norm_imag, name=name)
    # Real case: x * rsqrt(max(sum(x^2), epsilon)); the epsilon floor guards
    # against division by zero for all-zero slices.
    square_sum = math_ops.reduce_sum(math_ops.square(x), axis, keepdims=True)
    x_inv_norm = math_ops.rsqrt(math_ops.maximum(square_sum, epsilon))
    return math_ops.multiply(x, x_inv_norm, name=name)
def _count_nonzero(input_tensor, dtype=dtypes.int64):
  """Same as math_ops.count_nonzero.

  The reduction is done in dtype, which can be faster for 32-bit dtypes.

  Args:
    input_tensor: numeric tensor
    dtype: reduction dtype

  Returns:
    number of nonzero values with type dtype
  """
  with ops.name_scope("count_nonzero", values=[input_tensor]):
    # Compare against a scalar zero of the input's dtype, then sum the
    # resulting 0/1 mask in the requested reduction dtype.
    zero = array_ops.zeros([], dtype=input_tensor.dtype)
    is_nonzero = math_ops.not_equal(input_tensor, zero)
    return math_ops.reduce_sum(
        math_ops.cast(is_nonzero, dtype=dtype), name="nonzero_count")
@tf_export("math.zero_fraction", "nn.zero_fraction")
@dispatch.add_dispatch_support
def zero_fraction(value, name=None):
  """Returns the fraction of zeros in `value`.

  If `value` is empty, the result is `nan`.

  This is useful in summaries to measure and report sparsity.  For example,

  ```python
      z = tf.nn.relu(...)
      summ = tf.compat.v1.summary.scalar('sparsity', tf.nn.zero_fraction(z))
  ```

  Args:
    value: A tensor of numeric type.
    name: A name for the operation (optional).

  Returns:
    The fraction of zeros in `value`, with type `float32`.
  """
  with ops.name_scope(name, "zero_fraction", [value]):
    value = ops.convert_to_tensor(value, name="value")
    size = array_ops.size(value, out_type=dtypes.int64)

    # An int32 reduction is cheaper on memory/CPU; use it whenever the element
    # count fits in int32, falling back to int64 otherwise.
    def _count_in_int32():
      return math_ops.cast(
          _count_nonzero(value, dtype=dtypes.int32), dtype=dtypes.int64)

    def _count_in_int64():
      return _count_nonzero(value, dtype=dtypes.int64)

    num_nonzero = control_flow_ops.cond(
        size <= dtypes.int32.max,
        true_fn=_count_in_int32,
        false_fn=_count_in_int64)
    with ops.name_scope("counts_to_fraction"):
      num_zero = size - num_nonzero
      fraction = math_ops.cast(num_zero, dtype=dtypes.float32) / math_ops.cast(
          size, dtype=dtypes.float32)
  return array_ops.identity(fraction, "fraction")
# pylint: disable=redefined-builtin
@tf_export(v1=["nn.depthwise_conv2d"])
@dispatch.add_dispatch_support
def depthwise_conv2d(input,
                     filter,
                     strides,
                     padding,
                     rate=None,
                     name=None,
                     data_format=None,
                     dilations=None):
  """Depthwise 2-D convolution.
  Given a 4D input tensor ('NHWC' or 'NCHW' data formats)
  and a filter tensor of shape
  `[filter_height, filter_width, in_channels, channel_multiplier]`
  containing `in_channels` convolutional filters of depth 1, `depthwise_conv2d`
  applies a different filter to each input channel (expanding from 1 channel
  to `channel_multiplier` channels for each), then concatenates the results
  together.  The output has `in_channels * channel_multiplier` channels.
  In detail, with the default NHWC format,
      output[b, i, j, k * channel_multiplier + q] = sum_{di, dj}
           filter[di, dj, k, q] * input[b, strides[1] * i + rate[0] * di,
                                           strides[2] * j + rate[1] * dj, k]
  Must have `strides[0] = strides[3] = 1`.  For the most common case of the
  same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
  If any value in `rate` is greater than 1, we perform atrous depthwise
  convolution, in which case all values in the `strides` tensor must be equal
  to 1.
  Usage Example:
  >>> x = np.array([
  ...     [1., 2.],
  ...     [3., 4.],
  ...     [5., 6.]
  ... ], dtype=np.float32).reshape((1, 3, 2, 1))
  >>> kernel = np.array([
  ...     [1., 2.],
  ...     [3., 4]
  ... ], dtype=np.float32).reshape((2, 1, 1, 2))
  >>> tf.compat.v1.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1],
  ...                                  padding='VALID').numpy()
    array([[[[10., 14.],
             [14., 20.]],
            [[18., 26.],
             [22., 32.]]]], dtype=float32)
  >>> tf.compat.v1.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1],
  ...                                  padding=[[0, 0], [1, 0], [1, 0], [0, 0]]
  ...                                 ).numpy()
    array([[[[ 0.,  0.],
             [ 3.,  4.],
             [ 6.,  8.]],
            [[ 0.,  0.],
             [10., 14.],
             [14., 20.]],
            [[ 0.,  0.],
             [18., 26.],
             [22., 32.]]]], dtype=float32)
  Args:
    input: 4-D with shape according to `data_format`.
    filter: 4-D with shape
      `[filter_height, filter_width, in_channels, channel_multiplier]`.
    strides: 1-D of size 4.  The stride of the sliding window for each
      dimension of `input`.
    padding: Controls how to pad the image before applying the convolution. Can
      be the string `"SAME"` or `"VALID"` indicating the type of padding
      algorithm to use, or a list indicating the explicit paddings at the start
      and end of each dimension. When explicit padding is used and data_format
      is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom],
      [pad_left, pad_right], [0, 0]]`. When explicit padding used and
      data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
      [pad_top, pad_bottom], [pad_left, pad_right]]`.
    rate: 1-D of size 2. The dilation rate in which we sample input values
      across the `height` and `width` dimensions in atrous convolution. If it is
      greater than 1, then all values of strides must be 1.
    name: A name for this operation (optional).
    data_format: The data format for input. Either "NHWC" (default) or "NCHW".
    dilations: Alias of rate.
  Returns:
    A 4-D `Tensor` with shape according to `data_format`.  E.g., for
    "NHWC" format, shape is
    `[batch, out_height, out_width, in_channels * channel_multiplier].`
  """
  # `dilations` and `rate` are aliases; resolve to a single value up front.
  rate = deprecated_argument_lookup("dilations", dilations, "rate", rate)
  with ops.name_scope(name, "depthwise", [input, filter]) as name:
    input = ops.convert_to_tensor(input, name="tensor_in")
    filter = ops.convert_to_tensor(filter, name="filter_in")
    if rate is None:
      rate = [1, 1]
    # Use depthwise_conv2d_native if executing on TPU.
    # On this path the dilation rate is passed directly to the native op as a
    # 4-element `dilations` vector laid out to match `data_format`.
    if device_context.enclosing_tpu_context() is not None:
      if data_format == "NCHW":
        dilations = [1, 1, rate[0], rate[1]]
      else:
        dilations = [1, rate[0], rate[1], 1]
      return nn_ops.depthwise_conv2d_native(
          input=input,
          filter=filter,
          strides=strides,
          padding=padding,
          data_format=data_format,
          dilations=dilations,
          name=name)
    # Callback handed to `with_space_to_batch`: it is called with a transformed
    # input and the padding to use for the underlying non-dilated native op
    # (the middle argument — presumably the effective dilation — is unused).
    def op(input_converted, _, padding):
      return nn_ops.depthwise_conv2d_native(
          input=input_converted,
          filter=filter,
          strides=strides,
          padding=padding,
          data_format=data_format,
          name=name)
    # Off-TPU, dilation is implemented via the space-to-batch transformation
    # wrapped around the non-dilated depthwise op.
    return nn_ops.with_space_to_batch(
        input=input,
        filter_shape=array_ops.shape(filter),
        dilation_rate=rate,
        padding=padding,
        data_format=data_format,
        op=op)
@tf_export("nn.depthwise_conv2d", v1=[])
@dispatch.add_dispatch_support
def depthwise_conv2d_v2(input,
                        filter,
                        strides,
                        padding,
                        data_format=None,
                        dilations=None,
                        name=None):
  """Depthwise 2-D convolution.
  Given a 4D input tensor ('NHWC' or 'NCHW' data formats)
  and a filter tensor of shape
  `[filter_height, filter_width, in_channels, channel_multiplier]`
  containing `in_channels` convolutional filters of depth 1, `depthwise_conv2d`
  applies a different filter to each input channel (expanding from 1 channel
  to `channel_multiplier` channels for each), then concatenates the results
  together.  The output has `in_channels * channel_multiplier` channels.
  In detail, with the default NHWC format,
      output[b, i, j, k * channel_multiplier + q] = sum_{di, dj}
           filter[di, dj, k, q] * input[b, strides[1] * i + rate[0] * di,
                                           strides[2] * j + rate[1] * dj, k]
  Must have `strides[0] = strides[3] = 1`.  For the most common case of the
  same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
  If any value in `rate` is greater than 1, we perform atrous depthwise
  convolution, in which case all values in the `strides` tensor must be equal
  to 1.
  Usage Example:
  >>> x = np.array([
  ...     [1., 2.],
  ...     [3., 4.],
  ...     [5., 6.]
  ... ], dtype=np.float32).reshape((1, 3, 2, 1))
  >>> kernel = np.array([
  ...     [1., 2.],
  ...     [3., 4]
  ... ], dtype=np.float32).reshape((2, 1, 1, 2))
  >>> tf.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1],
  ...                        padding='VALID').numpy()
    array([[[[10., 14.],
             [14., 20.]],
            [[18., 26.],
             [22., 32.]]]], dtype=float32)
  >>> tf.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1],
  ...                        padding=[[0, 0], [1, 0], [1, 0], [0, 0]]).numpy()
    array([[[[ 0.,  0.],
             [ 3.,  4.],
             [ 6.,  8.]],
            [[ 0.,  0.],
             [10., 14.],
             [14., 20.]],
            [[ 0.,  0.],
             [18., 26.],
             [22., 32.]]]], dtype=float32)
  Args:
    input: 4-D with shape according to `data_format`.
    filter: 4-D with shape
      `[filter_height, filter_width, in_channels, channel_multiplier]`.
    strides: 1-D of size 4.  The stride of the sliding window for each
      dimension of `input`.
    padding: Controls how to pad the image before applying the convolution. Can
      be the string `"SAME"` or `"VALID"` indicating the type of padding
      algorithm to use, or a list indicating the explicit paddings at the start
      and end of each dimension. When explicit padding is used and data_format
      is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom],
      [pad_left, pad_right], [0, 0]]`. When explicit padding used and
      data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
      [pad_top, pad_bottom], [pad_left, pad_right]]`.
    data_format: The data format for input. Either "NHWC" (default) or "NCHW".
    dilations: 1-D of size 2. The dilation rate in which we sample input values
      across the `height` and `width` dimensions in atrous convolution. If it is
      greater than 1, then all values of strides must be 1.
    name: A name for this operation (optional).
  Returns:
    A 4-D `Tensor` with shape according to `data_format`.  E.g., for
    "NHWC" format, shape is
    `[batch, out_height, out_width, in_channels * channel_multiplier].`
  """
  # Thin wrapper over the v1 implementation: the v2 `dilations` argument maps
  # onto the v1 `rate` argument; everything else passes through unchanged.
  return depthwise_conv2d(input=input,
                          filter=filter,
                          strides=strides,
                          padding=padding,
                          rate=dilations,
                          name=name,
                          data_format=data_format)
# pylint: enable=redefined-builtin
# pylint: disable=redefined-builtin,line-too-long
@tf_export(v1=["nn.separable_conv2d"])
@dispatch.add_dispatch_support
def separable_conv2d(input,
                     depthwise_filter,
                     pointwise_filter,
                     strides,
                     padding,
                     rate=None,
                     name=None,
                     data_format=None,
                     dilations=None):
  """2-D convolution with separable filters.
  Performs a depthwise convolution that acts separately on channels followed by
  a pointwise convolution that mixes channels.  Note that this is separability
  between dimensions `[1, 2]` and `3`, not spatial separability between
  dimensions `1` and `2`.
  In detail, with the default NHWC format,
      output[b, i, j, k] = sum_{di, dj, q, r}
          input[b, strides[1] * i + di, strides[2] * j + dj, q] *
          depthwise_filter[di, dj, q, r] *
          pointwise_filter[0, 0, q * channel_multiplier + r, k]
  `strides` controls the strides for the depthwise convolution only, since
  the pointwise convolution has implicit strides of `[1, 1, 1, 1]`.  Must have
  `strides[0] = strides[3] = 1`.  For the most common case of the same
  horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
  If any value in `rate` is greater than 1, we perform atrous depthwise
  convolution, in which case all values in the `strides` tensor must be equal
  to 1.
  Args:
    input: 4-D `Tensor` with shape according to `data_format`.
    depthwise_filter: 4-D `Tensor` with shape
      `[filter_height, filter_width, in_channels, channel_multiplier]`.
      Contains `in_channels` convolutional filters of depth 1.
    pointwise_filter: 4-D `Tensor` with shape
      `[1, 1, channel_multiplier * in_channels, out_channels]`.  Pointwise
      filter to mix channels after `depthwise_filter` has convolved spatially.
    strides: 1-D of size 4.  The strides for the depthwise convolution for
      each dimension of `input`.
    padding: Controls how to pad the image before applying the depthwise
      convolution. Can be the string `"SAME"` or `"VALID"` indicating the type
      of padding algorithm to use, or a Python list indicating the explicit
      paddings at the start and end of each dimension. When explicit padding is
      used and data_format is `"NHWC"`, this should be in the form `[[0, 0],
      [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit
      padding used and data_format is `"NCHW"`, this should be in the form
      `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`.
    rate: 1-D of size 2. The dilation rate in which we sample input values
      across the `height` and `width` dimensions in atrous convolution. If it is
      greater than 1, then all values of strides must be 1.
    name: A name for this operation (optional).
    data_format: The data format for input. Either "NHWC" (default) or "NCHW".
    dilations: Alias of rate.
  Returns:
    A 4-D `Tensor` with shape according to 'data_format'. For
      example, with data_format="NHWC", shape is [batch, out_height,
      out_width, out_channels].
  """
  # `dilations` and `rate` are aliases; resolve to a single value up front.
  rate = deprecated_argument_lookup("dilations", dilations, "rate", rate)
  with ops.name_scope(name, "separable_conv2d",
                      [input, depthwise_filter, pointwise_filter]) as name:
    input = ops.convert_to_tensor(input, name="tensor_in")
    depthwise_filter = ops.convert_to_tensor(
        depthwise_filter, name="depthwise_filter")
    pointwise_filter = ops.convert_to_tensor(
        pointwise_filter, name="pointwise_filter")
    # The pointwise filter must be spatially 1x1 — it only mixes channels.
    pointwise_filter_shape = pointwise_filter.get_shape().with_rank(4)
    pointwise_filter_shape.dims[0].assert_is_compatible_with(1)
    pointwise_filter_shape.dims[1].assert_is_compatible_with(1)
    if rate is None:
      rate = [1, 1]
    # The layout of the ops in the graph are expected to be as follows:
    # depthwise_conv2d  // Conv2D op corresponding to native depthwise conv.
    # separable_conv2d  // Conv2D op corresponding to the pointwise conv.
    # Callback handed to `with_space_to_batch`: it is called with a transformed
    # input and the padding to use for the underlying non-dilated depthwise op
    # (the middle argument — presumably the effective dilation — is unused).
    def op(input_converted, _, padding):
      return nn_ops.depthwise_conv2d_native(
          input=input_converted,
          filter=depthwise_filter,
          strides=strides,
          padding=padding,
          data_format=data_format,
          name="depthwise")
    depthwise = nn_ops.with_space_to_batch(
        input=input,
        filter_shape=array_ops.shape(depthwise_filter),
        dilation_rate=rate,
        padding=padding,
        data_format=data_format,
        op=op)
    # Channel-mixing stage: a 1x1 stride-1 VALID conv over the depthwise
    # output.
    return nn_ops.conv2d(
        depthwise,
        pointwise_filter, [1, 1, 1, 1],
        padding="VALID",
        data_format=data_format,
        name=name)
@tf_export("nn.separable_conv2d", v1=[])
@dispatch.add_dispatch_support
def separable_conv2d_v2(
    input,
    depthwise_filter,
    pointwise_filter,
    strides,
    padding,
    data_format=None,
    dilations=None,
    name=None,
):
  """2-D convolution with separable filters.

  Runs a depthwise convolution (acting on each input channel independently)
  followed by a 1x1 pointwise convolution that mixes the resulting channels.
  This is separability between the spatial dimensions `[1, 2]` and the channel
  dimension `3`, not spatial separability between dimensions `1` and `2`.

  In detail, with the default NHWC format,

      output[b, i, j, k] = sum_{di, dj, q, r}
          input[b, strides[1] * i + di, strides[2] * j + dj, q] *
          depthwise_filter[di, dj, q, r] *
          pointwise_filter[0, 0, q * channel_multiplier + r, k]

  `strides` applies to the depthwise stage only; the pointwise stage always
  uses implicit strides of `[1, 1, 1, 1]`. Must have
  `strides[0] = strides[3] = 1`. For the most common case of the same
  horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
  If any value in `dilations` is greater than 1, atrous depthwise convolution
  is performed, and then every value in `strides` must be 1.

  Args:
    input: 4-D `Tensor` with shape according to `data_format`.
    depthwise_filter: 4-D `Tensor` with shape `[filter_height, filter_width,
      in_channels, channel_multiplier]`. Contains `in_channels` convolutional
      filters of depth 1.
    pointwise_filter: 4-D `Tensor` with shape `[1, 1, channel_multiplier *
      in_channels, out_channels]`. Pointwise filter to mix channels after
      `depthwise_filter` has convolved spatially.
    strides: 1-D of size 4. The strides for the depthwise convolution for each
      dimension of `input`.
    padding: Controls how to pad the image before applying the depthwise
      convolution. Can be the string `"SAME"` or `"VALID"` indicating the type
      of padding algorithm to use, or a Python list indicating the explicit
      paddings at the start and end of each dimension. When explicit padding
      is used and data_format is `"NHWC"`, this should be in the form
      `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When
      explicit padding is used and data_format is `"NCHW"`, this should be in
      the form `[[0, 0], [0, 0], [pad_top, pad_bottom],
      [pad_left, pad_right]]`.
    data_format: The data format for input. Either "NHWC" (default) or "NCHW".
    dilations: 1-D of size 2. The dilation rate in which we sample input
      values across the `height` and `width` dimensions in atrous convolution.
      If it is greater than 1, then all values of strides must be 1.
    name: A name for this operation (optional).

  Returns:
    A 4-D `Tensor` with shape according to 'data_format'. For
    example, with data_format="NHWC", shape is [batch, out_height,
    out_width, out_channels].
  """
  # Delegate to the v1 implementation; `dilations` is simply the v2 name
  # for the v1 `rate` argument.
  return separable_conv2d(
      input,
      depthwise_filter,
      pointwise_filter,
      strides,
      padding,
      data_format=data_format,
      rate=dilations,
      name=name)
# pylint: enable=redefined-builtin,line-too-long
@tf_export(v1=["nn.sufficient_statistics"])
@dispatch.add_dispatch_support
def sufficient_statistics(x, axes, shift=None, keep_dims=None, name=None,
                          keepdims=None):
  """Calculate the sufficient statistics for the mean and variance of `x`.

  These sufficient statistics are computed using the one pass algorithm on
  an input that's optionally shifted. See:
  https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data

  For example:
  >>> t = [[1, 2, 3], [4, 5, 6]]
  >>> sufficient_statistics(t, [1])
  (<tf.Tensor: shape=(), dtype=int32, numpy=3>, <tf.Tensor: shape=(2,),
  dtype=int32, numpy=array([ 6, 15], dtype=int32)>, <tf.Tensor: shape=(2,),
  dtype=int32, numpy=array([14, 77], dtype=int32)>, None)
  >>> sufficient_statistics(t, [-1])
  (<tf.Tensor: shape=(), dtype=int32, numpy=3>, <tf.Tensor: shape=(2,),
  dtype=int32, numpy=array([ 6, 15], dtype=int32)>, <tf.Tensor: shape=(2,),
  dtype=int32, numpy=array([14, 77], dtype=int32)>, None)

  Args:
    x: A `Tensor`.
    axes: Array of ints. Axes along which to compute mean and variance. As in
      Python, the axes can also be negative numbers. A negative axis is
      interpreted as counting from the end of the rank, i.e., axis +
      rank(values)-th dimension.
    shift: A `Tensor` containing the value by which to shift the data for
      numerical stability, or `None` if no shift is to be performed. A shift
      close to the true mean provides the most numerically stable results.
    keep_dims: produce statistics with the same dimensionality as the input.
    name: Name used to scope the operations that compute the sufficient stats.
    keepdims: Alias for keep_dims.

  Returns:
    Four `Tensor` objects of the same type as `x`:

    * the count (number of elements to average over).
    * the (possibly shifted) sum of the elements in the array.
    * the (possibly shifted) sum of squares of the elements in the array.
    * the shift by which the mean must be corrected or None if `shift` is None.
  """
  # Deduplicate axes so no dimension is counted twice.
  axes = list(set(axes))
  keep_dims = deprecated_argument_lookup(
      "keepdims", keepdims, "keep_dims", keep_dims)
  if keep_dims is None:
    keep_dims = False
  with ops.name_scope(name, "sufficient_statistics", [x, shift]):
    x = ops.convert_to_tensor(x, name="x")
    x_shape = x.get_shape()
    static_dims_known = x_shape.rank is not None and all(
        x_shape.dims[d].value is not None for d in axes)
    if static_dims_known:
      # Every reduced dimension has a static size, so the element count can
      # be folded into a constant at graph-construction time.
      count = 1
      for axis in axes:
        count *= x_shape.dims[axis].value
      count = constant_op.constant(count, dtype=x.dtype)
    else:
      # At least one reduced dimension is dynamic: compute the count at
      # runtime. Axes must be normalized to non-negative values for gather.
      rank = array_ops.rank(x)
      positive_axes = [axis + rank if axis < 0 else axis for axis in axes]
      reduced_dims = array_ops.gather(
          math_ops.cast(array_ops.shape(x), x.dtype), positive_axes)
      count = math_ops.reduce_prod(reduced_dims, name="count")
    if shift is None:
      # No shift requested: accumulate raw values and their squares.
      shifted = x
      shifted_sq = math_ops.square(x)
    else:
      # Shift the data before accumulating for numerical stability.
      shift = ops.convert_to_tensor(shift, name="shift")
      shifted = math_ops.subtract(x, shift)
      shifted_sq = math_ops.squared_difference(x, shift)
    sum_shifted = math_ops.reduce_sum(
        shifted, axes, keepdims=keep_dims, name="mean_ss")
    sum_shifted_sq = math_ops.reduce_sum(
        shifted_sq, axes, keepdims=keep_dims, name="var_ss")
  return count, sum_shifted, sum_shifted_sq, shift
@tf_export("nn.sufficient_statistics", v1=[])
@dispatch.add_dispatch_support
def sufficient_statistics_v2(x, axes, shift=None, keepdims=False, name=None):
  """Calculate the sufficient statistics for the mean and variance of `x`.

  These sufficient statistics are computed using the one pass algorithm on
  an input that's optionally shifted. See:
  https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data

  Args:
    x: A `Tensor`.
    axes: Array of ints. Axes along which to compute mean and variance.
    shift: A `Tensor` containing the value by which to shift the data for
      numerical stability, or `None` if no shift is to be performed. A shift
      close to the true mean provides the most numerically stable results.
    keepdims: produce statistics with the same dimensionality as the input.
    name: Name used to scope the operations that compute the sufficient stats.

  Returns:
    Four `Tensor` objects of the same type as `x`:

    * the count (number of elements to average over).
    * the (possibly shifted) sum of the elements in the array.
    * the (possibly shifted) sum of squares of the elements in the array.
    * the shift by which the mean must be corrected or None if `shift` is None.
  """
  # Thin v2 wrapper: identical to the v1 op, except that `keepdims` replaces
  # the deprecated `keep_dims` spelling and defaults to False.
  return sufficient_statistics(
      x, axes, shift=shift, keep_dims=keepdims, name=name)
@tf_export("nn.normalize_moments")
@dispatch.add_dispatch_support
def normalize_moments(counts, mean_ss, variance_ss, shift, name=None):
  """Calculates the mean and variance from sufficient statistics.

  Args:
    counts: A `Tensor` containing the total count of the data (one value).
    mean_ss: A `Tensor` containing the mean sufficient statistics: the
      (possibly shifted) sum of the elements to average over.
    variance_ss: A `Tensor` containing the variance sufficient statistics: the
      (possibly shifted) squared sum of the data to compute the variance over.
    shift: A `Tensor` containing the value by which the data is shifted for
      numerical stability, or `None` if no shift was performed.
    name: Name used to scope the operations that compute the moments.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  with ops.name_scope(name, "normalize", [counts, mean_ss, variance_ss, shift]):
    # Multiply by the reciprocal of the count so the expensive division
    # happens only once.
    inv_count = math_ops.reciprocal(counts, name="divisor")
    if shift is None:
      # No shift was applied: the scaled sum already is the mean.
      mean = math_ops.multiply(mean_ss, inv_count, name="mean")
      scaled_mean = mean
    else:
      # Undo the stabilizing shift after averaging.
      scaled_mean = math_ops.multiply(mean_ss, inv_count, name="shifted_mean")
      mean = math_ops.add(scaled_mean, shift, name="mean")
    # Var[x] = E[x^2] - E[x]^2, evaluated on the (shifted) statistics.
    variance = math_ops.subtract(
        math_ops.multiply(variance_ss, inv_count),
        math_ops.square(scaled_mean),
        name="variance")
  return (mean, variance)
@tf_export(v1=["nn.moments"])
@dispatch.add_dispatch_support
def moments(
    x,
    axes,
    shift=None,  # pylint: disable=unused-argument
    name=None,
    keep_dims=None,
    keepdims=None):
  """Calculate the mean and variance of `x`.

  The mean and variance are calculated by aggregating the contents of `x`
  across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean and
  variance of a vector.

  Note: shift is currently not used; the true mean is computed and used.

  When using these moments for batch normalization (see
  `tf.nn.batch_normalization`):

  * for so-called "global normalization", used with convolutional filters with
    shape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`.
  * for simple batch normalization pass `axes=[0]` (batch only).

  Args:
    x: A `Tensor`.
    axes: Array of ints. Axes along which to compute mean and variance.
    shift: Not used in the current implementation.
    name: Name used to scope the operations that compute the moments.
    keep_dims: produce moments with the same dimensionality as the input.
    keepdims: Alias to keep_dims.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  keep_dims = deprecated_argument_lookup(
      "keepdims", keepdims, "keep_dims", keep_dims)
  if keep_dims is None:
    keep_dims = False
  with ops.name_scope(name, "moments", [x, axes]):
    # fp16 lacks the dynamic range needed to accumulate sufficient
    # statistics, so do the arithmetic in float32 and cast the resulting
    # moments back to fp16 at the end.
    needs_cast = x.dtype == dtypes.float16
    y = math_ops.cast(x, dtypes.float32) if needs_cast else x
    # True mean, with dims kept so it broadcasts against y below.
    mean = math_ops.reduce_mean(y, axes, keepdims=True, name="mean")
    # Sample (biased) variance, not the unbiased estimator. stop_gradient
    # does not change the gradient that flows back to the mean from the
    # variance term, because that gradient is zero.
    variance = math_ops.reduce_mean(
        math_ops.squared_difference(y, array_ops.stop_gradient(mean)),
        axes,
        keepdims=True,
        name="variance")
    if not keep_dims:
      mean = array_ops.squeeze(mean, axes)
      variance = array_ops.squeeze(variance, axes)
    if needs_cast:
      return (math_ops.cast(mean, dtypes.float16),
              math_ops.cast(variance, dtypes.float16))
    return (mean, variance)
@tf_export("nn.moments", v1=[])
@dispatch.add_dispatch_support
def moments_v2(
    x,
    axes,
    shift=None,
    keepdims=False,
    name=None):
  """Calculates the mean and variance of `x`.

  The mean and variance are calculated by aggregating the contents of `x`
  across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean and
  variance of a vector.

  Note: shift is currently not used; the true mean is computed and used.

  When using these moments for batch normalization (see
  `tf.nn.batch_normalization`):

  * for so-called "global normalization", used with convolutional filters with
    shape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`.
  * for simple batch normalization pass `axes=[0]` (batch only).

  Args:
    x: A `Tensor`.
    axes: Array of ints. Axes along which to compute mean and variance.
    shift: Not used in the current implementation.
    keepdims: produce moments with the same dimensionality as the input.
    name: Name used to scope the operations that compute the moments.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  # Thin v2 wrapper over the v1 implementation; only the keyword spelling
  # of `keepdims` differs.
  return moments(x, axes, shift=shift, name=name, keep_dims=keepdims)
@tf_export(v1=["nn.weighted_moments"])
@dispatch.add_dispatch_support
def weighted_moments(x, axes, frequency_weights, name=None, keep_dims=None,
                     keepdims=None):
  """Returns the frequency-weighted mean and variance of `x`.

  Args:
    x: A tensor.
    axes: 1-d tensor of int32 values; these are the axes along which
      to compute mean and variance.
    frequency_weights: A tensor of positive weights which can be
      broadcast with x.
    name: Name used to scope the operation.
    keep_dims: Produce moments with the same dimensionality as the input.
    keepdims: Alias of keep_dims.

  Returns:
    Two tensors: `weighted_mean` and `weighted_variance`.
  """
  keep_dims = deprecated_argument_lookup(
      "keepdims", keepdims, "keep_dims", keep_dims)
  if keep_dims is None:
    keep_dims = False
  with ops.name_scope(name, "weighted_moments", [x, frequency_weights, axes]):
    x = ops.convert_to_tensor(x, name="x")
    frequency_weights = ops.convert_to_tensor(
        frequency_weights, name="frequency_weights")
    # A simple two-pass method, unlike moments(). The fp16 precision caveat
    # described in moments() applies here as well: compute in float32.
    needs_cast = x.dtype == dtypes.float16
    if needs_cast:
      x = math_ops.cast(x, dtypes.float32)
    if frequency_weights.dtype != x.dtype:
      frequency_weights = math_ops.cast(frequency_weights, x.dtype)
    # All reductions below keep the reduced dims, regardless of `keep_dims`,
    # so intermediate results stay broadcast-compatible with the inputs.
    weighted_sum = math_ops.reduce_sum(
        frequency_weights * x, axes, name="weighted_input_sum", keepdims=True)
    # The weights are only broadcast-compatible with x, not necessarily the
    # same shape. Materialize a per-item weight with the shape of
    # (frequency_weights * x) so the weight total is computed correctly
    # without reasoning through all the broadcast rules.
    per_item_weights = frequency_weights + array_ops.zeros_like(x)
    weight_total = math_ops.reduce_sum(
        per_item_weights, axes, name="sum_of_weights", keepdims=True)
    inv_weight_total = math_ops.reciprocal(weight_total, name="inv_weight_sum")
    weighted_mean = math_ops.multiply(weighted_sum, inv_weight_total)
    # Second pass: weighted sum of squared deviations from the weighted mean.
    weighted_sq_dev = math_ops.reduce_sum(
        frequency_weights * math_ops.squared_difference(x, weighted_mean),
        axes,
        name="weighted_distsq",
        keepdims=True)
    weighted_variance = math_ops.multiply(weighted_sq_dev, inv_weight_total)
    if not keep_dims:
      weighted_mean = array_ops.squeeze(weighted_mean, axis=axes)
      weighted_variance = array_ops.squeeze(weighted_variance, axis=axes)
    if needs_cast:
      weighted_mean = math_ops.cast(weighted_mean, dtypes.float16)
      weighted_variance = math_ops.cast(weighted_variance, dtypes.float16)
    return weighted_mean, weighted_variance
@tf_export("nn.weighted_moments", v1=[])
@dispatch.add_dispatch_support
def weighted_moments_v2(x, axes, frequency_weights, keepdims=False, name=None):
  """Returns the frequency-weighted mean and variance of `x`.

  Args:
    x: A tensor.
    axes: 1-d tensor of int32 values; these are the axes along which
      to compute mean and variance.
    frequency_weights: A tensor of positive weights which can be
      broadcast with x.
    keepdims: Produce moments with the same dimensionality as the input.
    name: Name used to scope the operation.

  Returns:
    Two tensors: `weighted_mean` and `weighted_variance`.
  """
  # Thin v2 wrapper over the v1 implementation; only the keyword spelling
  # of `keepdims` differs.
  return weighted_moments(
      x, axes, frequency_weights, name=name, keep_dims=keepdims)
@tf_export("nn.batch_normalization")
@dispatch.add_dispatch_support
def batch_normalization(x,
                        mean,
                        variance,
                        offset,
                        scale,
                        variance_epsilon,
                        name=None):
  r"""Batch normalization.

  Normalizes a tensor by `mean` and `variance`, and applies (optionally) a
  `scale` \\(\gamma\\) to it, as well as an `offset` \\(\beta\\):

  \\(\frac{\gamma(x-\mu)}{\sigma}+\beta\\)

  `mean`, `variance`, `offset` and `scale` are all expected to be of one of two
  shapes:

  * In all generality, they can have the same number of dimensions as the
    input `x`, with identical sizes as `x` for the dimensions that are not
    normalized over (the 'depth' dimension(s)), and dimension 1 for the
    others which are being normalized over.
    `mean` and `variance` in this case would typically be the outputs of
    `tf.nn.moments(..., keepdims=True)` during training, or running averages
    thereof during inference.
  * In the common case where the 'depth' dimension is the last dimension in
    the input tensor `x`, they may be one dimensional tensors of the same
    size as the 'depth' dimension.
    This is the case for example for the common `[batch, depth]` layout of
    fully-connected layers, and `[batch, height, width, depth]` for
    convolutions.
    `mean` and `variance` in this case would typically be the outputs of
    `tf.nn.moments(..., keepdims=False)` during training, or running averages
    thereof during inference.

  See equation 11 in Algorithm 2 of source:
  [Batch Normalization: Accelerating Deep Network Training by
  Reducing Internal Covariate Shift; S. Ioffe, C. Szegedy]
  (http://arxiv.org/abs/1502.03167).

  Args:
    x: Input `Tensor` of arbitrary dimensionality.
    mean: A mean `Tensor`.
    variance: A variance `Tensor`.
    offset: An offset `Tensor`, often denoted \\(\beta\\) in equations, or
      None. If present, will be added to the normalized tensor.
    scale: A scale `Tensor`, often denoted \\(\gamma\\) in equations, or
      `None`. If present, the scale is applied to the normalized tensor.
    variance_epsilon: A small float number to avoid dividing by 0.
    name: A name for this operation (optional).

  Returns:
    the normalized, scaled, offset tensor.

  References:
    Batch Normalization - Accelerating Deep Network Training by Reducing
    Internal Covariate Shift:
      [Ioffe et al., 2015](http://arxiv.org/abs/1502.03167)
      ([pdf](http://proceedings.mlr.press/v37/ioffe15.pdf))
  """
  with ops.name_scope(name, "batchnorm", [x, mean, variance, scale, offset]):
    # inv = gamma / sqrt(variance + eps). Folding the optional scale into the
    # reciprocal square root lets the whole normalization be applied with a
    # single multiply and add below.
    inv = math_ops.rsqrt(variance + variance_epsilon)
    if scale is not None:
      inv *= scale
    # The result is x * inv + (offset - mean * inv), i.e. (x - mean) * inv
    # rearranged so `x` appears exactly once; the casts keep the output in
    # x's dtype even when the statistics are in a higher precision.
    # Note: tensorflow/contrib/quantize/python/fold_batch_norms.py depends on
    # the precise order of ops that are generated by the expression below.
    return x * math_ops.cast(inv, x.dtype) + math_ops.cast(
        offset - mean * inv if offset is not None else -mean * inv, x.dtype)
@tf_export(v1=["nn.fused_batch_norm"])
@dispatch.add_dispatch_support
def fused_batch_norm(
    x,
    scale,
    offset,  # pylint: disable=invalid-name
    mean=None,
    variance=None,
    epsilon=0.001,
    data_format="NHWC",
    is_training=True,
    name=None,
    exponential_avg_factor=1.0):
  r"""Batch normalization.

  See Source: [Batch Normalization: Accelerating Deep Network Training by
  Reducing Internal Covariate Shift; S. Ioffe, C. Szegedy]
  (http://arxiv.org/abs/1502.03167).

  Args:
    x: Input `Tensor` of 4 or 5 dimensions.
    scale: A `Tensor` of 1 dimension for scaling.
    offset: A `Tensor` of 1 dimension for bias.
    mean: A `Tensor` of 1 dimension for population mean. Its required shape
      and meaning depend on `is_training` and `exponential_avg_factor`:
        is_training==False (inference): a `Tensor` of the same shape as
          `scale`, containing the estimated population mean computed during
          training.
        is_training==True and exponential_avg_factor == 1.0: must be None.
        is_training==True and exponential_avg_factor != 1.0: a `Tensor` of
          the same shape as `scale`, containing the exponential running mean.
    variance: A `Tensor` of 1 dimension for population variance. Its required
      shape and meaning follow the same rules as `mean` above, with the
      exponential running variance in place of the running mean.
    epsilon: A small float number added to the variance of x.
    data_format: The data format for x. Support "NHWC" (default) or "NCHW" for
      4D tensors and "NDHWC" or "NCDHW" for 5D tensors.
    is_training: A bool value to specify if the operation is used for
      training or inference.
    name: A name for this operation (optional).
    exponential_avg_factor: A float number (usually between 0 and 1) used for
      controlling the decay of the running population average of mean and
      variance. If set to 1.0, the current batch average is returned.

  Returns:
    y: A 4D or 5D Tensor for the normalized, scaled, offsetted x.
    running_mean: A 1D Tensor for the exponential running mean of x.
      The output value is (1 - exponential_avg_factor) * mean +
      exponential_avg_factor * batch_mean, where batch_mean is the mean of
      the current batch in x.
    running_var: A 1D Tensor for the exponential running variance.
      The output value is (1 - exponential_avg_factor) * variance +
      exponential_avg_factor * batch_variance, where batch_variance is the
      variance of the current batch in x.

  References:
    Batch Normalization - Accelerating Deep Network Training by Reducing
    Internal Covariate Shift:
      [Ioffe et al., 2015](http://proceedings.mlr.press/v37/ioffe15.html)
      ([pdf](http://proceedings.mlr.press/v37/ioffe15.pdf))
  """
  # Population moments are required whenever the kernel will read them: at
  # inference time, or when blending into an exponential running average.
  needs_input_moments = not is_training or exponential_avg_factor != 1.0
  if needs_input_moments and (mean is None or variance is None):
    raise ValueError("Both 'mean' and 'variance' must be a 1D tensor when "
                     "is_training is False or "
                     "exponential_avg_factor != 1.0.")
  x = ops.convert_to_tensor(x, name="input")
  scale = ops.convert_to_tensor(scale, name="scale")
  offset = ops.convert_to_tensor(offset, name="offset")
  # The fused kernel expects (possibly empty) tensors rather than None.
  if mean is None:
    mean = constant_op.constant([])
  if variance is None:
    variance = constant_op.constant([])
  # CUDNN requires epsilon to be at least 1.001e-5 (see cudnn.h); clamp so
  # the kernel does not raise an exception.
  min_epsilon = 1.001e-5
  epsilon = min_epsilon if epsilon <= min_epsilon else epsilon
  y, running_mean, running_var, _, _, _ = gen_nn_ops.fused_batch_norm_v3(
      x,
      scale,
      offset,
      mean,
      variance,
      epsilon=epsilon,
      exponential_avg_factor=exponential_avg_factor,
      data_format=data_format,
      is_training=is_training,
      name=name)
  return y, running_mean, running_var
@tf_export(v1=["nn.batch_norm_with_global_normalization"])
@dispatch.add_dispatch_support
def batch_norm_with_global_normalization(t=None,
                                         m=None,
                                         v=None,
                                         beta=None,
                                         gamma=None,
                                         variance_epsilon=None,
                                         scale_after_normalization=None,
                                         name=None,
                                         input=None,  # pylint: disable=redefined-builtin
                                         mean=None,
                                         variance=None):
  """Batch normalization.

  This op is deprecated. See `tf.nn.batch_normalization`.

  Args:
    t: A 4D input Tensor.
    m: A 1D mean Tensor with size matching the last dimension of t.
      This is the first output from tf.nn.moments,
      or a saved moving average thereof.
    v: A 1D variance Tensor with size matching the last dimension of t.
      This is the second output from tf.nn.moments,
      or a saved moving average thereof.
    beta: A 1D beta Tensor with size matching the last dimension of t.
      An offset to be added to the normalized tensor.
    gamma: A 1D gamma Tensor with size matching the last dimension of t.
      If "scale_after_normalization" is true, this tensor will be multiplied
      with the normalized tensor.
    variance_epsilon: A small float number to avoid dividing by 0.
    scale_after_normalization: A bool indicating whether the resulted tensor
      needs to be multiplied with gamma.
    name: A name for this operation (optional).
    input: Alias for t.
    mean: Alias for m.
    variance: Alias for v.

  Returns:
    A batch-normalized `t`.

  References:
    Batch Normalization - Accelerating Deep Network Training by Reducing
    Internal Covariate Shift:
      [Ioffe et al., 2015](http://proceedings.mlr.press/v37/ioffe15.html)
      ([pdf](http://proceedings.mlr.press/v37/ioffe15.pdf))
  """
  # Collapse each deprecated/new alias pair down to a single value.
  t = deprecated_argument_lookup("input", input, "t", t)
  m = deprecated_argument_lookup("mean", mean, "m", m)
  v = deprecated_argument_lookup("variance", variance, "v", v)
  # gamma only takes effect when scale_after_normalization is set.
  scale = gamma if scale_after_normalization else None
  return batch_normalization(t, m, v, beta, scale, variance_epsilon, name)
# pylint: disable=redefined-builtin,line-too-long
@tf_export("nn.batch_norm_with_global_normalization", v1=[])
@dispatch.add_dispatch_support
def batch_norm_with_global_normalization_v2(input,
                                            mean,
                                            variance,
                                            beta,
                                            gamma,
                                            variance_epsilon,
                                            scale_after_normalization,
                                            name=None):
  """Batch normalization.

  This op is deprecated. See `tf.nn.batch_normalization`.

  Args:
    input: A 4D input Tensor.
    mean: A 1D mean Tensor with size matching the last dimension of input.
      This is the first output from tf.nn.moments,
      or a saved moving average thereof.
    variance: A 1D variance Tensor with size matching the last dimension of
      input. This is the second output from tf.nn.moments,
      or a saved moving average thereof.
    beta: A 1D beta Tensor with size matching the last dimension of input.
      An offset to be added to the normalized tensor.
    gamma: A 1D gamma Tensor with size matching the last dimension of input.
      If "scale_after_normalization" is true, this tensor will be multiplied
      with the normalized tensor.
    variance_epsilon: A small float number to avoid dividing by 0.
    scale_after_normalization: A bool indicating whether the resulting tensor
      needs to be multiplied with gamma.
    name: A name for this operation (optional).

  Returns:
    A batch-normalized version of `input`.

  References:
    Batch Normalization - Accelerating Deep Network Training by Reducing
    Internal Covariate Shift:
      [Ioffe et al., 2015](http://proceedings.mlr.press/v37/ioffe15.html)
      ([pdf](http://proceedings.mlr.press/v37/ioffe15.pdf))
  """
  # Forward to the v1 entry point, mapping the v2 argument names onto the
  # deprecated single-letter parameters it still uses.
  return batch_norm_with_global_normalization(
      t=input,
      m=mean,
      v=variance,
      beta=beta,
      gamma=gamma,
      variance_epsilon=variance_epsilon,
      scale_after_normalization=scale_after_normalization,
      name=name)
# pylint: enable=redefined-builtin,line-too-long
def _sum_rows(x):
  """Sums each row of matrix `x`, returning a 1-D vector of row totals."""
  # Implemented as x @ ones instead of math_ops.reduce_sum(x, 1): for a
  # matrix the results are identical, but the matmul formulation has a more
  # efficient gradient in today's implementation. That matters because this
  # helper sits on the nce_loss() training path.
  num_cols = array_ops.shape(x)[1]
  ones_col = array_ops.ones(array_ops.stack([num_cols, 1]), x.dtype)
  return array_ops.reshape(math_ops.matmul(x, ones_col), [-1])
def _compute_sampled_logits(weights,
                            biases,
                            labels,
                            inputs,
                            num_sampled,
                            num_classes,
                            num_true=1,
                            sampled_values=None,
                            subtract_log_q=True,
                            remove_accidental_hits=False,
                            partition_strategy="mod",
                            name=None,
                            seed=None):
  """Helper function for nce_loss and sampled_softmax_loss functions.

  Computes sampled output training logits and labels suitable for implementing
  e.g. noise-contrastive estimation (see nce_loss) or sampled softmax (see
  sampled_softmax_loss).

  Note: In the case where num_true > 1, we assign to each target class
  the target probability 1 / num_true so that the target probabilities
  sum to 1 per-example.

  Args:
    weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
      objects whose concatenation along dimension 0 has shape
      `[num_classes, dim]`. The (possibly-partitioned) class embeddings.
    biases: A `Tensor` of shape `[num_classes]`. The (possibly-partitioned)
      class biases.
    labels: A `Tensor` of type `int64` and shape `[batch_size,
      num_true]`. The target classes. Note that this format differs from
      the `labels` argument of `nn.softmax_cross_entropy_with_logits`.
    inputs: A `Tensor` of shape `[batch_size, dim]`. The forward
      activations of the input network.
    num_sampled: An `int`. The number of classes to randomly sample per batch.
    num_classes: An `int`. The number of possible classes.
    num_true: An `int`. The number of target classes per training example.
    sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
      `sampled_expected_count`) returned by a `*_candidate_sampler` function.
      (if None, we default to `log_uniform_candidate_sampler`)
    subtract_log_q: A `bool`. whether to subtract the log expected count of
      the labels in the sample to get the logits of the true labels.
      Default is True. Turn off for Negative Sampling.
    remove_accidental_hits: A `bool`. whether to remove "accidental hits"
      where a sampled class equals one of the target classes. Default is
      False.
    partition_strategy: A string specifying the partitioning strategy, relevant
      if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
      Default is `"mod"`. See `tf.nn.embedding_lookup` for more details.
    name: A name for the operation (optional).
    seed: random seed for candidate sampling. Default to None, which doesn't
      set the op-level random seed for candidate sampling.

  Returns:
    out_logits: `Tensor` object with shape
        `[batch_size, num_true + num_sampled]`, for passing to either
        `nn.sigmoid_cross_entropy_with_logits` (NCE) or
        `nn.softmax_cross_entropy_with_logits` (sampled softmax).
    out_labels: A Tensor object with the same shape as `out_logits`.
  """
  # Normalize `weights` to a list of shards so the logic below handles
  # partitioned and unpartitioned embeddings uniformly.
  if isinstance(weights, variables.PartitionedVariable):
    weights = list(weights)
  if not isinstance(weights, list):
    weights = [weights]
  with ops.name_scope(name, "compute_sampled_logits",
                      weights + [biases, inputs, labels]):
    # Candidate samplers require int64 class ids.
    if labels.dtype != dtypes.int64:
      labels = math_ops.cast(labels, dtypes.int64)
    labels_flat = array_ops.reshape(labels, [-1])
    # Sample the negative labels.
    #   sampled shape: [num_sampled] tensor
    #   true_expected_count shape = [batch_size, 1] tensor
    #   sampled_expected_count shape = [num_sampled] tensor
    if sampled_values is None:
      sampled_values = candidate_sampling_ops.log_uniform_candidate_sampler(
          true_classes=labels,
          num_true=num_true,
          num_sampled=num_sampled,
          unique=True,
          range_max=num_classes,
          seed=seed)
    # Gradients must not flow through the sampler outputs.
    # NOTE: pylint cannot tell that 'sampled_values' is a sequence
    # pylint: disable=unpacking-non-sequence
    sampled, true_expected_count, sampled_expected_count = (
        array_ops.stop_gradient(s) for s in sampled_values)
    # pylint: enable=unpacking-non-sequence
    sampled = math_ops.cast(sampled, dtypes.int64)
    # labels_flat is a [batch_size * num_true] tensor
    # sampled is a [num_sampled] int tensor
    # Look up true and sampled embeddings in a single concatenated lookup.
    all_ids = array_ops.concat([labels_flat, sampled], 0)
    # Retrieve the true weights and the logits of the sampled weights.
    # weights shape is [num_classes, dim]
    all_w = embedding_ops.embedding_lookup(
        weights, all_ids, partition_strategy=partition_strategy)
    if all_w.dtype != inputs.dtype:
      all_w = math_ops.cast(all_w, inputs.dtype)
    # Split the combined lookup back apart: the first
    # batch_size * num_true rows are the true-class embeddings,
    # the rest are the sampled-class embeddings.
    # true_w shape is [batch_size * num_true, dim]
    true_w = array_ops.slice(all_w, [0, 0],
                             array_ops.stack(
                                 [array_ops.shape(labels_flat)[0], -1]))
    sampled_w = array_ops.slice(
        all_w, array_ops.stack([array_ops.shape(labels_flat)[0], 0]), [-1, -1])
    # inputs has shape [batch_size, dim]
    # sampled_w has shape [num_sampled, dim]
    # Apply X*W', which yields [batch_size, num_sampled]
    sampled_logits = math_ops.matmul(inputs, sampled_w, transpose_b=True)
    # Retrieve the true and sampled biases, compute the true logits, and
    # add the biases to the true and sampled logits.
    all_b = embedding_ops.embedding_lookup(
        biases, all_ids, partition_strategy=partition_strategy)
    if all_b.dtype != inputs.dtype:
      all_b = math_ops.cast(all_b, inputs.dtype)
    # true_b is a [batch_size * num_true] tensor
    # sampled_b is a [num_sampled] float tensor
    true_b = array_ops.slice(all_b, [0], array_ops.shape(labels_flat))
    sampled_b = array_ops.slice(all_b, array_ops.shape(labels_flat), [-1])
    # inputs shape is [batch_size, dim]
    # true_w shape is [batch_size * num_true, dim]
    # row_wise_dots is [batch_size, num_true, dim]
    dim = array_ops.shape(true_w)[1:2]
    new_true_w_shape = array_ops.concat([[-1, num_true], dim], 0)
    row_wise_dots = math_ops.multiply(
        array_ops.expand_dims(inputs, 1),
        array_ops.reshape(true_w, new_true_w_shape))
    # We want the row-wise dot plus biases which yields a
    # [batch_size, num_true] tensor of true_logits.
    dots_as_matrix = array_ops.reshape(row_wise_dots,
                                       array_ops.concat([[-1], dim], 0))
    true_logits = array_ops.reshape(_sum_rows(dots_as_matrix), [-1, num_true])
    true_b = array_ops.reshape(true_b, [-1, num_true])
    true_logits += true_b
    sampled_logits += sampled_b
    if remove_accidental_hits:
      # A sampled class that equals one of the example's target classes is an
      # "accidental hit": push its logit toward -infinity (acc_weights are
      # large negative values) so it does not act as a negative example.
      acc_hits = candidate_sampling_ops.compute_accidental_hits(
          labels, sampled, num_true=num_true)
      acc_indices, acc_ids, acc_weights = acc_hits
      # This is how SparseToDense expects the indices.
      acc_indices_2d = array_ops.reshape(acc_indices, [-1, 1])
      acc_ids_2d_int32 = array_ops.reshape(
          math_ops.cast(acc_ids, dtypes.int32), [-1, 1])
      sparse_indices = array_ops.concat([acc_indices_2d, acc_ids_2d_int32], 1,
                                        "sparse_indices")
      # Create sampled_logits_shape = [batch_size, num_sampled]
      sampled_logits_shape = array_ops.concat(
          [array_ops.shape(labels)[:1],
           array_ops.expand_dims(num_sampled, 0)], 0)
      if sampled_logits.dtype != acc_weights.dtype:
        acc_weights = math_ops.cast(acc_weights, sampled_logits.dtype)
      sampled_logits += gen_sparse_ops.sparse_to_dense(
          sparse_indices,
          sampled_logits_shape,
          acc_weights,
          default_value=0.0,
          validate_indices=False)
    if subtract_log_q:
      # Subtract log of Q(l), prior probability that l appears in sampled.
      true_logits -= math_ops.log(true_expected_count)
      sampled_logits -= math_ops.log(sampled_expected_count)
    # Construct output logits and labels. The true labels/logits start at col 0.
    out_logits = array_ops.concat([true_logits, sampled_logits], 1)
    # true_logits is a float tensor, ones_like(true_logits) is a float
    # tensor of ones. We then divide by num_true to ensure the per-example
    # labels sum to 1.0, i.e. form a proper probability distribution.
    out_labels = array_ops.concat([
        array_ops.ones_like(true_logits) / num_true,
        array_ops.zeros_like(sampled_logits)
    ], 1)
    return out_logits, out_labels
@tf_export("nn.nce_loss", v1=[])
@dispatch.add_dispatch_support
def nce_loss_v2(weights,
                biases,
                labels,
                inputs,
                num_sampled,
                num_classes,
                num_true=1,
                sampled_values=None,
                remove_accidental_hits=False,
                name="nce_loss"):
  """Computes and returns the noise-contrastive estimation training loss.

  See [Gutmann and Hyvarinen, 2010]
  (http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf) and
  the [Candidate Sampling Algorithms Reference]
  (https://www.tensorflow.org/extras/candidate_sampling.pdf).

  This loss is intended for training; for evaluation or inference compute the
  full sigmoid loss instead (matmul + `tf.nn.sigmoid_cross_entropy_with_logits`
  over one-hot labels).

  Note: embedding lookups on `weights` and `biases` always use the "div"
  partition strategy in this (v2) API; other strategies may be supported later.

  Note: by default a log-uniform (Zipfian) candidate sampler is used, so the
  labels must be sorted in order of decreasing frequency for good results; see
  `tf.random.log_uniform_candidate_sampler`.

  Note: when `num_true` > 1, each target class is assigned probability
  1 / `num_true` so the per-example target probabilities sum to 1.

  Args:
    weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
      objects whose concatenation along dimension 0 has that shape. The
      (possibly-partitioned) class embeddings.
    biases: A `Tensor` of shape `[num_classes]`. The class biases.
    labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The
      target classes.
    inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of
      the input network.
    num_sampled: An `int`. The number of negative classes sampled per batch;
      this single sample is shared by every element of the batch.
    num_classes: An `int`. The number of possible classes.
    num_true: An `int`. The number of target classes per training example.
    sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
      `sampled_expected_count`) returned by a `*_candidate_sampler` function
      (if None, defaults to `log_uniform_candidate_sampler`).
    remove_accidental_hits: A `bool`. If `True`, sampled classes that equal a
      target class are removed, turning this into a "Sampled Logistic" loss
      that learns log-odds instead of log probabilities.
    name: A name for the operation (optional).

  Returns:
    A `batch_size` 1-D tensor of per-example NCE losses.
  """
  # TODO(yuefengz): get partition_strategy from either variables or
  # distribution strategies.
  # The v2 API pins the embedding-lookup partition strategy to "div" and
  # otherwise forwards everything to the v1 implementation unchanged.
  return nce_loss(
      weights,
      biases,
      labels,
      inputs,
      num_sampled,
      num_classes,
      num_true=num_true,
      sampled_values=sampled_values,
      remove_accidental_hits=remove_accidental_hits,
      partition_strategy="div",
      name=name)
@tf_export(v1=["nn.nce_loss"])
@dispatch.add_dispatch_support
def nce_loss(weights,
             biases,
             labels,
             inputs,
             num_sampled,
             num_classes,
             num_true=1,
             sampled_values=None,
             remove_accidental_hits=False,
             partition_strategy="mod",
             name="nce_loss"):
  """Computes and returns the noise-contrastive estimation training loss.

  Typically used for training only; for evaluation or inference compute the
  full sigmoid loss, in which case `partition_strategy="div"` must be used so
  the two losses are consistent:

  ```python
  if mode == "train":
    loss = tf.nn.nce_loss(
        weights=weights, biases=biases, labels=labels, inputs=inputs, ...,
        partition_strategy="div")
  elif mode == "eval":
    logits = tf.matmul(inputs, tf.transpose(weights))
    logits = tf.nn.bias_add(logits, biases)
    labels_one_hot = tf.one_hot(labels, n_classes)
    loss = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=labels_one_hot, logits=logits)
    loss = tf.reduce_sum(loss, axis=1)
  ```

  Note: by default a log-uniform (Zipfian) candidate sampler is used, so the
  labels must be sorted in order of decreasing frequency for good results; see
  `tf.random.log_uniform_candidate_sampler`.

  Note: when `num_true` > 1, each target class is assigned probability
  1 / `num_true` so the per-example target probabilities sum to 1.

  Args:
    weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
      objects whose concatenation along dimension 0 has that shape. The
      (possibly-partitioned) class embeddings.
    biases: A `Tensor` of shape `[num_classes]`. The class biases.
    labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The
      target classes.
    inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of
      the input network.
    num_sampled: An `int`. The number of negative classes sampled per batch;
      this single sample is shared by every element of the batch.
    num_classes: An `int`. The number of possible classes.
    num_true: An `int`. The number of target classes per training example.
    sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
      `sampled_expected_count`) returned by a `*_candidate_sampler` function
      (if None, defaults to `log_uniform_candidate_sampler`).
    remove_accidental_hits: A `bool`. If `True`, sampled classes that equal a
      target class are removed, turning this into a "Sampled Logistic" loss
      that learns log-odds instead of log probabilities. See the Candidate
      Sampling Algorithms Reference
      ([pdf](https://www.tensorflow.org/extras/candidate_sampling.pdf)).
    partition_strategy: A string specifying the partitioning strategy, relevant
      if `len(weights) > 1`; `"div"` and `"mod"` are supported, default
      `"mod"`. See `tf.nn.embedding_lookup`.
    name: A name for the operation (optional).

  Returns:
    A `batch_size` 1-D tensor of per-example NCE losses.

  References:
    Noise-contrastive estimation - A new estimation principle for unnormalized
    statistical models:
    [Gutmann et al., 2010](http://proceedings.mlr.press/v9/gutmann10a)
    ([pdf](http://proceedings.mlr.press/v9/gutmann10a/gutmann10a.pdf))
  """
  # Build the [batch_size, num_true + num_sampled] logits/labels matrices for
  # the true classes and the shared negative sample.
  sampled_logits, sampled_labels = _compute_sampled_logits(
      weights=weights,
      biases=biases,
      labels=labels,
      inputs=inputs,
      num_sampled=num_sampled,
      num_classes=num_classes,
      num_true=num_true,
      sampled_values=sampled_values,
      subtract_log_q=True,
      remove_accidental_hits=remove_accidental_hits,
      partition_strategy=partition_strategy,
      name=name)
  per_entry_losses = sigmoid_cross_entropy_with_logits(
      labels=sampled_labels, logits=sampled_logits, name="sampled_losses")
  # Each row holds {true_loss, sampled_losses...}; summing the row gives the
  # per-example NCE loss.
  return _sum_rows(per_entry_losses)
@tf_export("nn.sampled_softmax_loss", v1=[])
@dispatch.add_dispatch_support
def sampled_softmax_loss_v2(weights,
                            biases,
                            labels,
                            inputs,
                            num_sampled,
                            num_classes,
                            num_true=1,
                            sampled_values=None,
                            remove_accidental_hits=True,
                            seed=None,
                            name="sampled_softmax_loss"):
  """Computes and returns the sampled softmax training loss.

  A faster way to train a softmax classifier over a huge number of classes.
  This operation is for training only; it is generally an underestimate of the
  full softmax loss, which should be used for evaluation or inference:

  ```python
  if mode == "train":
    loss = tf.nn.sampled_softmax_loss(
        weights=weights, biases=biases, labels=labels, inputs=inputs, ...)
  elif mode == "eval":
    logits = tf.matmul(inputs, tf.transpose(weights))
    logits = tf.nn.bias_add(logits, biases)
    labels_one_hot = tf.one_hot(labels, n_classes)
    loss = tf.nn.softmax_cross_entropy_with_logits(
        labels=labels_one_hot, logits=logits)
  ```

  See the [Candidate Sampling Algorithms Reference]
  (https://www.tensorflow.org/extras/candidate_sampling.pdf) and Section 3 of
  [Jean et al., 2014](http://arxiv.org/abs/1412.2007)
  ([pdf](http://arxiv.org/pdf/1412.2007.pdf)) for the math.

  Note: embedding lookups on `weights` and `biases` always use the "div"
  partition strategy in this (v2) API; other strategies may be supported later.

  Args:
    weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
      objects whose concatenation along dimension 0 has that shape. The
      (possibly-sharded) class embeddings.
    biases: A `Tensor` of shape `[num_classes]`. The class biases.
    labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The
      target classes. Note this format differs from the `labels` argument of
      `nn.softmax_cross_entropy_with_logits`.
    inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of
      the input network.
    num_sampled: An `int`. The number of classes randomly sampled per batch.
    num_classes: An `int`. The number of possible classes.
    num_true: An `int`. The number of target classes per training example.
    sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
      `sampled_expected_count`) returned by a `*_candidate_sampler` function
      (if None, defaults to `log_uniform_candidate_sampler`).
    remove_accidental_hits: A `bool`. Whether to remove "accidental hits" where
      a sampled class equals a target class. Default is True.
    seed: random seed for candidate sampling. Defaults to None, which does not
      set the op-level random seed.
    name: A name for the operation (optional).

  Returns:
    A `batch_size` 1-D tensor of per-example sampled softmax losses.
  """
  # v2 pins the embedding-lookup partition strategy to "div"; everything else
  # is forwarded to the v1 implementation unchanged.
  return sampled_softmax_loss(
      weights,
      biases,
      labels,
      inputs,
      num_sampled,
      num_classes,
      num_true=num_true,
      sampled_values=sampled_values,
      remove_accidental_hits=remove_accidental_hits,
      partition_strategy="div",
      name=name,
      seed=seed)
@tf_export(v1=["nn.sampled_softmax_loss"])
@dispatch.add_dispatch_support
def sampled_softmax_loss(weights,
                         biases,
                         labels,
                         inputs,
                         num_sampled,
                         num_classes,
                         num_true=1,
                         sampled_values=None,
                         remove_accidental_hits=True,
                         partition_strategy="mod",
                         name="sampled_softmax_loss",
                         seed=None):
  """Computes and returns the sampled softmax training loss.

  A faster way to train a softmax classifier over a huge number of classes.
  This operation is for training only; it is generally an underestimate of the
  full softmax loss. For evaluation or inference compute the full softmax
  loss, in which case `partition_strategy="div"` must be used so the two
  losses are consistent:

  ```python
  if mode == "train":
    loss = tf.nn.sampled_softmax_loss(
        weights=weights, biases=biases, labels=labels, inputs=inputs, ...,
        partition_strategy="div")
  elif mode == "eval":
    logits = tf.matmul(inputs, tf.transpose(weights))
    logits = tf.nn.bias_add(logits, biases)
    labels_one_hot = tf.one_hot(labels, n_classes)
    loss = tf.nn.softmax_cross_entropy_with_logits(
        labels=labels_one_hot, logits=logits)
  ```

  See the Candidate Sampling Algorithms Reference
  ([pdf](https://www.tensorflow.org/extras/candidate_sampling.pdf)) and
  Section 3 of (Jean et al., 2014) for the math.

  Args:
    weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
      objects whose concatenation along dimension 0 has that shape. The
      (possibly-sharded) class embeddings.
    biases: A `Tensor` of shape `[num_classes]`. The class biases.
    labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The
      target classes. Note this format differs from the `labels` argument of
      `nn.softmax_cross_entropy_with_logits`.
    inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of
      the input network.
    num_sampled: An `int`. The number of classes randomly sampled per batch.
    num_classes: An `int`. The number of possible classes.
    num_true: An `int`. The number of target classes per training example.
    sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
      `sampled_expected_count`) returned by a `*_candidate_sampler` function
      (if None, defaults to `log_uniform_candidate_sampler`).
    remove_accidental_hits: A `bool`. Whether to remove "accidental hits" where
      a sampled class equals a target class. Default is True.
    partition_strategy: A string specifying the partitioning strategy, relevant
      if `len(weights) > 1`; `"div"` and `"mod"` are supported, default
      `"mod"`. See `tf.nn.embedding_lookup`.
    name: A name for the operation (optional).
    seed: random seed for candidate sampling. Defaults to None, which does not
      set the op-level random seed.

  Returns:
    A `batch_size` 1-D tensor of per-example sampled softmax losses.

  References:
    On Using Very Large Target Vocabulary for Neural Machine Translation:
    [Jean et al., 2014]
    (https://aclanthology.coli.uni-saarland.de/papers/P15-1001/p15-1001)
    ([pdf](http://aclweb.org/anthology/P15-1001))
  """
  # Build the [batch_size, num_true + num_sampled] logits and the matching
  # soft-label matrix for the true classes plus the shared negative sample.
  logits, soft_labels = _compute_sampled_logits(
      weights=weights,
      biases=biases,
      labels=labels,
      inputs=inputs,
      num_sampled=num_sampled,
      num_classes=num_classes,
      num_true=num_true,
      sampled_values=sampled_values,
      subtract_log_q=True,
      remove_accidental_hits=remove_accidental_hits,
      partition_strategy=partition_strategy,
      name=name,
      seed=seed)
  # The constructed labels are constants; gradients must not flow into them.
  soft_labels = array_ops.stop_gradient(soft_labels, name="labels_stop_gradient")
  # Result is a [batch_size] tensor of per-example losses.
  return nn_ops.softmax_cross_entropy_with_logits_v2(
      labels=soft_labels, logits=logits)
|
sarvex/tensorflow
|
tensorflow/python/ops/nn_impl.py
|
Python
|
apache-2.0
| 100,241
|
[
"Gaussian"
] |
316a1b3d655b70082d296c8bfbcf80b6bd3b592e4a1a59a2ab1e6f8020701f54
|
#-----------------------------------------------------------------------------
# Copyright (c) 2010-2012 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import time
import os
import threading
import zmq
from zmq.tests import BaseZMQTestCase
from zmq.eventloop import ioloop
from zmq.eventloop.minitornado.ioloop import _Timeout
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def printer():
    """Manual-test helper: run `say hello` and then always raise Exception.

    NOTE(review): `say` only produces speech on macOS; elsewhere the shell
    command fails harmlessly and os.system returns a non-zero status.
    The original body also contained `print(time.time())` AFTER the bare
    `raise`, which was unreachable dead code and has been removed.
    """
    os.system("say hello")
    raise Exception
class Delay(threading.Thread):
    """Thread that invokes ``f`` after ``delay`` seconds unless aborted.

    ``abort()`` wakes the waiting thread early and suppresses the call.
    """

    def __init__(self, f, delay=1):
        super(Delay, self).__init__()
        self.f = f
        self.delay = delay
        self.aborted = False
        self.cond = threading.Condition()

    def run(self):
        # Sleep on the condition so abort() can interrupt the wait early.
        with self.cond:
            self.cond.wait(self.delay)
        if not self.aborted:
            self.f()

    def abort(self):
        """Cancel the pending call and wake the thread immediately."""
        self.aborted = True
        with self.cond:
            self.cond.notify()
class TestIOLoop(BaseZMQTestCase):
    """Tests for the tornado-based IOLoop shipped with pyzmq."""

    def test_simple(self):
        """simple IOLoop creation test"""
        loop = ioloop.IOLoop()
        dc = ioloop.DelayedCallback(loop.stop, 200, loop)
        pc = ioloop.DelayedCallback(lambda : None, 10, loop)
        pc.start()
        dc.start()
        # Backup stopper in case the DelayedCallback never fires.
        t = Delay(loop.stop,1)
        t.start()
        loop.start()
        # BUG FIX: Thread.isAlive() was removed in Python 3.9; is_alive() is
        # the long-standing spelling and exists on all supported versions.
        if t.is_alive():
            t.abort()
        else:
            self.fail("IOLoop failed to exit")

    def test_timeout_compare(self):
        """test timeout comparisons"""
        loop = ioloop.IOLoop()
        # Equal deadlines fall back to object-identity ordering.
        t = _Timeout(1, 2, loop)
        t2 = _Timeout(1, 3, loop)
        self.assertEqual(t < t2, id(t) < id(t2))
        t2 = _Timeout(2,1, loop)
        self.assertTrue(t < t2)

    def test_poller_events(self):
        """Tornado poller implementation maps events correctly"""
        req,rep = self.create_bound_pair(zmq.REQ, zmq.REP)
        poller = ioloop.ZMQPoller()
        # Neither socket is readable before any message is sent.
        poller.register(req, ioloop.IOLoop.READ)
        poller.register(rep, ioloop.IOLoop.READ)
        events = dict(poller.poll(0))
        self.assertEqual(events.get(rep), None)
        self.assertEqual(events.get(req), None)
        # Only the REQ side is writable while it holds the send turn.
        poller.register(req, ioloop.IOLoop.WRITE)
        poller.register(rep, ioloop.IOLoop.WRITE)
        events = dict(poller.poll(1))
        self.assertEqual(events.get(req), ioloop.IOLoop.WRITE)
        self.assertEqual(events.get(rep), None)
        # After a send, the REP side becomes readable.
        poller.register(rep, ioloop.IOLoop.READ)
        req.send(b'hi')
        events = dict(poller.poll(1))
        self.assertEqual(events.get(rep), ioloop.IOLoop.READ)
        self.assertEqual(events.get(req), None)
|
IsCoolEntertainment/debpkg_python-pyzmq
|
zmq/tests/test_ioloop.py
|
Python
|
lgpl-3.0
| 3,188
|
[
"Brian"
] |
deb83e74812f2e56fe159f31e28533f2669290b65de24c56120c4df2c7678470
|
import sys
from setuptools import setup
# Long description shown on PyPI; kept inline rather than read from a file.
long_description = '''\
Raven Sentry client for Bash.
Logs error if one of your commands exits with non-zero return code and produces simple traceback for
easier debugging. It also tries to extract last values of the variables visible in the traceback.
Environment variables and stderr output are also included.
For more information please visit project repo on GitHub: https://github.com/hareevs/raven-bash
'''
install_requires = ['raven>=5.1.1']
# configparser is stdlib on Python 3; only the Python 2 backport needs pinning.
# NOTE(review): the classifiers below declare Python 3 only, so this branch
# looks vestigial — confirm whether Python 2 support is still intended.
if sys.version_info[:2] < (3, 0):
    install_requires.append('configparser')
setup(
    name='raven-bash',
    version='1.0',
    description='Raven Sentry client for Bash.',
    long_description=long_description,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.4',
    ],
    keywords='raven sentry bash',
    author='Viktor Stiskala',
    author_email='viktor@stiskala.cz',
    url='https://github.com/hareevs/raven-bash',
    license='Apache License 2.0',
    install_requires=install_requires,
    packages=['logger'],
    # Ship the bash entry script alongside the Python package data.
    package_data={'logger': ['raven-bash', 'logger/*.py']},
    entry_points={
        'console_scripts': [
            'raven-logger=logger.raven_logger:main',
        ],
    },
    scripts=['raven-bash'],
    zip_safe=False
)
|
hareevs/raven-bash
|
setup.py
|
Python
|
apache-2.0
| 1,462
|
[
"VisIt"
] |
6c25b1d225e9f2265f4134f4ab1c82e2c38be843eb9aa766a141ad5316ba87f5
|
import datetime
import time
from decimal import Decimal
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from statisticscore import countries, session_types
# The following imports are used to process images for faster loading times
from imagekit.models import ImageSpecField
from imagekit.processors import ResizeToCover
# Maximum CharField lengths shared by the models below.
SESSION_NAME_LEN=100
SESSION_DESCRIPTION_LEN=200
SESSION_AUTHOR_LEN=100
SESSION_LICENCE_LEN=100
TOPIC_AREA_LEN=200
# Committee names are short codes (e.g. "AFCO"), hence the small limit.
COMMITTEE_NAME_MAX=8
class Session(models.Model):
    """A statistics session (e.g. a regional or international EYP session).

    Holds presentation metadata (pictures, links), the statistics mode, and
    the admin/submission user accounts for the event.
    """
    name = models.CharField(max_length=SESSION_NAME_LEN)
    description = models.CharField(max_length=SESSION_DESCRIPTION_LEN)
    #Session size
    session_type = models.CharField(max_length=3, choices=session_types.SESSION_TYPES, default=session_types.REGIONAL_SESSION)
    picture = models.ImageField(upload_to='session_pictures/')
    # Session picture used on front-page to help loading times
    picture_thumbnail = ImageSpecField(source='picture',
                                       processors=[ResizeToCover(400, 400)],
                                       format='JPEG',
                                       options={'quality': 80})
    # Session picture used on session page to help loading times and still acceptable image quality
    picture_large_fast = ImageSpecField(source='picture',
                                        processors=[ResizeToCover(1280, 400)],
                                        format='JPEG',
                                        options={'quality': 100})
    # Session picture author link allows users to credit photographers e.g. for Creative Commons content
    picture_author = models.CharField(max_length=SESSION_AUTHOR_LEN, blank=True)
    picture_author_link = models.URLField(blank=True)
    picture_licence = models.CharField(max_length=SESSION_LICENCE_LEN, blank=True)
    picture_license_link = models.URLField(blank=True)
    email = models.EmailField()
    # The following links will be displayed on the sessions main page if a link is provided
    resolution_link = models.URLField(blank=True)
    website_link = models.URLField(blank=True)
    facebook_link = models.URLField(blank=True)
    twitter_link = models.URLField(blank=True)
    topic_overview_link = models.URLField(blank=True)
    country = models.CharField(max_length=2, choices=countries.SESSION_COUNTRIES, default=countries.ALBANIA)
    #Date Options
    start_date = models.DateTimeField('start date')
    end_date = models.DateTimeField('end date')
    #Setting up statistic types
    STATISTICS = 'S'
    CONTENT = 'C'
    JOINTFORM = 'JF'
    SPLITFORM = 'SF'
    RUNNINGORDER = 'R'
    RUNNINGCONTENT = 'RC'
    STATISTIC_TYPES = (
        (STATISTICS, 'Statistics Only'),
        (CONTENT, 'Point Content Only'),
        (JOINTFORM, 'Joint Form Statistics'),
        (SPLITFORM, 'Split Form Statistics'),
        (RUNNINGORDER, 'Running Order Statistics'),
        (RUNNINGCONTENT, 'Running Order Statistics with Point Content')
    )
    session_statistics = models.CharField(max_length=5, choices=STATISTIC_TYPES, default=JOINTFORM)
    is_visible = models.BooleanField('is visible')
    voting_enabled = models.BooleanField('session-wide voting enabled', default=True)
    gender_enabled = models.BooleanField('gender statistics enabled', default=False)
    max_rounds = models.PositiveSmallIntegerField(default=3)
    gender_number_female = models.IntegerField(blank=True, null=True)
    gender_number_male = models.IntegerField(blank=True, null=True)
    gender_number_other = models.IntegerField(blank=True, null=True)
    # If the session has had technical problems some data is probably missing. If this is activated a message will be shown to indidate this.
    has_technical_problems = models.BooleanField('session has technical problems', default=False)
    #Defining two users for the session. The Admin user who can alter active debates, change points etc. and the
    #submit user, which will be the login for everyone at any given session who wants to submit a point.
    admin_user = models.ForeignKey(
        User,
        related_name='session_admin',
        blank=True,
        null=True,
        on_delete=models.CASCADE
    )
    submission_user = models.ForeignKey(
        User,
        related_name='session_submit',
        blank=True,
        null=True,
        on_delete=models.CASCADE
    )

    def __str__(self):
        return str(self.name)

    def session_ongoing(self):
        """Return True while 'now' lies within the session's date range."""
        now = timezone.now()
        return self.start_date <= now and self.end_date >= now
    session_ongoing.admin_order_field = 'start_date'
    session_ongoing.boolean = True
    session_ongoing.short_description = 'Session Ongoing'

    def session_latest_activity(self):
        """
        Returns date and time of the latest activity of the session. If there was never any activity, return False.
        """
        # Sentinel older than any real activity; 1972 predates all sessions.
        initialising_datetime = timezone.make_aware(datetime.datetime(1972, 1, 1, 2), timezone.get_default_timezone())
        latest_activity = initialising_datetime
        # PERF FIX: the old code used queryset truthiness (fetching every row)
        # followed by a second ordered query; .first() does it in one query.
        for queryset in (Point.objects.filter(session=self),
                         ContentPoint.objects.filter(session=self),
                         Vote.objects.filter(session=self)):
            newest = queryset.order_by('-timestamp').first()
            if newest is not None:
                # max() replaces the old sorted(...)[2] idiom.
                latest_activity = max(latest_activity, newest.timestamp)
        if latest_activity > initialising_datetime:
            return latest_activity
        else:
            return False

    def minutes_per_point(self):
        """Average number of minutes between the first and last point, per point."""
        # Content-only sessions store their points as ContentPoint rows.
        if self.session_statistics != 'C':
            all_points = Point.objects.filter(session=self).order_by('timestamp')
        else:
            all_points = ContentPoint.objects.filter(session=self).order_by('timestamp')
        if all_points.count() == 0:
            return 0
        total_points = all_points.count()
        first_point = all_points.first().timestamp
        latest_point = all_points.last().timestamp
        time_diff = latest_point - first_point
        # Convert the timedelta to whole minutes (days -> 1440 minutes).
        minutes = (time_diff.days * 1440) + (time_diff.seconds / 60)
        return Decimal(minutes) / Decimal(total_points)
class ActiveDebate(models.Model):
    """Stores which debate (committee name code) is currently active for a session."""
    session = models.ForeignKey(Session, on_delete=models.CASCADE)
    # Matches COMMITTEE_NAME_MAX; holds the active committee's short name.
    active_debate = models.CharField(max_length=8, blank=True, null=True)
    def __str__(self):
        return str(self.active_debate)
class ActiveRound(models.Model):
    """Stores which debate round is currently active for a session."""
    session = models.ForeignKey(Session, on_delete=models.CASCADE)
    active_round = models.PositiveSmallIntegerField(null=True, blank=True)

    def __str__(self):
        # BUG FIX: this was declared as __int__ while returning a string,
        # which made int(instance) raise TypeError and left the model without
        # the __str__ that every sibling model defines.
        return str(self.active_round)
class Announcement(models.Model):
    """A timed announcement shown to users, styled as a Bootstrap alert."""
    content = models.TextField()
    # auto_now: refreshed on every save, not just creation.
    timestamp = models.DateTimeField(auto_now=True)
    # Announcements past this moment should no longer be displayed.
    valid_until = models.DateTimeField()
    # The choice values double as Bootstrap CSS alert classes.
    SUCCESS = 'alert-success'
    INFO = 'alert-info'
    WARNING = 'alert-warning'
    DANGER = 'alert-danger'
    ANNOUNCEMENT_TYPES = (
        (SUCCESS, 'Success'),
        (INFO, 'Info'),
        (WARNING, 'Warning'),
        (DANGER, 'Danger'),
    )
    announcement_type = models.CharField(max_length=15, choices=ANNOUNCEMENT_TYPES, default=INFO)
    def __str__(self):
        # NOTE(review): type and content are concatenated without a separator,
        # e.g. "alert-infoHello" — confirm whether a separator was intended.
        return str(self.announcement_type + self.content)
# Defining a committee, there should be several of these connected with each session.
class Committee(models.Model):
    """A committee of a Session; provides display colors and vote/point stats."""
    session = models.ForeignKey(Session, on_delete=models.CASCADE)
    # Currently updating both until topics are stable
    topic_text = models.TextField()

    def get_topic(self):
        """Topic via the linked StatisticsTopicPlace (raises if none exists)."""
        return self.statisticstopicplace.topic

    name = models.CharField(max_length=COMMITTEE_NAME_MAX)
    next_subtopics = models.ManyToManyField('SubTopic', blank=True, related_name='next_subtopics+')

    # Material-design palette indexed by pk % 17. Index 0 is the fallback the
    # former 17-branch if/elif chain produced for color_id == 0.
    _COLOR_WHEEL = (
        'blue-grey', 'red', 'green', 'yellow', 'blue', 'purple', 'light-green',
        'orange', 'cyan', 'pink', 'lime', 'deep-orange', 'light-blue',
        'deep-purple', 'amber', 'teal', 'indigo',
    )

    def committee_color(self):
        """Deterministic material-design color derived from the primary key.

        IDIOM FIX: replaces a 17-branch if/elif chain with a table lookup;
        the mapping is unchanged.
        """
        return self._COLOR_WHEEL[self.pk % 17]

    def committee_text_color(self):
        """Black text on light backgrounds, white text on dark ones."""
        if self.committee_color() in ('cyan', 'light-green', 'lime', 'yellow', 'amber', 'orange'):
            return('black')
        return('white')

    def voting_successful(self):
        """True when votes in favour reach half of (total - absent) votes.

        Returns False when no votes were cast for this committee's debate.
        """
        votes = list(Vote.objects.filter(session=self.session).filter(active_debate=self.name))
        if not votes:
            return False
        total = sum(vote.total_votes() for vote in votes)
        in_favour = sum(vote.in_favour for vote in votes)
        absent = sum(vote.absent for vote in votes)
        return in_favour >= (total - absent) / 2

    def num_drs(self):
        """Number of Direct Responses made during this committee's debate."""
        points = Point.objects.filter(session=self.session).filter(active_debate=self.name)
        return sum(1 for point in points if point.point_type == 'DR')

    def num_points(self):
        """Total number of points made during this committee's debate."""
        points = Point.objects.filter(session=self.session).filter(active_debate=self.name)
        return len(points)

    def cleaned_name(self):
        """
        This returns the name of the committee without its enumeration to make it easier to use for categorisation
        """
        return self.name[:4]

    #Defining how the committee will be displayed in a list.
    def __str__(self):
        return str(self.name)
class Topic(models.Model):
    """A debate topic, categorised by type, subject area and difficulty."""
    # unique=True: the same topic text may only exist once across all sessions.
    text = models.TextField(unique=True)
    CREATIVE = 'CR'
    CONFLICT = 'CF'
    STRATEGY = 'ST'
    TOPIC_TYPES = (
        (CREATIVE, 'Creative'),
        (CONFLICT, 'Conflict'),
        (STRATEGY, 'Strategy')
    )
    type = models.CharField(max_length=2, choices=TOPIC_TYPES, blank=True, null=True)
    # Free-text subject area (e.g. a policy field); optional.
    area = models.CharField(max_length=TOPIC_AREA_LEN, blank=True, null=True)
    EASY = 'E'
    INTERMEDIATE = 'I'
    HARD = 'H'
    DIFFICULTIES = (
        (EASY, 'Easy'),
        (INTERMEDIATE, 'Intermediate'),
        (HARD, 'Hard')
    )
    difficulty = models.CharField(max_length=1, choices=DIFFICULTIES, blank=True, null=True)
    def __str__(self):
        return self.text
class TopicPlace(models.Model):
    """Base model linking a Topic to where it was used.

    Concrete data lives in exactly one of the subclasses
    (StatisticsTopicPlace or HistoricTopicPlace); accessors delegate there.
    """
    topic = models.ForeignKey(Topic, models.CASCADE)

    def child_method(self, method_name):
        """Invoke *method_name* on whichever concrete subclass row exists."""
        try:
            return getattr(self.statisticstopicplace, method_name)()
        except ObjectDoesNotExist:
            return getattr(self.historictopicplace, method_name)()

    def session_type(self):
        return self.child_method('session_type')

    def committee_name(self):
        return self.child_method('committee_name')

    def year(self):
        return self.child_method('year')

    def country(self):
        return self.child_method('country')

    def __str__(self):
        return self.child_method('__str__')
class StatisticsTopicPlace(TopicPlace):
    """TopicPlace backed by a live Committee of a recorded session.

    Every accessor returns '' when the committee link has been severed
    (the FK is SET_NULL on committee deletion).
    """
    committee = models.OneToOneField(Committee, models.SET_NULL, null=True)

    def session_name(self):
        return self.committee.session.name if self.committee else ''

    def session_type(self):
        return self.committee.session.session_type if self.committee else ''

    def committee_name(self):
        # Only the first word of the committee name (drops any enumeration).
        return self.committee.name.split(' ')[0] if self.committee else ''

    def year(self):
        return self.committee.session.end_date.year if self.committee else ''

    def country(self):
        return self.committee.session.country if self.committee else ''

    def __str__(self):
        return self.committee.session.name if self.committee else ''
class HistoricTopicPlace(TopicPlace):
    """TopicPlace for sessions predating the database, entered manually."""
    historic_date = models.DateField(blank=True, null=True)
    historic_country = models.CharField(max_length=2, choices=countries.SESSION_COUNTRIES, blank=True, null=True)
    historic_session_type = models.CharField(max_length=3, choices=session_types.SESSION_TYPES, blank=True, null=True)
    historic_committee_name = models.CharField(max_length=COMMITTEE_NAME_MAX)

    def session_type(self):
        return self.historic_session_type

    def committee_name(self):
        return self.historic_committee_name

    def year(self):
        if self.historic_date is not None:
            return self.historic_date.year
        return None

    def country(self):
        return self.historic_country

    def __str__(self):
        # Collect the available fields, then join them with " - ",
        # skipping the separator while the accumulated label is still empty.
        parts = []
        if self.get_historic_country_display() is not None:
            parts.append(self.get_historic_country_display())
        if self.historic_session_type is not None:
            parts.append(self.historic_session_type)
        if self.historic_committee_name is not None:
            parts.append(self.historic_committee_name)
        if self.historic_date is not None:
            parts.append(str(self.historic_date.year))
        label = ''
        for part in parts:
            if label != '':
                label += ' - '
            label += part
        return label
#Defining subtopics of a committee, there should ideally be between 3 and 7 of these, plus a "general" subtopic.
class SubTopic(models.Model):
    """A subtopic of a committee's topic; ideally 3-7 per committee plus "general"."""
    # Which session the subtopic is connected to (to prevent dupicate problems)
    # TODO: Remove this
    session = models.ForeignKey(Session, null=True, blank=True, on_delete=models.CASCADE)
    #Which committee within the session the subtopic should be connected to.
    committee = models.ForeignKey(Committee, blank=True, null=True, on_delete=models.CASCADE)
    #Name/Text of the subtopic. Should be short and catchy.
    text = models.CharField(max_length=200, blank=True, null=True)

    # Material-design palette indexed by pk % 17 (same palette as
    # Committee.committee_color); index 0 is the fallback the former
    # 17-branch if/elif chain produced for color_id == 0.
    _COLOR_WHEEL = (
        'blue-grey', 'red', 'green', 'yellow', 'blue', 'purple', 'light-green',
        'orange', 'cyan', 'pink', 'lime', 'deep-orange', 'light-blue',
        'deep-purple', 'amber', 'teal', 'indigo',
    )

    def subtopic_color(self):
        """Deterministic material-design color derived from the primary key.

        IDIOM FIX: replaces a 17-branch if/elif chain with a table lookup;
        the mapping is unchanged.
        """
        return self._COLOR_WHEEL[self.pk % 17]

    def text_color(self):
        """Black text on light backgrounds, white text on dark ones."""
        if self.subtopic_color() in ('cyan', 'light-green', 'lime', 'yellow', 'amber', 'orange'):
            return('black')
        return('white')

    #Defining what should be displayed in the admin list, it should be the suptopic text.
    def __str__(self):
        return str(self.text)
#Defining a Point, which is one peice of data that is submitted for every point of debate.
#Defining a Point, which is one peice of data that is submitted for every point of debate.
class Point(models.Model):
    """One recorded point of debate (a statistics entry, not its text)."""
    #Which session the point should be connected to.
    session = models.ForeignKey(Session, on_delete=models.CASCADE)
    #Timestamp of when the point was last updated.
    timestamp = models.DateTimeField(auto_now=True)
    #Which committee the point was by.
    committee_by = models.ForeignKey(Committee, on_delete=models.CASCADE)
    #Which was the active debate at the time the point was made.
    active_debate = models.CharField(max_length=8, blank=True, null=True)
    #Which was the Active Round at the time the point was made.
    active_round = models.PositiveSmallIntegerField(null=True, blank=True)
    #Defining the two point types, Point and Direct Response, the default will be Point.
    POINT = 'P'
    DIRECT_RESPONSE = 'DR'
    POINT_TYPES = (
        (POINT, 'Point'),
        (DIRECT_RESPONSE, 'Direct Response'),
    )
    point_type = models.CharField(max_length=5, choices=POINT_TYPES, default=POINT)
    #Saying that many subtopics can be connected to this point.
    subtopics = models.ManyToManyField(SubTopic, blank=True)

    #Definition of the point in an admin list will be the point type, "P" or "DR"
    def __str__(self):
        return str(self.point_type)
#For the running order, we need to set up a queueing system we can access at any point.
#For the running order, we need to set up a queueing system we can access at any point.
class RunningOrder(models.Model):
    """Queue entry: a committee waiting to make a point in a session."""
    #The running order has to be affiliated with a certain session
    session = models.ForeignKey(Session, on_delete=models.CASCADE)
    #and it needs a position
    position = models.PositiveSmallIntegerField()
    #then we need to know which committee it is that wants to make a point
    committee_by = models.ForeignKey(Committee, on_delete=models.CASCADE)
    #Finally we need to know what kind of point it is.
    POINT = 'P'
    DIRECT_RESPONSE = 'DR'
    POINT_TYPES = (
        (POINT, 'Point'),
        (DIRECT_RESPONSE, 'Direct Response'),
    )
    point_type = models.CharField(max_length=5, choices=POINT_TYPES, default=POINT)
#Creating the second kind of point, the content point, which contains the text of a given point. Based on Wolfskaempfs GA Stats.
#Creating the second kind of point, the content point, which contains the text of a given point. Based on Wolfskaempfs GA Stats.
class ContentPoint(models.Model):
    """A point of debate together with its textual content."""
    #The ContentPoint also needs to be affiliated with a certain session and committee
    #(which committee it was made by and which debate was active) in the same way as the statistic points.
    session = models.ForeignKey(Session, on_delete=models.CASCADE)
    committee_by = models.ForeignKey(Committee, on_delete=models.CASCADE)
    active_debate = models.CharField(max_length=8)
    #It's also to have a timestamp of when the content point was last edited
    timestamp = models.DateTimeField(auto_now=True)
    #Then we need the actual point content, which is a simple TextField.
    point_content = models.TextField()
    #Defining the two point types, Point and Direct Response, the default will be Point.
    POINT = 'P'
    DIRECT_RESPONSE = 'DR'
    POINT_TYPES = (
        (POINT, 'Point'),
        (DIRECT_RESPONSE, 'Direct Response'),
    )
    point_type = models.CharField(max_length=5, choices=POINT_TYPES, default=POINT)

    #We can also add a definition for showing in admin panels etc.
    def __str__(self):
        return str(self.point_content)
#Defining the voting class, one "vote" is filled in for each voting committee on each topic.
#Defining the voting class, one "vote" is filled in for each voting committee on each topic.
class Vote(models.Model):
    """One committee's vote tally on one debate topic."""
    #Which session the Vote should be connected to.
    session = models.ForeignKey(Session, on_delete=models.CASCADE)
    #Timestamp of when the vote was last updated
    timestamp = models.DateTimeField(auto_now=True)
    #Which debate was active when the vote was submitted
    active_debate = models.CharField(max_length=8)
    #Which committee the vote was by
    committee_by = models.ForeignKey(Committee, on_delete=models.CASCADE)
    #How many votes there were in favour
    in_favour = models.PositiveSmallIntegerField()
    #How many votes there were against
    against = models.PositiveSmallIntegerField()
    #How many abstentions there were
    abstentions = models.PositiveSmallIntegerField()
    #How many delegates were absent, very important so that the total amount of votes in the end
    #always displays the same number
    absent = models.PositiveSmallIntegerField()

    #Definition of the vote in admin lists should be the committee who voted
    def __str__(self):
        return str(self.committee_by)

    #The definition of the total votes, which is the sum of all the vote types.
    def total_votes(self):
        """Return the sum of all four tallies (should equal committee size)."""
        return (self.in_favour + self.against + self.abstentions + self.absent)
    # Admin list display hints for the computed column.
    total_votes.integer = True
    total_votes.short_description = 'Total Votes'
#Defining the gender class, which is an optional tracking aspect of GA stats shown on each sessions admin page
#Defining the gender class, which is an optional tracking aspect of GA stats shown on each sessions admin page
class Gender(models.Model):
    """Optional per-point gender tracking record for GA statistics."""
    #The gender needs to be connected to a session, the committee that was active at the time and the gender
    committee = models.ForeignKey(Committee, on_delete=models.CASCADE)

    def session(self):
        """Return the session this record belongs to, via its committee."""
        return self.committee.session

    timestamp = models.DateTimeField(auto_now=True, blank=True, null=True)
    FEMALE = 'F'
    MALE = 'M'
    OTHER = 'O'
    GENDERS = (
        (FEMALE, 'Female'),
        (MALE, 'Male'),
        (OTHER, 'Other')
    )
    gender = models.CharField(max_length=1, choices=GENDERS, default=FEMALE)

    #Finally we can add an admin definition
    def __str__(self):
        return str(self.gender)
|
eyp-developers/statistics
|
statisticscore/models.py
|
Python
|
gpl-3.0
| 22,641
|
[
"Amber"
] |
6b3a2c688fc2b56253e4a0a72c32aaa9dadb021f9c85af1b2bb57a1762e57b11
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Rackspace
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
import nova.policy
from nova import rpc
from nova import test
from nova import utils
from nova.network import manager as network_manager
from nova.tests import fake_network
LOG = logging.getLogger('nova.tests.network')

# Hostname used for every fixture below and for the managers under test.
HOST = "testhost"

# Two fake network records (id 0 and 1) mimicking rows from the networks
# table; both are flat (vlan=None) and hosted on HOST.
networks = [{'id': 0,
             'uuid': "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
             'label': 'test0',
             'injected': False,
             'multi_host': False,
             'cidr': '192.168.0.0/24',
             'cidr_v6': '2001:db8::/64',
             'gateway_v6': '2001:db8::1',
             'netmask_v6': '64',
             'netmask': '255.255.255.0',
             'bridge': 'fa0',
             'bridge_interface': 'fake_fa0',
             'gateway': '192.168.0.1',
             'broadcast': '192.168.0.255',
             'dns1': '192.168.0.1',
             'dns2': '192.168.0.2',
             'vlan': None,
             'host': HOST,
             'project_id': 'fake_project',
             'vpn_public_address': '192.168.0.2'},
            {'id': 1,
             'uuid': "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
             'label': 'test1',
             'injected': False,
             'multi_host': False,
             'cidr': '192.168.1.0/24',
             'cidr_v6': '2001:db9::/64',
             'gateway_v6': '2001:db9::1',
             'netmask_v6': '64',
             'netmask': '255.255.255.0',
             'bridge': 'fa1',
             'bridge_interface': 'fake_fa1',
             'gateway': '192.168.1.1',
             'broadcast': '192.168.1.255',
             'dns1': '192.168.0.1',
             'dns2': '192.168.0.2',
             'vlan': None,
             'host': HOST,
             'project_id': 'fake_project',
             'vpn_public_address': '192.168.1.2'}]

# One unallocated fixed IP per network above.
fixed_ips = [{'id': 0,
              'network_id': 0,
              'address': '192.168.0.100',
              'instance_id': 0,
              'allocated': False,
              'virtual_interface_id': 0,
              'floating_ips': []},
             {'id': 0,
              'network_id': 1,
              'address': '192.168.1.100',
              'instance_id': 0,
              'allocated': False,
              'virtual_interface_id': 0,
              'floating_ips': []}]

# Minimal instance-type (flavor) record.
flavor = {'id': 0,
          'rxtx_cap': 3}

# Template for a floating IP row, unassociated and unowned.
floating_ip_fields = {'id': 0,
                      'address': '192.168.10.100',
                      'pool': 'nova',
                      'interface': 'eth0',
                      'fixed_ip_id': 0,
                      'project_id': None,
                      'auto_assigned': False}

# Three virtual interfaces for instance 0, one per network id 0-2.
vifs = [{'id': 0,
         'address': 'DE:AD:BE:EF:00:00',
         'uuid': '00000000-0000-0000-0000-0000000000000000',
         'network_id': 0,
         'instance_id': 0},
        {'id': 1,
         'address': 'DE:AD:BE:EF:00:01',
         'uuid': '00000000-0000-0000-0000-0000000000000001',
         'network_id': 1,
         'instance_id': 0},
        {'id': 2,
         'address': 'DE:AD:BE:EF:00:02',
         'uuid': '00000000-0000-0000-0000-0000000000000002',
         'network_id': 2,
         'instance_id': 0}]
class FlatNetworkTestCase(test.TestCase):
    """Tests for nova.network.manager.FlatManager.

    Uses mox record/replay expectations against the db API plus the MiniDNS
    driver for instance DNS tests.
    """

    def setUp(self):
        """Create a FlatManager wired to a MiniDNS instance-DNS driver."""
        super(FlatNetworkTestCase, self).setUp()
        self.network = network_manager.FlatManager(host=HOST)
        temp = utils.import_object('nova.network.minidns.MiniDNS')
        self.network.instance_dns_manager = temp
        self.network.instance_dns_domain = ''
        self.network.db = db
        self.context = context.RequestContext('testuser', 'testproject',
                                              is_admin=False)

    def tearDown(self):
        super(FlatNetworkTestCase, self).tearDown()
        # Remove the MiniDNS on-disk state so tests don't leak into each other.
        self.network.instance_dns_manager.delete_dns_file()

    def test_get_instance_nw_info(self):
        """Check the per-network (nw, info) pairs returned by the fake."""
        fake_get_instance_nw_info = fake_network.fake_get_instance_nw_info
        nw_info = fake_get_instance_nw_info(self.stubs, 0, 2)
        # BUG FIX: the original asserted assertFalse(nw_info), but two
        # networks are requested above so nw_info is non-empty; the intent
        # is clearly that info was returned.
        self.assertTrue(nw_info)
        for i, (nw, info) in enumerate(nw_info):
            check = {'bridge': 'fake_br%d' % i,
                     'cidr': '192.168.%s.0/24' % i,
                     'cidr_v6': '2001:db8:0:%x::/64' % i,
                     'id': i,
                     'multi_host': False,
                     'injected': False,
                     'bridge_interface': 'fake_eth%d' % i,
                     'vlan': None}
            self.assertDictMatch(nw, check)
            # BUG FIX: the 'dns' entry formatted with an undefined name `n`
            # (NameError at runtime); the loop variable `i` is meant.
            # BUG FIX: 'rxtx_cap' appeared twice in this literal; only the
            # last value (3) ever took effect, so keep that one.
            check = {'broadcast': '192.168.%d.255' % i,
                     'dhcp_server': '192.168.%d.1' % i,
                     'dns': ['192.168.%d.3' % i, '192.168.%d.4' % i],
                     'gateway': '192.168.%d.1' % i,
                     'gateway_v6': '2001:db8:0:%x::1' % i,
                     'ip6s': 'DONTCARE',
                     'ips': 'DONTCARE',
                     'label': 'test%d' % i,
                     'mac': 'DE:AD:BE:EF:00:%02x' % i,
                     'vif_uuid':
                         '00000000-0000-0000-0000-00000000000000%02d' % i,
                     'rxtx_cap': 3,
                     'should_create_vlan': False,
                     'should_create_bridge': False}
            self.assertDictMatch(info, check)
            check = [{'enabled': 'DONTCARE',
                      'ip': '2001:db8::dcad:beff:feef:%s' % i,
                      'netmask': '64'}]
            self.assertDictListMatch(info['ip6s'], check)
            num_fixed_ips = len(info['ips'])
            check = [{'enabled': 'DONTCARE',
                      'ip': '192.168.%d.1%02d' % (i, ip_num),
                      'netmask': '255.255.255.0'}
                     for ip_num in xrange(num_fixed_ips)]
            self.assertDictListMatch(info['ips'], check)

    def test_validate_networks(self):
        """A requested (uuid, fixed_ip) pair with a free IP validates."""
        self.mox.StubOutWithMock(db, 'network_get')
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        self.mox.StubOutWithMock(db, "fixed_ip_get_by_address")
        requested_networks = [("bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
                               "192.168.1.100")]
        db.network_get_all_by_uuids(mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(networks)
        db.network_get(mox.IgnoreArg(),
                       mox.IgnoreArg()).AndReturn(networks[1])
        # Copy so the shared module-level fixture isn't mutated.
        ip = fixed_ips[1].copy()
        ip['instance_id'] = None
        db.fixed_ip_get_by_address(mox.IgnoreArg(),
                                   mox.IgnoreArg()).AndReturn(ip)
        self.mox.ReplayAll()
        self.network.validate_networks(self.context, requested_networks)

    def test_validate_reserved(self):
        """create_networks reserves 3 IPs in a fresh /24."""
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        nets = self.network.create_networks(context_admin, 'fake',
                                            '192.168.0.0/24', False, 1,
                                            256, None, None, None, None, None)
        self.assertEqual(1, len(nets))
        network = nets[0]
        self.assertEqual(3, db.network_count_reserved_ips(context_admin,
                                                          network['id']))

    def test_validate_networks_none_requested_networks(self):
        """None means 'no specific request' and must validate silently."""
        self.network.validate_networks(self.context, None)

    def test_validate_networks_empty_requested_networks(self):
        """An empty request list must validate without touching the db."""
        requested_networks = []
        self.mox.ReplayAll()
        self.network.validate_networks(self.context, requested_networks)

    def test_validate_networks_invalid_fixed_ip(self):
        """A malformed fixed IP raises FixedIpInvalid."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        requested_networks = [(1, "192.168.0.100.1")]
        db.network_get_all_by_uuids(mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(networks)
        self.mox.ReplayAll()
        self.assertRaises(exception.FixedIpInvalid,
                          self.network.validate_networks, self.context,
                          requested_networks)

    def test_validate_networks_empty_fixed_ip(self):
        """An empty-string fixed IP raises FixedIpInvalid."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        requested_networks = [(1, "")]
        db.network_get_all_by_uuids(mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(networks)
        self.mox.ReplayAll()
        self.assertRaises(exception.FixedIpInvalid,
                          self.network.validate_networks,
                          self.context, requested_networks)

    def test_validate_networks_none_fixed_ip(self):
        """A None fixed IP is acceptable (auto-allocate)."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        requested_networks = [(1, None)]
        db.network_get_all_by_uuids(mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(networks)
        self.mox.ReplayAll()
        self.network.validate_networks(self.context, requested_networks)

    def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
        """add_fixed_ip_to_instance drives the expected db call sequence."""
        self.mox.StubOutWithMock(db, 'network_get')
        self.mox.StubOutWithMock(db, 'network_update')
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(db,
                                 'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        db.fixed_ip_update(mox.IgnoreArg(),
                           mox.IgnoreArg(),
                           mox.IgnoreArg())
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
        db.instance_get(mox.IgnoreArg(),
                        mox.IgnoreArg()).AndReturn({'security_groups':
                                                    [{'id': 0}]})
        db.instance_get(self.context,
                        1).AndReturn({'display_name': HOST,
                                      'uuid': 'test-00001'})
        db.instance_get(mox.IgnoreArg(),
                        mox.IgnoreArg()).AndReturn({'availability_zone': ''})
        db.fixed_ip_associate_pool(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   mox.IgnoreArg()).AndReturn('192.168.0.101')
        db.network_get(mox.IgnoreArg(),
                       mox.IgnoreArg()).AndReturn(networks[0])
        db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.network.add_fixed_ip_to_instance(self.context, 1, HOST,
                                              networks[0]['id'])

    def test_mini_dns_driver(self):
        """Exercise MiniDNS create/delete/modify/lookup round trips."""
        zone1 = "example.org"
        zone2 = "example.com"
        driver = self.network.instance_dns_manager
        driver.create_entry("hostone", "10.0.0.1", "A", zone1)
        driver.create_entry("hosttwo", "10.0.0.2", "A", zone1)
        driver.create_entry("hostthree", "10.0.0.3", "A", zone1)
        driver.create_entry("hostfour", "10.0.0.4", "A", zone1)
        driver.create_entry("hostfive", "10.0.0.5", "A", zone2)
        driver.delete_entry("hostone", zone1)
        driver.modify_address("hostfour", "10.0.0.1", zone1)
        driver.modify_address("hostthree", "10.0.0.1", zone1)
        names = driver.get_entries_by_address("10.0.0.1", zone1)
        self.assertEqual(len(names), 2)
        self.assertIn('hostthree', names)
        self.assertIn('hostfour', names)
        names = driver.get_entries_by_address("10.0.0.5", zone2)
        self.assertEqual(len(names), 1)
        self.assertIn('hostfive', names)
        addresses = driver.get_entries_by_name("hosttwo", zone1)
        self.assertEqual(len(addresses), 1)
        self.assertIn('10.0.0.2', addresses)
        # An unsupported record type must be rejected.
        self.assertRaises(exception.InvalidInput,
                          driver.create_entry,
                          "hostname",
                          "10.10.10.10",
                          "invalidtype",
                          zone1)

    def test_instance_dns(self):
        """DNS entries are created for display name and uuid on allocation."""
        fixedip = '192.168.0.101'
        self.mox.StubOutWithMock(db, 'network_get')
        self.mox.StubOutWithMock(db, 'network_update')
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(db,
                                 'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        db.fixed_ip_update(mox.IgnoreArg(),
                           mox.IgnoreArg(),
                           mox.IgnoreArg())
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
        db.instance_get(mox.IgnoreArg(),
                        mox.IgnoreArg()).AndReturn({'security_groups':
                                                    [{'id': 0}]})
        db.instance_get(self.context,
                        1).AndReturn({'display_name': HOST,
                                      'uuid': 'test-00001'})
        db.instance_get(mox.IgnoreArg(),
                        mox.IgnoreArg()).AndReturn({'availability_zone': ''})
        db.fixed_ip_associate_pool(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   mox.IgnoreArg()).AndReturn(fixedip)
        db.network_get(mox.IgnoreArg(),
                       mox.IgnoreArg()).AndReturn(networks[0])
        db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.network.add_fixed_ip_to_instance(self.context, 1, HOST,
                                              networks[0]['id'])
        instance_manager = self.network.instance_dns_manager
        addresses = instance_manager.get_entries_by_name(HOST,
                self.network.instance_dns_domain)
        self.assertEqual(len(addresses), 1)
        self.assertEqual(addresses[0], fixedip)
        addresses = instance_manager.get_entries_by_name('test-00001',
                self.network.instance_dns_domain)
        self.assertEqual(len(addresses), 1)
        self.assertEqual(addresses[0], fixedip)
class VlanNetworkTestCase(test.TestCase):
    """Tests for nova.network.manager.VlanManager.

    Mixes mox record/replay expectations with stub-based fakes; the mox
    expectation order is significant and must match the manager's call
    sequence exactly.
    """

    def setUp(self):
        super(VlanNetworkTestCase, self).setUp()
        self.network = network_manager.VlanManager(host=HOST)
        self.network.db = db
        self.context = context.RequestContext('testuser', 'testproject',
                                              is_admin=False)

    def test_vpn_allocate_fixed_ip(self):
        """VPN allocation associates the network's reserved address."""
        self.mox.StubOutWithMock(db, 'fixed_ip_associate')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        self.mox.StubOutWithMock(db,
                                 'virtual_interface_get_by_instance_and_network')
        db.fixed_ip_associate(mox.IgnoreArg(),
                              mox.IgnoreArg(),
                              mox.IgnoreArg(),
                              mox.IgnoreArg(),
                              reserved=True).AndReturn('192.168.0.1')
        db.fixed_ip_update(mox.IgnoreArg(),
                           mox.IgnoreArg(),
                           mox.IgnoreArg())
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
        self.mox.ReplayAll()
        network = dict(networks[0])
        network['vpn_private_address'] = '192.168.0.2'
        self.network.allocate_fixed_ip(None, 0, network, vpn=True)

    def test_vpn_allocate_fixed_ip_no_network_id(self):
        """A network without an id cannot serve a VPN fixed IP."""
        network = dict(networks[0])
        network['vpn_private_address'] = '192.168.0.2'
        network['id'] = None
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        self.assertRaises(exception.FixedIpNotFoundForNetwork,
                          self.network.allocate_fixed_ip,
                          context_admin,
                          0,
                          network,
                          vpn=True)

    def test_allocate_fixed_ip(self):
        """Non-VPN allocation draws an address from the pool."""
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        self.mox.StubOutWithMock(db,
                                 'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'instance_get')
        db.instance_get(mox.IgnoreArg(),
                        mox.IgnoreArg()).AndReturn({'security_groups':
                                                    [{'id': 0}]})
        db.fixed_ip_associate_pool(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   mox.IgnoreArg()).AndReturn('192.168.0.1')
        db.fixed_ip_update(mox.IgnoreArg(),
                           mox.IgnoreArg(),
                           mox.IgnoreArg())
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
        self.mox.ReplayAll()
        network = dict(networks[0])
        network['vpn_private_address'] = '192.168.0.2'
        self.network.allocate_fixed_ip(self.context, 0, network)

    def test_create_networks_too_big(self):
        # 4094 exceeds the usable VLAN ID range.
        self.assertRaises(ValueError, self.network.create_networks, None,
                          num_networks=4094, vlan_start=1)

    def test_create_networks_too_many(self):
        # 100 networks of size 100 don't fit in a /24.
        self.assertRaises(ValueError, self.network.create_networks, None,
                          num_networks=100, vlan_start=1,
                          cidr='192.168.0.1/24', network_size=100)

    def test_validate_networks(self):
        """A valid (uuid, fixed_ip) request passes validation."""
        def network_get(_context, network_id):
            return networks[network_id]
        self.stubs.Set(db, 'network_get', network_get)
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        self.mox.StubOutWithMock(db, "fixed_ip_get_by_address")
        requested_networks = [("bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
                               "192.168.1.100")]
        db.network_get_all_by_uuids(mox.IgnoreArg(),
                                    mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(networks)
        # NOTE(review): this mutates the shared module-level fixture in place,
        # unlike FlatNetworkTestCase which copies it first.
        fixed_ips[1]['network_id'] = networks[1]['id']
        fixed_ips[1]['instance_id'] = None
        db.fixed_ip_get_by_address(mox.IgnoreArg(),
                                   mox.IgnoreArg()).AndReturn(fixed_ips[1])
        self.mox.ReplayAll()
        self.network.validate_networks(self.context, requested_networks)

    def test_validate_networks_none_requested_networks(self):
        self.network.validate_networks(self.context, None)

    def test_validate_networks_empty_requested_networks(self):
        requested_networks = []
        self.mox.ReplayAll()
        self.network.validate_networks(self.context, requested_networks)

    def test_validate_networks_invalid_fixed_ip(self):
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        requested_networks = [(1, "192.168.0.100.1")]
        db.network_get_all_by_uuids(mox.IgnoreArg(),
                                    mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(networks)
        self.mox.ReplayAll()
        self.assertRaises(exception.FixedIpInvalid,
                          self.network.validate_networks, self.context,
                          requested_networks)

    def test_validate_networks_empty_fixed_ip(self):
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        requested_networks = [(1, "")]
        db.network_get_all_by_uuids(mox.IgnoreArg(),
                                    mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(networks)
        self.mox.ReplayAll()
        self.assertRaises(exception.FixedIpInvalid,
                          self.network.validate_networks,
                          self.context, requested_networks)

    def test_validate_networks_none_fixed_ip(self):
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        requested_networks = [(1, None)]
        db.network_get_all_by_uuids(mox.IgnoreArg(),
                                    mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(networks)
        self.mox.ReplayAll()
        self.network.validate_networks(self.context, requested_networks)

    def test_floating_ip_owned_by_project(self):
        """Ownership check: None owner and foreign owner both raise."""
        ctxt = context.RequestContext('testuser', 'testproject',
                                      is_admin=False)
        # raises because floating_ip project_id is None
        floating_ip = {'address': '10.0.0.1',
                       'project_id': None}
        self.assertRaises(exception.NotAuthorized,
                          self.network._floating_ip_owned_by_project,
                          ctxt,
                          floating_ip)
        # raises because floating_ip project_id is not equal to ctxt project_id
        floating_ip = {'address': '10.0.0.1',
                       'project_id': ctxt.project_id + '1'}
        self.assertRaises(exception.NotAuthorized,
                          self.network._floating_ip_owned_by_project,
                          ctxt,
                          floating_ip)
        # does not raise (floating ip is owned by ctxt project)
        floating_ip = {'address': '10.0.0.1',
                       'project_id': ctxt.project_id}
        self.network._floating_ip_owned_by_project(ctxt, floating_ip)

    def test_allocate_floating_ip(self):
        """Quota enforcement on floating IP allocation."""
        ctxt = context.RequestContext('testuser', 'testproject',
                                      is_admin=False)

        def fake1(*args, **kwargs):
            return {'address': '10.0.0.1'}

        def fake2(*args, **kwargs):
            # over-quota count
            return 25

        def fake3(*args, **kwargs):
            # zero usage, under quota
            return 0
        self.stubs.Set(self.network.db, 'floating_ip_allocate_address', fake1)
        # this time should raise
        self.stubs.Set(self.network.db, 'floating_ip_count_by_project', fake2)
        self.assertRaises(exception.QuotaError,
                          self.network.allocate_floating_ip,
                          ctxt,
                          ctxt.project_id)
        # this time should not
        self.stubs.Set(self.network.db, 'floating_ip_count_by_project', fake3)
        self.network.allocate_floating_ip(ctxt, ctxt.project_id)

    def test_deallocate_floating_ip(self):
        """A floating IP still bound to a fixed IP cannot be deallocated."""
        ctxt = context.RequestContext('testuser', 'testproject',
                                      is_admin=False)

        def fake1(*args, **kwargs):
            pass

        def fake2(*args, **kwargs):
            return {'address': '10.0.0.1', 'fixed_ip_id': 1}

        def fake3(*args, **kwargs):
            return {'address': '10.0.0.1', 'fixed_ip_id': None}
        self.stubs.Set(self.network.db, 'floating_ip_deallocate', fake1)
        self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
        # this time should raise because floating ip is associated to fixed_ip
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
        self.assertRaises(exception.FloatingIpAssociated,
                          self.network.deallocate_floating_ip,
                          ctxt,
                          mox.IgnoreArg())
        # this time should not raise
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
        self.network.deallocate_floating_ip(ctxt, ctxt.project_id)

    def test_associate_floating_ip(self):
        """Association: bad interface, already-associated, remote vs local."""
        ctxt = context.RequestContext('testuser', 'testproject',
                                      is_admin=False)

        def fake1(*args, **kwargs):
            pass

        # floating ip that's already associated
        def fake2(*args, **kwargs):
            return {'address': '10.0.0.1',
                    'pool': 'nova',
                    'interface': 'eth0',
                    'fixed_ip_id': 1}

        # floating ip that isn't associated
        def fake3(*args, **kwargs):
            return {'address': '10.0.0.1',
                    'pool': 'nova',
                    'interface': 'eth0',
                    'fixed_ip_id': None}

        # fixed ip with remote host
        def fake4(*args, **kwargs):
            return {'address': '10.0.0.1',
                    'pool': 'nova',
                    'interface': 'eth0',
                    'network_id': 'blah'}

        def fake4_network(*args, **kwargs):
            return {'multi_host': False, 'host': 'jibberjabber'}

        # fixed ip with local host
        def fake5(*args, **kwargs):
            return {'address': '10.0.0.1',
                    'pool': 'nova',
                    'interface': 'eth0',
                    'network_id': 'blahblah'}

        def fake5_network(*args, **kwargs):
            return {'multi_host': False, 'host': 'testhost'}

        def fake6(*args, **kwargs):
            # records that the call went through rpc (remote path)
            self.local = False

        def fake7(*args, **kwargs):
            # records that the call was handled locally
            self.local = True

        def fake8(*args, **kwargs):
            raise exception.ProcessExecutionError('',
                    'Cannot find device "em0"\n')
        # raises because interface doesn't exist
        self.stubs.Set(self.network.db,
                       'floating_ip_fixed_ip_associate',
                       fake1)
        self.stubs.Set(self.network.db, 'floating_ip_disassociate', fake1)
        self.stubs.Set(self.network.driver, 'bind_floating_ip', fake8)
        self.assertRaises(exception.NoFloatingIpInterface,
                          self.network._associate_floating_ip,
                          ctxt,
                          mox.IgnoreArg(),
                          mox.IgnoreArg(),
                          mox.IgnoreArg())
        self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
        # raises because floating_ip is already associated to a fixed_ip
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
        self.assertRaises(exception.FloatingIpAssociated,
                          self.network.associate_floating_ip,
                          ctxt,
                          mox.IgnoreArg(),
                          mox.IgnoreArg())
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
        # does not raise and makes call remotely
        self.local = True
        self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake4)
        self.stubs.Set(self.network.db, 'network_get', fake4_network)
        self.stubs.Set(rpc, 'cast', fake6)
        self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
                                           mox.IgnoreArg())
        self.assertFalse(self.local)
        # does not raise and makes call locally
        self.local = False
        self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake5)
        self.stubs.Set(self.network.db, 'network_get', fake5_network)
        self.stubs.Set(self.network, '_associate_floating_ip', fake7)
        self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
                                           mox.IgnoreArg())
        self.assertTrue(self.local)

    def test_disassociate_floating_ip(self):
        """Disassociation: not-associated raises; remote vs local dispatch."""
        ctxt = context.RequestContext('testuser', 'testproject',
                                      is_admin=False)

        def fake1(*args, **kwargs):
            pass

        # floating ip that isn't associated
        def fake2(*args, **kwargs):
            return {'address': '10.0.0.1',
                    'pool': 'nova',
                    'interface': 'eth0',
                    'fixed_ip_id': None}

        # floating ip that is associated
        def fake3(*args, **kwargs):
            return {'address': '10.0.0.1',
                    'pool': 'nova',
                    'interface': 'eth0',
                    'fixed_ip_id': 1}

        # fixed ip with remote host
        def fake4(*args, **kwargs):
            return {'address': '10.0.0.1',
                    'pool': 'nova',
                    'interface': 'eth0',
                    'network_id': 'blah'}

        def fake4_network(*args, **kwargs):
            return {'multi_host': False,
                    'host': 'jibberjabber'}

        # fixed ip with local host
        def fake5(*args, **kwargs):
            return {'address': '10.0.0.1',
                    'pool': 'nova',
                    'interface': 'eth0',
                    'network_id': 'blahblah'}

        def fake5_network(*args, **kwargs):
            return {'multi_host': False, 'host': 'testhost'}

        def fake6(*args, **kwargs):
            self.local = False

        def fake7(*args, **kwargs):
            self.local = True
        self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
        # raises because floating_ip is not associated to a fixed_ip
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
        self.assertRaises(exception.FloatingIpNotAssociated,
                          self.network.disassociate_floating_ip,
                          ctxt,
                          mox.IgnoreArg())
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
        # does not raise and makes call remotely
        self.local = True
        self.stubs.Set(self.network.db, 'fixed_ip_get', fake4)
        self.stubs.Set(self.network.db, 'network_get', fake4_network)
        self.stubs.Set(rpc, 'cast', fake6)
        self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
        self.assertFalse(self.local)
        # does not raise and makes call locally
        self.local = False
        self.stubs.Set(self.network.db, 'fixed_ip_get', fake5)
        self.stubs.Set(self.network.db, 'network_get', fake5_network)
        self.stubs.Set(self.network, '_disassociate_floating_ip', fake7)
        self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
        self.assertTrue(self.local)

    def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
        self.mox.StubOutWithMock(db, 'network_get')
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(db,
                                 'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        db.fixed_ip_update(mox.IgnoreArg(),
                           mox.IgnoreArg(),
                           mox.IgnoreArg())
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
        db.instance_get(mox.IgnoreArg(),
                        mox.IgnoreArg()).AndReturn({'security_groups':
                                                    [{'id': 0}],
                                                    'availability_zone': ''})
        db.fixed_ip_associate_pool(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   mox.IgnoreArg()).AndReturn('192.168.0.101')
        db.network_get(mox.IgnoreArg(),
                       mox.IgnoreArg()).AndReturn(networks[0])
        self.mox.ReplayAll()
        self.network.add_fixed_ip_to_instance(self.context, 1, HOST,
                                              networks[0]['id'])

    def test_ip_association_and_allocation_of_other_project(self):
        """Makes sure that we cannot deallocaate or disassociate
        a public ip of other project"""

        def network_get(_context, network_id):
            return networks[network_id]
        self.stubs.Set(db, 'network_get', network_get)
        context1 = context.RequestContext('user', 'project1')
        context2 = context.RequestContext('user', 'project2')
        address = '1.2.3.4'
        float_addr = db.floating_ip_create(context1.elevated(),
                {'address': address,
                 'project_id': context1.project_id})
        instance = db.instance_create(context1,
                {'project_id': 'project1'})
        fix_addr = db.fixed_ip_associate_pool(context1.elevated(),
                1, instance['id'])
        # Associate the IP with non-admin user context
        self.assertRaises(exception.NotAuthorized,
                          self.network.associate_floating_ip,
                          context2,
                          float_addr,
                          fix_addr)
        # Deallocate address from other project
        self.assertRaises(exception.NotAuthorized,
                          self.network.deallocate_floating_ip,
                          context2,
                          float_addr)
        # Now Associates the address to the actual project
        self.network.associate_floating_ip(context1, float_addr, fix_addr)
        # Now try dis-associating from other project
        self.assertRaises(exception.NotAuthorized,
                          self.network.disassociate_floating_ip,
                          context2,
                          float_addr)
        # Clean up the ip addresses
        self.network.disassociate_floating_ip(context1, float_addr)
        self.network.deallocate_floating_ip(context1, float_addr)
        self.network.deallocate_fixed_ip(context1, fix_addr)
        db.floating_ip_destroy(context1.elevated(), float_addr)
        db.fixed_ip_disassociate(context1.elevated(), fix_addr)
class CommonNetworkTestCase(test.TestCase):
def setUp(self):
    """Prepare a plain (non-admin) request context for each test."""
    super(CommonNetworkTestCase, self).setUp()
    self.context = context.RequestContext('fake', 'fake')
def fake_create_fixed_ips(self, context, network_id):
    """No-op stand-in for the manager's fixed-IP creation hook."""
    return None
def test_remove_fixed_ip_from_instance(self):
    """Removing a known fixed IP records it as deallocated on the fake."""
    manager = fake_network.FakeNetworkManager()
    manager.remove_fixed_ip_from_instance(self.context, 99, '10.0.0.1')
    # assertEquals is a deprecated alias; use the canonical assertEqual.
    self.assertEqual(manager.deallocate_called, '10.0.0.1')
def test_remove_fixed_ip_from_instance_bad_input(self):
    """Removing an address the instance doesn't have raises."""
    manager = fake_network.FakeNetworkManager()
    self.assertRaises(exception.FixedIpNotFoundForSpecificInstance,
                      manager.remove_fixed_ip_from_instance,
                      self.context, 99, 'bad input')
def test_validate_cidrs(self):
    """A single /24 request yields exactly that one network."""
    manager = fake_network.FakeNetworkManager()
    created = manager.create_networks(None, 'fake', '192.168.0.0/24',
                                      False, 1, 256, None, None, None,
                                      None, None)
    self.assertEqual(1, len(created))
    created_cidrs = [str(net['cidr']) for net in created]
    self.assertTrue('192.168.0.0/24' in created_cidrs)
def test_validate_cidrs_split_exact_in_half(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(None, 'fake', '192.168.0.0/24',
False, 2, 128, None, None, None,
None, None)
self.assertEqual(2, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertTrue('192.168.0.0/25' in cidrs)
self.assertTrue('192.168.0.128/25' in cidrs)
def test_validate_cidrs_split_cidr_in_use_middle_of_range(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.0/24'}])
self.mox.ReplayAll()
nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
False, 4, 256, None, None, None,
None, None)
self.assertEqual(4, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
'192.168.4.0/24']
for exp_cidr in exp_cidrs:
self.assertTrue(exp_cidr in cidrs)
self.assertFalse('192.168.2.0/24' in cidrs)
def test_validate_cidrs_smaller_subnet_in_use(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.9/25'}])
self.mox.ReplayAll()
# ValueError: requested cidr (192.168.2.0/24) conflicts with
# existing smaller cidr
args = (None, 'fake', '192.168.2.0/24', False, 1, 256, None, None,
None, None, None)
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_split_smaller_cidr_in_use(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.0/25'}])
self.mox.ReplayAll()
nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
False, 4, 256, None, None, None, None,
None)
self.assertEqual(4, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
'192.168.4.0/24']
for exp_cidr in exp_cidrs:
self.assertTrue(exp_cidr in cidrs)
self.assertFalse('192.168.2.0/24' in cidrs)
def test_validate_cidrs_split_smaller_cidr_in_use2(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.9/29'}])
self.mox.ReplayAll()
nets = manager.create_networks(None, 'fake', '192.168.2.0/24',
False, 3, 32, None, None, None, None,
None)
self.assertEqual(3, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27']
for exp_cidr in exp_cidrs:
self.assertTrue(exp_cidr in cidrs)
self.assertFalse('192.168.2.0/27' in cidrs)
def test_validate_cidrs_split_all_in_use(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
in_use = [{'id': 1, 'cidr': '192.168.2.9/29'},
{'id': 2, 'cidr': '192.168.2.64/26'},
{'id': 3, 'cidr': '192.168.2.128/26'}]
manager.db.network_get_all(ctxt).AndReturn(in_use)
self.mox.ReplayAll()
args = (None, 'fake', '192.168.2.0/24', False, 3, 64, None, None,
None, None, None)
# ValueError: Not enough subnets avail to satisfy requested num_
# networks - some subnets in requested range already
# in use
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_one_in_use(self):
manager = fake_network.FakeNetworkManager()
args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None,
None, None, None)
# ValueError: network_size * num_networks exceeds cidr size
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_already_used(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.0.0/24'}])
self.mox.ReplayAll()
# ValueError: cidr already in use
args = (None, 'fake', '192.168.0.0/24', False, 1, 256, None, None,
None, None, None)
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_too_many(self):
manager = fake_network.FakeNetworkManager()
args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None,
None, None, None)
# ValueError: Not enough subnets avail to satisfy requested
# num_networks
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_split_partial(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
False, 2, 256, None, None, None, None,
None)
returned_cidrs = [str(net['cidr']) for net in nets]
self.assertTrue('192.168.0.0/24' in returned_cidrs)
self.assertTrue('192.168.1.0/24' in returned_cidrs)
def test_validate_cidrs_conflict_existing_supernet(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
fakecidr = [{'id': 1, 'cidr': '192.168.0.0/8'}]
manager.db.network_get_all(ctxt).AndReturn(fakecidr)
self.mox.ReplayAll()
args = (None, 'fake', '192.168.0.0/24', False, 1, 256, None, None,
None, None, None)
# ValueError: requested cidr (192.168.0.0/24) conflicts
# with existing supernet
self.assertRaises(ValueError, manager.create_networks, *args)
def test_create_networks(self):
cidr = '192.168.0.0/24'
manager = fake_network.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [None, 'foo', cidr, None, 1, 256, 'fd00::/48', None, None,
None, None, None]
self.assertTrue(manager.create_networks(*args))
def test_create_networks_cidr_already_used(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
fakecidr = [{'id': 1, 'cidr': '192.168.0.0/24'}]
manager.db.network_get_all(ctxt).AndReturn(fakecidr)
self.mox.ReplayAll()
args = [None, 'foo', '192.168.0.0/24', None, 1, 256,
'fd00::/48', None, None, None, None, None]
self.assertRaises(ValueError, manager.create_networks, *args)
def test_create_networks_many(self):
cidr = '192.168.0.0/16'
manager = fake_network.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [None, 'foo', cidr, None, 10, 256, 'fd00::/48', None, None,
None, None, None]
self.assertTrue(manager.create_networks(*args))
def test_get_instance_uuids_by_ip_regex(self):
manager = fake_network.FakeNetworkManager()
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
# Greedy get eveything
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '.*'})
self.assertEqual(len(res), len(_vifs))
# Doesn't exist
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '10.0.0.1'})
self.assertFalse(res)
# Get instance 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '172.16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_id'], _vifs[1]['instance_id'])
# Get instance 2
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '173.16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_id'], _vifs[2]['instance_id'])
# Get instance 0 and 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '172.16.0.*'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_id'], _vifs[0]['instance_id'])
self.assertEqual(res[1]['instance_id'], _vifs[1]['instance_id'])
# Get instance 1 and 2
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '17..16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_id'], _vifs[1]['instance_id'])
self.assertEqual(res[1]['instance_id'], _vifs[2]['instance_id'])
def test_get_instance_uuids_by_ipv6_regex(self):
manager = fake_network.FakeNetworkManager()
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
# Greedy get eveything
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*'})
self.assertEqual(len(res), len(_vifs))
# Doesn't exist
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*1034.*'})
self.assertFalse(res)
# Get instance 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '2001:.*2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_id'], _vifs[1]['instance_id'])
# Get instance 2
ip6 = '2001:db8:69:1f:dead:beff:feff:ef03'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': ip6})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_id'], _vifs[2]['instance_id'])
# Get instance 0 and 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*ef0[1,2]'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_id'], _vifs[0]['instance_id'])
self.assertEqual(res[1]['instance_id'], _vifs[1]['instance_id'])
# Get instance 1 and 2
ip6 = '2001:db8:69:1.:dead:beff:feff:ef0.'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': ip6})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_id'], _vifs[1]['instance_id'])
self.assertEqual(res[1]['instance_id'], _vifs[2]['instance_id'])
def test_get_instance_uuids_by_ip(self):
manager = fake_network.FakeNetworkManager()
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
# No regex for you!
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': '.*'})
self.assertFalse(res)
# Doesn't exist
ip = '10.0.0.1'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertFalse(res)
# Get instance 1
ip = '172.16.0.2'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_id'], _vifs[1]['instance_id'])
# Get instance 2
ip = '173.16.0.2'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_id'], _vifs[2]['instance_id'])
def test_get_network(self):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
self.mox.StubOutWithMock(manager.db, 'network_get_all_by_uuids')
manager.db.network_get_all_by_uuids(mox.IgnoreArg(),
mox.IgnoreArg()).\
AndReturn(networks)
self.mox.ReplayAll()
uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
network = manager.get_network(fake_context, uuid)
self.assertEqual(network['uuid'], uuid)
def test_get_network_not_found(self):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
self.mox.StubOutWithMock(manager.db, 'network_get_all_by_uuids')
manager.db.network_get_all_by_uuids(mox.IgnoreArg(),
mox.IgnoreArg()).\
AndReturn([])
self.mox.ReplayAll()
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
manager.get_network, fake_context, uuid)
def test_get_all_networks(self):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
self.mox.StubOutWithMock(manager.db, 'network_get_all')
manager.db.network_get_all(mox.IgnoreArg()).\
AndReturn(networks)
self.mox.ReplayAll()
output = manager.get_all_networks(fake_context)
self.assertEqual(len(networks), 2)
self.assertEqual(output[0]['uuid'],
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
self.assertEqual(output[1]['uuid'],
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb')
def test_disassociate_network(self):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
self.mox.StubOutWithMock(manager.db, 'network_get_all_by_uuids')
manager.db.network_get_all_by_uuids(mox.IgnoreArg(),
mox.IgnoreArg()).\
AndReturn(networks)
self.mox.ReplayAll()
uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
manager.disassociate_network(fake_context, uuid)
def test_disassociate_network_not_found(self):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
self.mox.StubOutWithMock(manager.db, 'network_get_all_by_uuids')
manager.db.network_get_all_by_uuids(mox.IgnoreArg(),
mox.IgnoreArg()).\
AndReturn([])
self.mox.ReplayAll()
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
manager.disassociate_network, fake_context, uuid)
class TestRPCFixedManager(network_manager.RPCAllocateFixedIP,
                          network_manager.NetworkManager):
    """Concrete test double that mixes RPCAllocateFixedIP into
    NetworkManager so the RPC fixed-ip path can be exercised directly.
    """
class RPCAllocateTestCase(test.TestCase):
    """Tests nova.network.manager.RPCAllocateFixedIP"""

    def setUp(self):
        super(RPCAllocateTestCase, self).setUp()
        self.rpc_fixed = TestRPCFixedManager()
        self.context = context.RequestContext('fake', 'fake')

    def test_rpc_allocate(self):
        """Test to verify bug 855030 doesn't resurface.

        Makes sure _rpc_allocate_fixed_ip returns a value so the call
        returns properly and the greenpool completes."""
        address = '10.10.10.10'

        def fake_allocate(*args, **kwargs):
            # Stub allocate_fixed_ip: always hand back the canned address.
            return address

        def fake_network_get(*args, **kwargs):
            # Stub db.network_get: the network contents are irrelevant here.
            return {}

        self.stubs.Set(self.rpc_fixed, 'allocate_fixed_ip', fake_allocate)
        self.stubs.Set(self.rpc_fixed.db, 'network_get', fake_network_get)
        rval = self.rpc_fixed._rpc_allocate_fixed_ip(self.context,
                                                     'fake_instance',
                                                     'fake_network')
        # The regression was _rpc_allocate_fixed_ip returning None.
        self.assertEqual(rval, address)
class TestFloatingIPManager(network_manager.FloatingIP,
                            network_manager.NetworkManager):
    """Concrete test double that mixes the FloatingIP mixin into
    NetworkManager so floating-ip behavior can be tested directly.
    """
class FloatingIPTestCase(test.TestCase):
    """Tests nova.network.manager.FloatingIP"""

    def setUp(self):
        super(FloatingIPTestCase, self).setUp()
        self.network = TestFloatingIPManager()
        # MiniDNS writes entries to a local file; tearDown removes it.
        temp = utils.import_object('nova.network.minidns.MiniDNS')
        self.network.floating_dns_manager = temp
        self.network.db = db
        self.project_id = 'testproject'
        self.context = context.RequestContext('testuser', self.project_id,
                                              is_admin=False)

    def tearDown(self):
        super(FloatingIPTestCase, self).tearDown()
        self.network.floating_dns_manager.delete_dns_file()

    def test_double_deallocation(self):
        instance_ref = db.api.instance_create(self.context,
                                              {"project_id": self.project_id})
        # Run it twice to make it fault if it does not handle
        # instances without fixed networks
        # If this fails in either, it does not handle having no addresses
        self.network.deallocate_for_instance(self.context,
                                             instance_id=instance_ref['id'])
        self.network.deallocate_for_instance(self.context,
                                             instance_id=instance_ref['id'])

    def test_floating_dns_create_conflict(self):
        zone = "example.org"
        address1 = "10.10.10.11"
        name1 = "foo"
        name2 = "bar"
        self.network.add_dns_entry(self.context, address1, name1, "A", zone)
        # Re-adding the same name in the same zone must conflict.
        self.assertRaises(exception.FloatingIpDNSExists,
                          self.network.add_dns_entry, self.context,
                          address1, name1, "A", zone)

    def test_floating_create_and_get(self):
        zone = "example.org"
        address1 = "10.10.10.11"
        name1 = "foo"
        name2 = "bar"
        entries = self.network.get_dns_entries_by_address(self.context,
                                                          address1, zone)
        self.assertFalse(entries)
        self.network.add_dns_entry(self.context, address1, name1, "A", zone)
        self.network.add_dns_entry(self.context, address1, name2, "A", zone)
        # Lookup by address returns both names; lookup by name returns
        # the single address.
        entries = self.network.get_dns_entries_by_address(self.context,
                                                          address1, zone)
        self.assertEquals(len(entries), 2)
        self.assertEquals(entries[0], name1)
        self.assertEquals(entries[1], name2)
        entries = self.network.get_dns_entries_by_name(self.context,
                                                       name1, zone)
        self.assertEquals(len(entries), 1)
        self.assertEquals(entries[0], address1)

    def test_floating_dns_delete(self):
        zone = "example.org"
        address1 = "10.10.10.11"
        name1 = "foo"
        name2 = "bar"
        self.network.add_dns_entry(self.context, address1, name1, "A", zone)
        self.network.add_dns_entry(self.context, address1, name2, "A", zone)
        self.network.delete_dns_entry(self.context, name1, zone)
        entries = self.network.get_dns_entries_by_address(self.context,
                                                          address1, zone)
        self.assertEquals(len(entries), 1)
        self.assertEquals(entries[0], name2)
        # Deleting an already-deleted entry raises NotFound.
        self.assertRaises(exception.NotFound,
                          self.network.delete_dns_entry, self.context,
                          name1, zone)

    def test_floating_dns_domains_public(self):
        zone1 = "testzone"
        domain1 = "example.org"
        domain2 = "example.com"
        address1 = '10.10.10.10'
        entryname = 'testentry'
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        # Creating/deleting public domains requires an admin context.
        self.assertRaises(exception.AdminRequired,
                          self.network.create_public_dns_domain, self.context,
                          domain1, zone1)
        self.network.create_public_dns_domain(context_admin, domain1,
                                              'testproject')
        self.network.create_public_dns_domain(context_admin, domain2,
                                              'fakeproject')
        domains = self.network.get_dns_domains(self.context)
        self.assertEquals(len(domains), 2)
        self.assertEquals(domains[0]['domain'], domain1)
        self.assertEquals(domains[1]['domain'], domain2)
        self.assertEquals(domains[0]['project'], 'testproject')
        self.assertEquals(domains[1]['project'], 'fakeproject')
        self.network.add_dns_entry(self.context, address1, entryname,
                                   'A', domain1)
        entries = self.network.get_dns_entries_by_name(self.context,
                                                       entryname, domain1)
        self.assertEquals(len(entries), 1)
        self.assertEquals(entries[0], address1)
        self.assertRaises(exception.AdminRequired,
                          self.network.delete_dns_domain, self.context,
                          domain1)
        self.network.delete_dns_domain(context_admin, domain1)
        self.network.delete_dns_domain(context_admin, domain2)
        # Verify that deleting the domain deleted the associated entry
        entries = self.network.get_dns_entries_by_name(self.context,
                                                       entryname, domain1)
        self.assertFalse(entries)

    def test_delete_all_by_ip(self):
        domain1 = "example.org"
        domain2 = "example.com"
        address = "10.10.10.10"
        name1 = "foo"
        name2 = "bar"

        def fake_domains(context):
            # Pretend three public domains exist, including a subdomain.
            return [{'domain': 'example.org', 'scope': 'public'},
                    {'domain': 'example.com', 'scope': 'public'},
                    {'domain': 'test.example.org', 'scope': 'public'}]

        self.stubs.Set(self.network, 'get_dns_domains', fake_domains)
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        self.network.create_public_dns_domain(context_admin, domain1,
                                              'testproject')
        self.network.create_public_dns_domain(context_admin, domain2,
                                              'fakeproject')
        domains = self.network.get_dns_domains(self.context)
        for domain in domains:
            self.network.add_dns_entry(self.context, address,
                                       name1, "A", domain['domain'])
            self.network.add_dns_entry(self.context, address,
                                       name2, "A", domain['domain'])
            entries = self.network.get_dns_entries_by_address(self.context,
                                                              address,
                                                              domain['domain'])
            self.assertEquals(len(entries), 2)
        # One call must purge the address from every domain.
        self.network._delete_all_entries_for_ip(self.context, address)
        for domain in domains:
            entries = self.network.get_dns_entries_by_address(self.context,
                                                              address,
                                                              domain['domain'])
            self.assertFalse(entries)
        self.network.delete_dns_domain(context_admin, domain1)
        self.network.delete_dns_domain(context_admin, domain2)
class NetworkPolicyTestCase(test.TestCase):
    """Tests that network_manager.check_policy enforces the expected
    policy action with the caller's project/user as the target."""

    def setUp(self):
        super(NetworkPolicyTestCase, self).setUp()
        # Reset then re-init so each test starts from the default rules.
        nova.policy.reset()
        nova.policy.init()
        self.context = context.get_admin_context()

    def tearDown(self):
        super(NetworkPolicyTestCase, self).tearDown()
        nova.policy.reset()

    def _set_rules(self, rules):
        # Helper for installing an ad-hoc policy brain.
        nova.common.policy.set_brain(nova.common.policy.HttpBrain(rules))

    def test_check_policy(self):
        self.mox.StubOutWithMock(nova.policy, 'enforce')
        target = {
            'project_id': self.context.project_id,
            'user_id': self.context.user_id,
        }
        # Expect the 'network:' prefix to be prepended to the action.
        nova.policy.enforce(self.context, 'network:get_all', target)
        self.mox.ReplayAll()
        network_manager.check_policy(self.context, 'get_all')
        self.mox.UnsetStubs()
        self.mox.VerifyAll()
class InstanceDNSTestCase(test.TestCase):
    """Tests nova.network.manager instance DNS"""

    def setUp(self):
        super(InstanceDNSTestCase, self).setUp()
        self.network = TestFloatingIPManager()
        # Instance DNS uses the file-backed MiniDNS; floating DNS gets the
        # no-op base driver since it is not under test here.
        temp = utils.import_object('nova.network.minidns.MiniDNS')
        self.network.instance_dns_manager = temp
        temp = utils.import_object('nova.network.dns_driver.DNSDriver')
        self.network.floating_dns_manager = temp
        self.network.db = db
        self.project_id = 'testproject'
        self.context = context.RequestContext('testuser', self.project_id,
                                              is_admin=False)

    def tearDown(self):
        super(InstanceDNSTestCase, self).tearDown()
        self.network.instance_dns_manager.delete_dns_file()

    def test_dns_domains_private(self):
        zone1 = 'testzone'
        domain1 = 'example.org'
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        # Private domain create/delete requires admin.
        self.assertRaises(exception.AdminRequired,
                          self.network.create_private_dns_domain, self.context,
                          domain1, zone1)
        self.network.create_private_dns_domain(context_admin, domain1, zone1)
        domains = self.network.get_dns_domains(self.context)
        self.assertEquals(len(domains), 1)
        self.assertEquals(domains[0]['domain'], domain1)
        self.assertEquals(domains[0]['availability_zone'], zone1)
        self.assertRaises(exception.AdminRequired,
                          self.network.delete_dns_domain, self.context,
                          domain1)
        self.network.delete_dns_domain(context_admin, domain1)
# Module-level DNS domain fixtures shared by LdapDNSTestCase below.
domain1 = "example.org"
domain2 = "example.com"
class LdapDNSTestCase(test.TestCase):
    """Tests nova.network.ldapdns.LdapDNS"""

    def setUp(self):
        super(LdapDNSTestCase, self).setUp()
        # FakeLdapDNS backs the driver with an in-process fake LDAP server.
        temp = utils.import_object('nova.network.ldapdns.FakeLdapDNS')
        self.driver = temp
        self.driver.create_domain(domain1)
        self.driver.create_domain(domain2)

    def tearDown(self):
        super(LdapDNSTestCase, self).tearDown()
        self.driver.delete_domain(domain1)
        self.driver.delete_domain(domain2)

    def test_ldap_dns_domains(self):
        domains = self.driver.get_domains()
        self.assertEqual(len(domains), 2)
        self.assertIn(domain1, domains)
        self.assertIn(domain2, domains)

    def test_ldap_dns_create_conflict(self):
        address1 = "10.10.10.11"
        name1 = "foo"
        name2 = "bar"
        self.driver.create_entry(name1, address1, "A", domain1)
        # Same name in the same domain must conflict.
        self.assertRaises(exception.FloatingIpDNSExists,
                          self.driver.create_entry,
                          name1, address1, "A", domain1)

    def test_ldap_dns_create_and_get(self):
        address1 = "10.10.10.11"
        name1 = "foo"
        name2 = "bar"
        entries = self.driver.get_entries_by_address(address1, domain1)
        self.assertFalse(entries)
        self.driver.create_entry(name1, address1, "A", domain1)
        self.driver.create_entry(name2, address1, "A", domain1)
        entries = self.driver.get_entries_by_address(address1, domain1)
        self.assertEquals(len(entries), 2)
        self.assertEquals(entries[0], name1)
        self.assertEquals(entries[1], name2)
        entries = self.driver.get_entries_by_name(name1, domain1)
        self.assertEquals(len(entries), 1)
        self.assertEquals(entries[0], address1)

    def test_ldap_dns_delete(self):
        address1 = "10.10.10.11"
        name1 = "foo"
        name2 = "bar"
        self.driver.create_entry(name1, address1, "A", domain1)
        self.driver.create_entry(name2, address1, "A", domain1)
        entries = self.driver.get_entries_by_address(address1, domain1)
        self.assertEquals(len(entries), 2)
        self.driver.delete_entry(name1, domain1)
        entries = self.driver.get_entries_by_address(address1, domain1)
        LOG.debug("entries: %s" % entries)
        self.assertEquals(len(entries), 1)
        self.assertEquals(entries[0], name2)
        # Deleting the same entry again raises NotFound.
        self.assertRaises(exception.NotFound,
                          self.driver.delete_entry,
                          name1, domain1)
|
rcbops/nova-buildpackage
|
nova/tests/test_network.py
|
Python
|
apache-2.0
| 66,384
|
[
"FEFF"
] |
a312e5e85e805ec296ed47516abff416cb93326c1f71281dc862769a8feb412f
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.common.types import operation_metadata as operation_metadata_pb2 # type: ignore
from google.cloud.filestore_v1.services.cloud_filestore_manager import (
CloudFilestoreManagerAsyncClient,
)
from google.cloud.filestore_v1.services.cloud_filestore_manager import (
CloudFilestoreManagerClient,
)
from google.cloud.filestore_v1.services.cloud_filestore_manager import pagers
from google.cloud.filestore_v1.services.cloud_filestore_manager import transports
from google.cloud.filestore_v1.types import cloud_filestore_service
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.protobuf import wrappers_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
    """Return a static (cert, key) byte pair used as a fake mTLS source."""
    cert_bytes = b"cert bytes"
    key_bytes = b"key bytes"
    return cert_bytes, key_bytes
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a distinct test endpoint when the client defaults to localhost."""
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint inserts 'mtls' into googleapis.com hosts,
    is idempotent on mtls hosts, passes non-Google hosts through, and maps
    None to None."""
    convert = CloudFilestoreManagerClient._get_default_mtls_endpoint
    assert convert(None) is None
    expected = {
        "example.googleapis.com": "example.mtls.googleapis.com",
        "example.mtls.googleapis.com": "example.mtls.googleapis.com",
        "example.sandbox.googleapis.com": "example.mtls.sandbox.googleapis.com",
        "example.mtls.sandbox.googleapis.com": "example.mtls.sandbox.googleapis.com",
        "api.example.com": "api.example.com",
    }
    for endpoint, mtls_endpoint in expected.items():
        assert convert(endpoint) == mtls_endpoint
@pytest.mark.parametrize(
    "client_class", [CloudFilestoreManagerClient, CloudFilestoreManagerAsyncClient,]
)
def test_cloud_filestore_manager_client_from_service_account_info(client_class):
    """from_service_account_info must build a client whose transport carries
    the credentials produced by the (mocked) service-account factory and
    points at the default endpoint."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        # Default (non-mTLS) gRPC host for the Filestore API.
        assert client.transport._host == "file.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.CloudFilestoreManagerGrpcTransport, "grpc"),
        (transports.CloudFilestoreManagerGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_cloud_filestore_manager_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """always_use_jwt_access=True must opt the credentials into self-signed
    JWT access; False must leave the credentials untouched."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)

    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize(
    "client_class", [CloudFilestoreManagerClient, CloudFilestoreManagerAsyncClient,]
)
def test_cloud_filestore_manager_client_from_service_account_file(client_class):
    """Both from_service_account_file and its _json alias must build clients
    carrying the factory-produced credentials and the default endpoint."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        # from_service_account_json is an alias for from_service_account_file.
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == "file.googleapis.com:443"
def test_cloud_filestore_manager_client_get_transport_class():
    """get_transport_class returns a known transport by default and the
    gRPC transport when asked for by name."""
    default_transport = CloudFilestoreManagerClient.get_transport_class()
    assert default_transport in [
        transports.CloudFilestoreManagerGrpcTransport,
    ]

    named_transport = CloudFilestoreManagerClient.get_transport_class("grpc")
    assert named_transport == transports.CloudFilestoreManagerGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            CloudFilestoreManagerClient,
            transports.CloudFilestoreManagerGrpcTransport,
            "grpc",
        ),
        (
            CloudFilestoreManagerAsyncClient,
            transports.CloudFilestoreManagerGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    CloudFilestoreManagerClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(CloudFilestoreManagerClient),
)
@mock.patch.object(
    CloudFilestoreManagerAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(CloudFilestoreManagerAsyncClient),
)
def test_cloud_filestore_manager_client_client_options(
    client_class, transport_class, transport_name
):
    """Exercise constructor handling of ClientOptions and the
    GOOGLE_API_USE_MTLS_ENDPOINT / GOOGLE_API_USE_CLIENT_CERTIFICATE
    environment variables."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(CloudFilestoreManagerClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()

    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(CloudFilestoreManagerClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()

    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)

    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)

    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (
            CloudFilestoreManagerClient,
            transports.CloudFilestoreManagerGrpcTransport,
            "grpc",
            "true",
        ),
        (
            CloudFilestoreManagerAsyncClient,
            transports.CloudFilestoreManagerGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (
            CloudFilestoreManagerClient,
            transports.CloudFilestoreManagerGrpcTransport,
            "grpc",
            "false",
        ),
        (
            CloudFilestoreManagerAsyncClient,
            transports.CloudFilestoreManagerGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    CloudFilestoreManagerClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(CloudFilestoreManagerClient),
)
@mock.patch.object(
    CloudFilestoreManagerAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(CloudFilestoreManagerAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_cloud_filestore_manager_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """Verify endpoint autoswitch behavior under GOOGLE_API_USE_MTLS_ENDPOINT=auto.

    The endpoint is autoswitched to the default mTLS endpoint only when
    GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" AND a client certificate is
    available (either passed explicitly or discovered via ADC).
    """
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                quota_project_id=None,
                always_use_jwt_access=True,
            ) if False else patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    # NOTE: read the endpoints off the class, not the stale
                    # ``client`` variable left over from the previous block
                    # (the new client is only created below).
                    if use_client_cert_env == "false":
                        expected_host = client_class.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client_class.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize(
    "client_class", [CloudFilestoreManagerClient, CloudFilestoreManagerAsyncClient]
)
@mock.patch.object(
    CloudFilestoreManagerClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(CloudFilestoreManagerClient),
)
@mock.patch.object(
    CloudFilestoreManagerAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(CloudFilestoreManagerAsyncClient),
)
def test_cloud_filestore_manager_client_get_mtls_endpoint_and_cert_source(client_class):
    """Cover every branch of ``get_mtls_endpoint_and_cert_source``.

    Exercises: explicit client cert source with
    GOOGLE_API_USE_CLIENT_CERTIFICATE "true"/"false", and
    GOOGLE_API_USE_MTLS_ENDPOINT "never"/"always"/"auto" (the latter both
    with and without a discoverable default client certificate).
    """
    mock_client_cert_source = mock.Mock()
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        # Cert source must be dropped when client certificates are disabled.
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=mock_client_cert_source,
            ):
                (
                    api_endpoint,
                    cert_source,
                ) = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            CloudFilestoreManagerClient,
            transports.CloudFilestoreManagerGrpcTransport,
            "grpc",
        ),
        (
            CloudFilestoreManagerAsyncClient,
            transports.CloudFilestoreManagerGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_cloud_filestore_manager_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes supplied through ClientOptions are forwarded to the transport."""
    requested_scopes = ["1", "2"]
    options = client_options.ClientOptions(scopes=requested_scopes)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=requested_scopes,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            CloudFilestoreManagerClient,
            transports.CloudFilestoreManagerGrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            CloudFilestoreManagerAsyncClient,
            transports.CloudFilestoreManagerGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_cloud_filestore_manager_client_client_options_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """A credentials_file set on ClientOptions is forwarded to the transport."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_cloud_filestore_manager_client_client_options_from_dict():
    """Client options given as a plain dict (not ClientOptions) are honored."""
    transport_init_path = (
        "google.cloud.filestore_v1.services.cloud_filestore_manager.transports."
        "CloudFilestoreManagerGrpcTransport.__init__"
    )
    with mock.patch(transport_init_path) as grpc_transport:
        grpc_transport.return_value = None
        client = CloudFilestoreManagerClient(
            client_options={"api_endpoint": "squid.clam.whelk"}
        )
        # The dict's api_endpoint must reach the transport as the host.
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            CloudFilestoreManagerClient,
            transports.CloudFilestoreManagerGrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            CloudFilestoreManagerAsyncClient,
            transports.CloudFilestoreManagerGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_cloud_filestore_manager_client_create_channel_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """Credentials loaded from a file are the ones used to create the channel."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # test that the credentials from file are saved and used as the credentials.
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel"
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        file_creds = ga_credentials.AnonymousCredentials()
        load_creds.return_value = (file_creds, None)
        adc.return_value = (creds, None)
        client = client_class(client_options=options, transport=transport_name)
        # file_creds (not the ADC creds) must win when a credentials file is set.
        create_channel.assert_called_with(
            "file.googleapis.com:443",
            credentials=file_creds,
            credentials_file=None,
            quota_project_id=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=None,
            default_host="file.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "request_type", [cloud_filestore_service.ListInstancesRequest, dict,]
)
def test_list_instances(request_type, transport: str = "grpc"):
    """list_instances sends a ListInstancesRequest and wraps the response in a pager."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = cloud_filestore_service.ListInstancesResponse(
            next_page_token="next_page_token_value", unreachable=["unreachable_value"],
        )
        response = client.list_instances(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_filestore_service.ListInstancesRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListInstancesPager)
    assert response.next_page_token == "next_page_token_value"
    assert response.unreachable == ["unreachable_value"]
def test_list_instances_empty_call():
    """Calling list_instances with no arguments still sends a default request.

    Coverage failsafe for the request == None, no-flattened-fields path.
    """
    creds = ga_credentials.AnonymousCredentials()
    client = CloudFilestoreManagerClient(credentials=creds, transport="grpc")
    stub_type = type(client.transport.list_instances)
    with mock.patch.object(stub_type, "__call__") as call:
        client.list_instances()
        call.assert_called()
        _, call_args, _ = call.mock_calls[0]
        sent_request = call_args[0]
        assert sent_request == cloud_filestore_service.ListInstancesRequest()
@pytest.mark.asyncio
async def test_list_instances_async(
    transport: str = "grpc_asyncio",
    request_type=cloud_filestore_service.ListInstancesRequest,
):
    """Async list_instances sends the request and returns an async pager."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_filestore_service.ListInstancesResponse(
                next_page_token="next_page_token_value",
                unreachable=["unreachable_value"],
            )
        )
        response = await client.list_instances(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_filestore_service.ListInstancesRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListInstancesAsyncPager)
    assert response.next_page_token == "next_page_token_value"
    assert response.unreachable == ["unreachable_value"]
@pytest.mark.asyncio
async def test_list_instances_async_from_dict():
    """Exercise the async list_instances path with a dict-typed request."""
    await test_list_instances_async(request_type=dict)
def test_list_instances_field_headers():
    """list_instances must attach the routing header derived from request.parent."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_filestore_service.ListInstancesRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
        call.return_value = cloud_filestore_service.ListInstancesResponse()
        client.list_instances(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_instances_field_headers_async():
    """Async list_instances must attach the routing header from request.parent."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_filestore_service.ListInstancesRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_filestore_service.ListInstancesResponse()
        )
        await client.list_instances(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_instances_flattened():
    """The flattened ``parent`` kwarg is copied into the request object."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = cloud_filestore_service.ListInstancesResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_instances(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
def test_list_instances_flattened_error():
    """Passing both a request object and flattened fields must raise ValueError."""
    creds = ga_credentials.AnonymousCredentials()
    client = CloudFilestoreManagerClient(credentials=creds)
    request = cloud_filestore_service.ListInstancesRequest()
    # Supplying a request object together with flattened kwargs is invalid.
    with pytest.raises(ValueError):
        client.list_instances(request, parent="parent_value")
@pytest.mark.asyncio
async def test_list_instances_flattened_async():
    """Async flattened ``parent`` kwarg is copied into the request object."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
        # Designate an appropriate return value for the call.  (A plain
        # response assigned here would be dead code: the awaitable wrapper
        # below is what the async surface consumes.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_filestore_service.ListInstancesResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_instances(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_instances_flattened_error_async():
    """Async: request object plus flattened fields must raise ValueError."""
    creds = ga_credentials.AnonymousCredentials()
    client = CloudFilestoreManagerAsyncClient(credentials=creds)
    request = cloud_filestore_service.ListInstancesRequest()
    # Supplying a request object together with flattened kwargs is invalid.
    with pytest.raises(ValueError):
        await client.list_instances(request, parent="parent_value")
def test_list_instances_pager(transport_name: str = "grpc"):
    """The sync pager iterates all instances across pages and carries routing metadata."""
    client = CloudFilestoreManagerClient(
        # Instantiate the credentials; every other test in this file passes an
        # instance, not the AnonymousCredentials class itself.
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            cloud_filestore_service.ListInstancesResponse(
                instances=[
                    cloud_filestore_service.Instance(),
                    cloud_filestore_service.Instance(),
                    cloud_filestore_service.Instance(),
                ],
                next_page_token="abc",
            ),
            cloud_filestore_service.ListInstancesResponse(
                instances=[], next_page_token="def",
            ),
            cloud_filestore_service.ListInstancesResponse(
                instances=[cloud_filestore_service.Instance(),], next_page_token="ghi",
            ),
            cloud_filestore_service.ListInstancesResponse(
                instances=[
                    cloud_filestore_service.Instance(),
                    cloud_filestore_service.Instance(),
                ],
            ),
            RuntimeError,
        )
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_instances(request={})
        # The pager must carry the routing header for subsequent page fetches.
        assert pager._metadata == metadata
        results = [i for i in pager]
        assert len(results) == 6
        assert all(isinstance(i, cloud_filestore_service.Instance) for i in results)
def test_list_instances_pages(transport_name: str = "grpc"):
    """Each page yielded by the pager exposes its raw next_page_token."""
    client = CloudFilestoreManagerClient(
        # Instantiate the credentials for consistency with the other tests.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            cloud_filestore_service.ListInstancesResponse(
                instances=[
                    cloud_filestore_service.Instance(),
                    cloud_filestore_service.Instance(),
                    cloud_filestore_service.Instance(),
                ],
                next_page_token="abc",
            ),
            cloud_filestore_service.ListInstancesResponse(
                instances=[], next_page_token="def",
            ),
            cloud_filestore_service.ListInstancesResponse(
                instances=[cloud_filestore_service.Instance(),], next_page_token="ghi",
            ),
            cloud_filestore_service.ListInstancesResponse(
                instances=[
                    cloud_filestore_service.Instance(),
                    cloud_filestore_service.Instance(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.list_instances(request={}).pages)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_instances_async_pager():
    """The async pager yields every instance across all mocked pages."""
    client = CloudFilestoreManagerAsyncClient(
        # Instantiate the credentials for consistency with the other tests.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_instances), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            cloud_filestore_service.ListInstancesResponse(
                instances=[
                    cloud_filestore_service.Instance(),
                    cloud_filestore_service.Instance(),
                    cloud_filestore_service.Instance(),
                ],
                next_page_token="abc",
            ),
            cloud_filestore_service.ListInstancesResponse(
                instances=[], next_page_token="def",
            ),
            cloud_filestore_service.ListInstancesResponse(
                instances=[cloud_filestore_service.Instance(),], next_page_token="ghi",
            ),
            cloud_filestore_service.ListInstancesResponse(
                instances=[
                    cloud_filestore_service.Instance(),
                    cloud_filestore_service.Instance(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_instances(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        assert len(responses) == 6
        assert all(isinstance(i, cloud_filestore_service.Instance) for i in responses)
@pytest.mark.asyncio
async def test_list_instances_async_pages():
    """Each async page exposes its raw next_page_token."""
    client = CloudFilestoreManagerAsyncClient(
        # Instantiate the credentials for consistency with the other tests.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_instances), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            cloud_filestore_service.ListInstancesResponse(
                instances=[
                    cloud_filestore_service.Instance(),
                    cloud_filestore_service.Instance(),
                    cloud_filestore_service.Instance(),
                ],
                next_page_token="abc",
            ),
            cloud_filestore_service.ListInstancesResponse(
                instances=[], next_page_token="def",
            ),
            cloud_filestore_service.ListInstancesResponse(
                instances=[cloud_filestore_service.Instance(),], next_page_token="ghi",
            ),
            cloud_filestore_service.ListInstancesResponse(
                instances=[
                    cloud_filestore_service.Instance(),
                    cloud_filestore_service.Instance(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_instances(request={})).pages:
            pages.append(page_)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
    "request_type", [cloud_filestore_service.GetInstanceRequest, dict,]
)
def test_get_instance(request_type, transport: str = "grpc"):
    """get_instance sends a GetInstanceRequest and returns the Instance fields."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = cloud_filestore_service.Instance(
            name="name_value",
            description="description_value",
            state=cloud_filestore_service.Instance.State.CREATING,
            status_message="status_message_value",
            tier=cloud_filestore_service.Instance.Tier.STANDARD,
            etag="etag_value",
        )
        response = client.get_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_filestore_service.GetInstanceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, cloud_filestore_service.Instance)
    assert response.name == "name_value"
    assert response.description == "description_value"
    assert response.state == cloud_filestore_service.Instance.State.CREATING
    assert response.status_message == "status_message_value"
    assert response.tier == cloud_filestore_service.Instance.Tier.STANDARD
    assert response.etag == "etag_value"
def test_get_instance_empty_call():
    """Calling get_instance with no arguments still sends a default request.

    Coverage failsafe for the request == None, no-flattened-fields path.
    """
    creds = ga_credentials.AnonymousCredentials()
    client = CloudFilestoreManagerClient(credentials=creds, transport="grpc")
    stub_type = type(client.transport.get_instance)
    with mock.patch.object(stub_type, "__call__") as call:
        client.get_instance()
        call.assert_called()
        _, call_args, _ = call.mock_calls[0]
        sent_request = call_args[0]
        assert sent_request == cloud_filestore_service.GetInstanceRequest()
@pytest.mark.asyncio
async def test_get_instance_async(
    transport: str = "grpc_asyncio",
    request_type=cloud_filestore_service.GetInstanceRequest,
):
    """Async get_instance sends the request and returns the Instance fields."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_filestore_service.Instance(
                name="name_value",
                description="description_value",
                state=cloud_filestore_service.Instance.State.CREATING,
                status_message="status_message_value",
                tier=cloud_filestore_service.Instance.Tier.STANDARD,
                etag="etag_value",
            )
        )
        response = await client.get_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_filestore_service.GetInstanceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, cloud_filestore_service.Instance)
    assert response.name == "name_value"
    assert response.description == "description_value"
    assert response.state == cloud_filestore_service.Instance.State.CREATING
    assert response.status_message == "status_message_value"
    assert response.tier == cloud_filestore_service.Instance.Tier.STANDARD
    assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_get_instance_async_from_dict():
    """Exercise the async get_instance path with a dict-typed request."""
    await test_get_instance_async(request_type=dict)
def test_get_instance_field_headers():
    """get_instance must attach the routing header derived from request.name."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_filestore_service.GetInstanceRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
        call.return_value = cloud_filestore_service.Instance()
        client.get_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_instance_field_headers_async():
    """Async get_instance must attach the routing header from request.name."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_filestore_service.GetInstanceRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_filestore_service.Instance()
        )
        await client.get_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_instance_flattened():
    """The flattened ``name`` kwarg is copied into the request object."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = cloud_filestore_service.Instance()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_instance(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_get_instance_flattened_error():
    """Passing both a request object and flattened fields must raise ValueError."""
    creds = ga_credentials.AnonymousCredentials()
    client = CloudFilestoreManagerClient(credentials=creds)
    request = cloud_filestore_service.GetInstanceRequest()
    # Supplying a request object together with flattened kwargs is invalid.
    with pytest.raises(ValueError):
        client.get_instance(request, name="name_value")
@pytest.mark.asyncio
async def test_get_instance_flattened_async():
    """Async flattened ``name`` kwarg is copied into the request object."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
        # Designate an appropriate return value for the call.  (A plain
        # response assigned here would be dead code: the awaitable wrapper
        # below is what the async surface consumes.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_filestore_service.Instance()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_instance(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_instance_flattened_error_async():
    """Async: request object plus flattened fields must raise ValueError."""
    creds = ga_credentials.AnonymousCredentials()
    client = CloudFilestoreManagerAsyncClient(credentials=creds)
    request = cloud_filestore_service.GetInstanceRequest()
    # Supplying a request object together with flattened kwargs is invalid.
    with pytest.raises(ValueError):
        await client.get_instance(request, name="name_value")
@pytest.mark.parametrize(
    "request_type", [cloud_filestore_service.CreateInstanceRequest, dict,]
)
def test_create_instance(request_type, transport: str = "grpc"):
    """create_instance sends a CreateInstanceRequest and returns an LRO future."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.create_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_filestore_service.CreateInstanceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_create_instance_empty_call():
    """create_instance() with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        client.create_instance()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # An empty call must materialize into a default request message.
        assert args[0] == cloud_filestore_service.CreateInstanceRequest()
@pytest.mark.asyncio
async def test_create_instance_async(
    transport: str = "grpc_asyncio",
    request_type=cloud_filestore_service.CreateInstanceRequest,
):
    """Async create_instance sends the request and returns an LRO future."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.create_instance(request)
    # Establish that the underlying gRPC stub method was called exactly once
    # (strengthened from a bare truthiness check to match the sync test).
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == cloud_filestore_service.CreateInstanceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_instance_async_from_dict():
    """The async path must also accept a plain dict as the request."""
    await test_create_instance_async(request_type=dict)
def test_create_instance_field_headers():
    """create_instance forwards routing info as an x-goog-request-params header."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_filestore_service.CreateInstanceRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.create_instance(request)
    # Establish that the underlying gRPC stub method was called.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_instance_field_headers_async():
    """Async create_instance forwards routing info as a field header."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_filestore_service.CreateInstanceRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.create_instance(request)
    # Establish that the underlying gRPC stub method was called exactly once
    # (strengthened from a bare truthiness check to match the sync test).
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_instance_flattened():
    """Flattened kwargs are copied field-by-field onto the request message."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_instance(
            parent="parent_value",
            instance=cloud_filestore_service.Instance(name="name_value"),
            instance_id="instance_id_value",
        )
    # Establish that the underlying call was made with the expected
    # request object values.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    arg = args[0].parent
    mock_val = "parent_value"
    assert arg == mock_val
    arg = args[0].instance
    mock_val = cloud_filestore_service.Instance(name="name_value")
    assert arg == mock_val
    arg = args[0].instance_id
    mock_val = "instance_id_value"
    assert arg == mock_val
def test_create_instance_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # An explicit request object and flattened fields are mutually
    # exclusive; the client rejects the combination before any RPC.
    request = cloud_filestore_service.CreateInstanceRequest()
    with pytest.raises(ValueError):
        client.create_instance(
            request,
            parent="parent_value",
            instance=cloud_filestore_service.Instance(name="name_value"),
            instance_id="instance_id_value",
        )
@pytest.mark.asyncio
async def test_create_instance_flattened_async():
    """Async flattened call maps keyword arguments onto the request message."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        # Designate an appropriate return value for the call.  (The former
        # plain-Operation assignment was dead code — immediately overwritten
        # by the awaitable fake below — and has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_instance(
            parent="parent_value",
            instance=cloud_filestore_service.Instance(name="name_value"),
            instance_id="instance_id_value",
        )
    # Establish that the underlying call was made exactly once with the
    # expected request object values.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    arg = args[0].parent
    mock_val = "parent_value"
    assert arg == mock_val
    arg = args[0].instance
    mock_val = cloud_filestore_service.Instance(name="name_value")
    assert arg == mock_val
    arg = args[0].instance_id
    mock_val = "instance_id_value"
    assert arg == mock_val
@pytest.mark.asyncio
async def test_create_instance_flattened_error_async():
    """Async variant: a request object plus flattened kwargs must raise."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both styles at once is rejected client-side.
    request = cloud_filestore_service.CreateInstanceRequest()
    with pytest.raises(ValueError):
        await client.create_instance(
            request,
            parent="parent_value",
            instance=cloud_filestore_service.Instance(name="name_value"),
            instance_id="instance_id_value",
        )
@pytest.mark.parametrize(
    "request_type", [cloud_filestore_service.UpdateInstanceRequest, dict,]
)
def test_update_instance(request_type, transport: str = "grpc"):
    """update_instance sends the expected request and returns an LRO future.

    Parametrized over both the proto request type and a plain dict.
    """
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.update_instance(request)
    # Establish that the underlying gRPC stub method was called.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == cloud_filestore_service.UpdateInstanceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_update_instance_empty_call():
    """update_instance() with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_instance), "__call__") as call:
        client.update_instance()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # An empty call must materialize into a default request message.
        assert args[0] == cloud_filestore_service.UpdateInstanceRequest()
@pytest.mark.asyncio
async def test_update_instance_async(
    transport: str = "grpc_asyncio",
    request_type=cloud_filestore_service.UpdateInstanceRequest,
):
    """Async update_instance sends the request and returns an LRO future."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.update_instance(request)
    # Establish that the underlying gRPC stub method was called exactly once
    # (strengthened from a bare truthiness check to match the sync test).
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == cloud_filestore_service.UpdateInstanceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_instance_async_from_dict():
    """The async path must also accept a plain dict as the request."""
    await test_update_instance_async(request_type=dict)
def test_update_instance_field_headers():
    """update_instance forwards routing info as an x-goog-request-params header."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_filestore_service.UpdateInstanceRequest()
    request.instance.name = "instance.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_instance), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.update_instance(request)
    # Establish that the underlying gRPC stub method was called.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "instance.name=instance.name/value",) in kw[
        "metadata"
    ]
@pytest.mark.asyncio
async def test_update_instance_field_headers_async():
    """Async update_instance forwards routing info as a field header."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_filestore_service.UpdateInstanceRequest()
    request.instance.name = "instance.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_instance), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.update_instance(request)
    # Establish that the underlying gRPC stub method was called exactly once
    # (strengthened from a bare truthiness check to match the sync test).
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "instance.name=instance.name/value",) in kw[
        "metadata"
    ]
def test_update_instance_flattened():
    """Flattened kwargs are copied field-by-field onto the request message."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_instance(
            instance=cloud_filestore_service.Instance(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
    # Establish that the underlying call was made with the expected
    # request object values.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    arg = args[0].instance
    mock_val = cloud_filestore_service.Instance(name="name_value")
    assert arg == mock_val
    arg = args[0].update_mask
    mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
    assert arg == mock_val
def test_update_instance_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # An explicit request object and flattened fields are mutually
    # exclusive; the client rejects the combination before any RPC.
    request = cloud_filestore_service.UpdateInstanceRequest()
    with pytest.raises(ValueError):
        client.update_instance(
            request,
            instance=cloud_filestore_service.Instance(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_instance_flattened_async():
    """Async flattened call maps keyword arguments onto the request message."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_instance), "__call__") as call:
        # Designate an appropriate return value for the call.  (The former
        # plain-Operation assignment was dead code — immediately overwritten
        # by the awaitable fake below — and has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_instance(
            instance=cloud_filestore_service.Instance(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
    # Establish that the underlying call was made exactly once with the
    # expected request object values.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    arg = args[0].instance
    mock_val = cloud_filestore_service.Instance(name="name_value")
    assert arg == mock_val
    arg = args[0].update_mask
    mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
    assert arg == mock_val
@pytest.mark.asyncio
async def test_update_instance_flattened_error_async():
    """Async variant: a request object plus flattened kwargs must raise."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both styles at once is rejected client-side.
    request = cloud_filestore_service.UpdateInstanceRequest()
    with pytest.raises(ValueError):
        await client.update_instance(
            request,
            instance=cloud_filestore_service.Instance(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.parametrize(
    "request_type", [cloud_filestore_service.RestoreInstanceRequest, dict,]
)
def test_restore_instance(request_type, transport: str = "grpc"):
    """restore_instance sends the expected request and returns an LRO future.

    Parametrized over both the proto request type and a plain dict.
    """
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.restore_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.restore_instance(request)
    # Establish that the underlying gRPC stub method was called.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == cloud_filestore_service.RestoreInstanceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_restore_instance_empty_call():
    """restore_instance() with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.restore_instance), "__call__") as call:
        client.restore_instance()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # An empty call must materialize into a default request message.
        assert args[0] == cloud_filestore_service.RestoreInstanceRequest()
@pytest.mark.asyncio
async def test_restore_instance_async(
    transport: str = "grpc_asyncio",
    request_type=cloud_filestore_service.RestoreInstanceRequest,
):
    """Async restore_instance sends the request and returns an LRO future."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.restore_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.restore_instance(request)
    # Establish that the underlying gRPC stub method was called exactly once
    # (strengthened from a bare truthiness check to match the sync test).
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == cloud_filestore_service.RestoreInstanceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_restore_instance_async_from_dict():
    """The async path must also accept a plain dict as the request."""
    await test_restore_instance_async(request_type=dict)
def test_restore_instance_field_headers():
    """restore_instance forwards routing info as an x-goog-request-params header."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_filestore_service.RestoreInstanceRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.restore_instance), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.restore_instance(request)
    # Establish that the underlying gRPC stub method was called.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_restore_instance_field_headers_async():
    """Async restore_instance forwards routing info as a field header."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_filestore_service.RestoreInstanceRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.restore_instance), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.restore_instance(request)
    # Establish that the underlying gRPC stub method was called exactly once
    # (strengthened from a bare truthiness check to match the sync test).
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.parametrize(
    "request_type", [cloud_filestore_service.DeleteInstanceRequest, dict,]
)
def test_delete_instance(request_type, transport: str = "grpc"):
    """delete_instance sends the expected request and returns an LRO future.

    Parametrized over both the proto request type and a plain dict.
    """
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.delete_instance(request)
    # Establish that the underlying gRPC stub method was called.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == cloud_filestore_service.DeleteInstanceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_delete_instance_empty_call():
    """delete_instance() with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
        client.delete_instance()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # An empty call must materialize into a default request message.
        assert args[0] == cloud_filestore_service.DeleteInstanceRequest()
@pytest.mark.asyncio
async def test_delete_instance_async(
    transport: str = "grpc_asyncio",
    request_type=cloud_filestore_service.DeleteInstanceRequest,
):
    """Async delete_instance sends the request and returns an LRO future."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.delete_instance(request)
    # Establish that the underlying gRPC stub method was called exactly once
    # (strengthened from a bare truthiness check to match the sync test).
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == cloud_filestore_service.DeleteInstanceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_instance_async_from_dict():
    """The async path must also accept a plain dict as the request."""
    await test_delete_instance_async(request_type=dict)
def test_delete_instance_field_headers():
    """delete_instance forwards routing info as an x-goog-request-params header."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_filestore_service.DeleteInstanceRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.delete_instance(request)
    # Establish that the underlying gRPC stub method was called.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_instance_field_headers_async():
    """Async delete_instance forwards routing info as a field header."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_filestore_service.DeleteInstanceRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.delete_instance(request)
    # Establish that the underlying gRPC stub method was called exactly once
    # (strengthened from a bare truthiness check to match the sync test).
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_instance_flattened():
    """Flattened kwargs are copied field-by-field onto the request message."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_instance(name="name_value",)
    # Establish that the underlying call was made with the expected
    # request object values.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    arg = args[0].name
    mock_val = "name_value"
    assert arg == mock_val
def test_delete_instance_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # An explicit request object and flattened fields are mutually
    # exclusive; the client rejects the combination before any RPC.
    request = cloud_filestore_service.DeleteInstanceRequest()
    with pytest.raises(ValueError):
        client.delete_instance(request, name="name_value")
@pytest.mark.asyncio
async def test_delete_instance_flattened_async():
    """Async flattened call maps the keyword argument onto the request message."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
        # Designate an appropriate return value for the call.  (The former
        # plain-Operation assignment was dead code — immediately overwritten
        # by the awaitable fake below — and has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_instance(name="name_value",)
    # Establish that the underlying call was made exactly once with the
    # expected request object values.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    arg = args[0].name
    mock_val = "name_value"
    assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_instance_flattened_error_async():
    """Async variant: a request object plus flattened kwargs must raise."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both styles at once is rejected client-side.
    request = cloud_filestore_service.DeleteInstanceRequest()
    with pytest.raises(ValueError):
        await client.delete_instance(request, name="name_value")
@pytest.mark.parametrize(
    "request_type", [cloud_filestore_service.ListBackupsRequest, dict,]
)
def test_list_backups(request_type, transport: str = "grpc"):
    """list_backups sends the expected request and returns a pager.

    Parametrized over both the proto request type and a plain dict.
    """
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = cloud_filestore_service.ListBackupsResponse(
            next_page_token="next_page_token_value", unreachable=["unreachable_value"],
        )
        response = client.list_backups(request)
    # Establish that the underlying gRPC stub method was called.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == cloud_filestore_service.ListBackupsRequest()
    # Establish that the response is the type that we expect, and that the
    # pager surfaces the response fields unchanged.
    assert isinstance(response, pagers.ListBackupsPager)
    assert response.next_page_token == "next_page_token_value"
    assert response.unreachable == ["unreachable_value"]
def test_list_backups_empty_call():
    """list_backups() with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
        client.list_backups()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # An empty call must materialize into a default request message.
        assert args[0] == cloud_filestore_service.ListBackupsRequest()
@pytest.mark.asyncio
async def test_list_backups_async(
    transport: str = "grpc_asyncio",
    request_type=cloud_filestore_service.ListBackupsRequest,
):
    """Async list_backups sends the request and returns an async pager."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_filestore_service.ListBackupsResponse(
                next_page_token="next_page_token_value",
                unreachable=["unreachable_value"],
            )
        )
        response = await client.list_backups(request)
    # Establish that the underlying gRPC stub method was called exactly once
    # (strengthened from a bare truthiness check to match the sync test).
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == cloud_filestore_service.ListBackupsRequest()
    # Establish that the response is the type that we expect, and that the
    # pager surfaces the response fields unchanged.
    assert isinstance(response, pagers.ListBackupsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
    assert response.unreachable == ["unreachable_value"]
@pytest.mark.asyncio
async def test_list_backups_async_from_dict():
    """The async path must also accept a plain dict as the request."""
    await test_list_backups_async(request_type=dict)
def test_list_backups_field_headers():
    """list_backups forwards routing info as an x-goog-request-params header."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_filestore_service.ListBackupsRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
        call.return_value = cloud_filestore_service.ListBackupsResponse()
        client.list_backups(request)
    # Establish that the underlying gRPC stub method was called.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_backups_field_headers_async():
    """Async list_backups forwards routing info as a field header."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_filestore_service.ListBackupsRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_filestore_service.ListBackupsResponse()
        )
        await client.list_backups(request)
    # Establish that the underlying gRPC stub method was called exactly once
    # (strengthened from a bare truthiness check to match the sync test).
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_backups_flattened():
    """Flattened keyword arguments must be copied into the request message."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Patch the transport-level stub so no real RPC is made.
    with mock.patch.object(type(client.transport.list_backups), "__call__") as rpc:
        # Stub the transport response.
        rpc.return_value = cloud_filestore_service.ListBackupsResponse()
        # Invoke with a truthy value for each flattened field.
        client.list_backups(parent="parent_value",)
        # Exactly one call, with the flattened value folded into the request.
        assert len(rpc.mock_calls) == 1
        sent_request = rpc.mock_calls[0].args[0]
        assert sent_request.parent == "parent_value"
def test_list_backups_flattened_error():
    """Passing both a request object and flattened fields must raise ValueError."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mixing a request object with flattened keyword fields is rejected.
    with pytest.raises(ValueError):
        client.list_backups(
            cloud_filestore_service.ListBackupsRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_backups_flattened_async():
    """Async: flattened keyword arguments must be copied into the request."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
        # Designate the awaitable return value for the call.  (The former
        # plain-message assignment preceding this one was dead code: it was
        # immediately overwritten and has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_filestore_service.ListBackupsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_backups(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_backups_flattened_error_async():
    """Async: request object plus flattened fields must raise ValueError."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mixing a request object with flattened keyword fields is rejected.
    with pytest.raises(ValueError):
        await client.list_backups(
            cloud_filestore_service.ListBackupsRequest(), parent="parent_value",
        )
def test_list_backups_pager(transport_name: str = "grpc"):
    """The sync pager must iterate all items across pages and carry metadata."""
    client = CloudFilestoreManagerClient(
        # Fix: pass a credentials *instance*, not the class, for consistency
        # with every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
        # Set the response to a series of pages: 3 + 0 + 1 + 2 backups.
        call.side_effect = (
            cloud_filestore_service.ListBackupsResponse(
                backups=[
                    cloud_filestore_service.Backup(),
                    cloud_filestore_service.Backup(),
                    cloud_filestore_service.Backup(),
                ],
                next_page_token="abc",
            ),
            cloud_filestore_service.ListBackupsResponse(
                backups=[], next_page_token="def",
            ),
            cloud_filestore_service.ListBackupsResponse(
                backups=[cloud_filestore_service.Backup(),], next_page_token="ghi",
            ),
            cloud_filestore_service.ListBackupsResponse(
                backups=[
                    cloud_filestore_service.Backup(),
                    cloud_filestore_service.Backup(),
                ],
            ),
            RuntimeError,
        )
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_backups(request={})
        # The pager must retain the routing metadata for subsequent pages.
        assert pager._metadata == metadata
        # Iterating the pager flattens all six backups across the four pages.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, cloud_filestore_service.Backup) for i in results)
def test_list_backups_pages(transport_name: str = "grpc"):
    """``.pages`` must yield one raw page per response, preserving page tokens."""
    client = CloudFilestoreManagerClient(
        # Fix: pass a credentials *instance*, not the class, for consistency
        # with every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            cloud_filestore_service.ListBackupsResponse(
                backups=[
                    cloud_filestore_service.Backup(),
                    cloud_filestore_service.Backup(),
                    cloud_filestore_service.Backup(),
                ],
                next_page_token="abc",
            ),
            cloud_filestore_service.ListBackupsResponse(
                backups=[], next_page_token="def",
            ),
            cloud_filestore_service.ListBackupsResponse(
                backups=[cloud_filestore_service.Backup(),], next_page_token="ghi",
            ),
            cloud_filestore_service.ListBackupsResponse(
                backups=[
                    cloud_filestore_service.Backup(),
                    cloud_filestore_service.Backup(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.list_backups(request={}).pages)
        # Each raw page must expose the token of the response that produced it.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_backups_async_pager():
    """The async pager must iterate all items across every page."""
    client = CloudFilestoreManagerAsyncClient(
        # Fix: pass a credentials *instance*, not the class, for consistency
        # with every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_backups), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages: 3 + 0 + 1 + 2 backups.
        call.side_effect = (
            cloud_filestore_service.ListBackupsResponse(
                backups=[
                    cloud_filestore_service.Backup(),
                    cloud_filestore_service.Backup(),
                    cloud_filestore_service.Backup(),
                ],
                next_page_token="abc",
            ),
            cloud_filestore_service.ListBackupsResponse(
                backups=[], next_page_token="def",
            ),
            cloud_filestore_service.ListBackupsResponse(
                backups=[cloud_filestore_service.Backup(),], next_page_token="ghi",
            ),
            cloud_filestore_service.ListBackupsResponse(
                backups=[
                    cloud_filestore_service.Backup(),
                    cloud_filestore_service.Backup(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_backups(request={},)
        assert async_pager.next_page_token == "abc"
        # Async-iterating the pager flattens all six backups.
        responses = []
        async for response in async_pager:
            responses.append(response)
        assert len(responses) == 6
        assert all(isinstance(i, cloud_filestore_service.Backup) for i in responses)
@pytest.mark.asyncio
async def test_list_backups_async_pages():
    """Async ``.pages`` must yield raw pages preserving each page token."""
    client = CloudFilestoreManagerAsyncClient(
        # Fix: pass a credentials *instance*, not the class, for consistency
        # with every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_backups), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            cloud_filestore_service.ListBackupsResponse(
                backups=[
                    cloud_filestore_service.Backup(),
                    cloud_filestore_service.Backup(),
                    cloud_filestore_service.Backup(),
                ],
                next_page_token="abc",
            ),
            cloud_filestore_service.ListBackupsResponse(
                backups=[], next_page_token="def",
            ),
            cloud_filestore_service.ListBackupsResponse(
                backups=[cloud_filestore_service.Backup(),], next_page_token="ghi",
            ),
            cloud_filestore_service.ListBackupsResponse(
                backups=[
                    cloud_filestore_service.Backup(),
                    cloud_filestore_service.Backup(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_backups(request={})).pages:
            pages.append(page_)
        # Each raw page must expose the token of the response that produced it.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
    "request_type", [cloud_filestore_service.GetBackupRequest, dict,]
)
def test_get_backup(request_type, transport: str = "grpc"):
    """get_backup must forward a default request and surface all response fields."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional and the API is mocked, so an empty
    # request is sufficient.
    request = request_type()
    # Patch the transport-level stub so no real RPC is made.
    with mock.patch.object(type(client.transport.get_backup), "__call__") as rpc:
        # Stub the value the transport hands back.
        rpc.return_value = cloud_filestore_service.Backup(
            name="name_value",
            description="description_value",
            state=cloud_filestore_service.Backup.State.CREATING,
            capacity_gb=1142,
            storage_bytes=1403,
            source_instance="source_instance_value",
            source_file_share="source_file_share_value",
            source_instance_tier=cloud_filestore_service.Instance.Tier.STANDARD,
            download_bytes=1502,
        )
        response = client.get_backup(request)
        # Exactly one stub call, carrying a default GetBackupRequest.
        assert len(rpc.mock_calls) == 1
        assert rpc.mock_calls[0].args[0] == cloud_filestore_service.GetBackupRequest()
    # Every stubbed field must come through the client unchanged.
    assert isinstance(response, cloud_filestore_service.Backup)
    assert response.name == "name_value"
    assert response.description == "description_value"
    assert response.state == cloud_filestore_service.Backup.State.CREATING
    assert response.capacity_gb == 1142
    assert response.storage_bytes == 1403
    assert response.source_instance == "source_instance_value"
    assert response.source_file_share == "source_file_share_value"
    assert (
        response.source_instance_tier == cloud_filestore_service.Instance.Tier.STANDARD
    )
    assert response.download_bytes == 1502
def test_get_backup_empty_call():
    """A call with no request and no flattened fields still sends a default request."""
    # Coverage failsafe: request == None and no flattened fields passed.
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the transport-level stub so no real RPC is made.
    with mock.patch.object(type(client.transport.get_backup), "__call__") as rpc:
        client.get_backup()
        rpc.assert_called()
        assert rpc.mock_calls[0].args[0] == cloud_filestore_service.GetBackupRequest()
@pytest.mark.asyncio
async def test_get_backup_async(
    transport: str = "grpc_asyncio",
    request_type=cloud_filestore_service.GetBackupRequest,
):
    """Async get_backup must forward a default request and surface all fields."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional and the API is mocked, so an empty
    # request is sufficient.
    request = request_type()
    # Patch the transport-level stub so no real RPC is made.
    with mock.patch.object(type(client.transport.get_backup), "__call__") as rpc:
        # Stub an awaitable carrying the fake backup.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_filestore_service.Backup(
                name="name_value",
                description="description_value",
                state=cloud_filestore_service.Backup.State.CREATING,
                capacity_gb=1142,
                storage_bytes=1403,
                source_instance="source_instance_value",
                source_file_share="source_file_share_value",
                source_instance_tier=cloud_filestore_service.Instance.Tier.STANDARD,
                download_bytes=1502,
            )
        )
        response = await client.get_backup(request)
        # The stub must have been invoked with a default GetBackupRequest.
        assert len(rpc.mock_calls)
        assert rpc.mock_calls[0].args[0] == cloud_filestore_service.GetBackupRequest()
    # Every stubbed field must come through the client unchanged.
    assert isinstance(response, cloud_filestore_service.Backup)
    assert response.name == "name_value"
    assert response.description == "description_value"
    assert response.state == cloud_filestore_service.Backup.State.CREATING
    assert response.capacity_gb == 1142
    assert response.storage_bytes == 1403
    assert response.source_instance == "source_instance_value"
    assert response.source_file_share == "source_file_share_value"
    assert (
        response.source_instance_tier == cloud_filestore_service.Instance.Tier.STANDARD
    )
    assert response.download_bytes == 1502
@pytest.mark.asyncio
async def test_get_backup_async_from_dict():
    """Exercise the async get_backup path with a plain ``dict`` request."""
    await test_get_backup_async(request_type=dict)
def test_get_backup_field_headers():
    """URI-mapped request fields must be propagated as routing headers."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a field that is part of the HTTP/1.1 URI so the routing
    # header carries a non-empty value.
    request = cloud_filestore_service.GetBackupRequest()
    request.name = "name/value"
    # Patch the transport-level stub so no real RPC is made.
    with mock.patch.object(type(client.transport.get_backup), "__call__") as rpc:
        rpc.return_value = cloud_filestore_service.Backup()
        client.get_backup(request)
        # The stub must have been invoked exactly once with our request object.
        assert len(rpc.mock_calls) == 1
        assert rpc.mock_calls[0].args[0] == request
        # The routing header must have been attached to the call metadata.
        assert (
            "x-goog-request-params",
            "name=name/value",
        ) in rpc.mock_calls[0].kwargs["metadata"]
@pytest.mark.asyncio
async def test_get_backup_field_headers_async():
    """Async variant: URI-mapped request fields must become routing headers."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a field that is part of the HTTP/1.1 URI so the routing
    # header carries a non-empty value.
    request = cloud_filestore_service.GetBackupRequest()
    request.name = "name/value"
    # Patch the transport-level stub so no real RPC is made.
    with mock.patch.object(type(client.transport.get_backup), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_filestore_service.Backup()
        )
        await client.get_backup(request)
        # The stub must have been invoked with our request object.
        assert len(rpc.mock_calls)
        assert rpc.mock_calls[0].args[0] == request
        # The routing header must have been attached to the call metadata.
        assert (
            "x-goog-request-params",
            "name=name/value",
        ) in rpc.mock_calls[0].kwargs["metadata"]
def test_get_backup_flattened():
    """Flattened keyword arguments must be copied into the request message."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Patch the transport-level stub so no real RPC is made.
    with mock.patch.object(type(client.transport.get_backup), "__call__") as rpc:
        # Stub the transport response.
        rpc.return_value = cloud_filestore_service.Backup()
        # Invoke with a truthy value for each flattened field.
        client.get_backup(name="name_value",)
        # Exactly one call, with the flattened value folded into the request.
        assert len(rpc.mock_calls) == 1
        sent_request = rpc.mock_calls[0].args[0]
        assert sent_request.name == "name_value"
def test_get_backup_flattened_error():
    """Passing both a request object and flattened fields must raise ValueError."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mixing a request object with flattened keyword fields is rejected.
    with pytest.raises(ValueError):
        client.get_backup(
            cloud_filestore_service.GetBackupRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_get_backup_flattened_async():
    """Async: flattened keyword arguments must be copied into the request."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_backup), "__call__") as call:
        # Designate the awaitable return value for the call.  (The former
        # plain-message assignment preceding this one was dead code: it was
        # immediately overwritten and has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_filestore_service.Backup()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_backup(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_backup_flattened_error_async():
    """Async: request object plus flattened fields must raise ValueError."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mixing a request object with flattened keyword fields is rejected.
    with pytest.raises(ValueError):
        await client.get_backup(
            cloud_filestore_service.GetBackupRequest(), name="name_value",
        )
@pytest.mark.parametrize(
    "request_type", [cloud_filestore_service.CreateBackupRequest, dict,]
)
def test_create_backup(request_type, transport: str = "grpc"):
    """create_backup must forward a default request and return an operation future."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional and the API is mocked, so an empty
    # request is sufficient.
    request = request_type()
    # Patch the transport-level stub so no real RPC is made.
    with mock.patch.object(type(client.transport.create_backup), "__call__") as rpc:
        # Stub a long-running operation as the transport response.
        rpc.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.create_backup(request)
        # Exactly one stub call, carrying a default CreateBackupRequest.
        assert len(rpc.mock_calls) == 1
        assert rpc.mock_calls[0].args[0] == cloud_filestore_service.CreateBackupRequest()
    # The client wraps the proto operation in a future.
    assert isinstance(response, future.Future)
def test_create_backup_empty_call():
    """A call with no request and no flattened fields still sends a default request."""
    # Coverage failsafe: request == None and no flattened fields passed.
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the transport-level stub so no real RPC is made.
    with mock.patch.object(type(client.transport.create_backup), "__call__") as rpc:
        client.create_backup()
        rpc.assert_called()
        assert rpc.mock_calls[0].args[0] == cloud_filestore_service.CreateBackupRequest()
@pytest.mark.asyncio
async def test_create_backup_async(
    transport: str = "grpc_asyncio",
    request_type=cloud_filestore_service.CreateBackupRequest,
):
    """Async create_backup must forward the request and return an operation future."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional and the API is mocked, so an empty
    # request is sufficient.
    request = request_type()
    # Patch the transport-level stub so no real RPC is made.
    with mock.patch.object(type(client.transport.create_backup), "__call__") as rpc:
        # Stub an awaitable long-running operation as the transport response.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.create_backup(request)
        # The stub must have been invoked with a default CreateBackupRequest.
        assert len(rpc.mock_calls)
        assert rpc.mock_calls[0].args[0] == cloud_filestore_service.CreateBackupRequest()
    # The client wraps the proto operation in a future.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_backup_async_from_dict():
    """Exercise the async create_backup path with a plain ``dict`` request."""
    await test_create_backup_async(request_type=dict)
def test_create_backup_field_headers():
    """URI-mapped request fields must be propagated as routing headers."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a field that is part of the HTTP/1.1 URI so the routing
    # header carries a non-empty value.
    request = cloud_filestore_service.CreateBackupRequest()
    request.parent = "parent/value"
    # Patch the transport-level stub so no real RPC is made.
    with mock.patch.object(type(client.transport.create_backup), "__call__") as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        client.create_backup(request)
        # The stub must have been invoked exactly once with our request object.
        assert len(rpc.mock_calls) == 1
        assert rpc.mock_calls[0].args[0] == request
        # The routing header must have been attached to the call metadata.
        assert (
            "x-goog-request-params",
            "parent=parent/value",
        ) in rpc.mock_calls[0].kwargs["metadata"]
@pytest.mark.asyncio
async def test_create_backup_field_headers_async():
    """Async variant: URI-mapped request fields must become routing headers."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a field that is part of the HTTP/1.1 URI so the routing
    # header carries a non-empty value.
    request = cloud_filestore_service.CreateBackupRequest()
    request.parent = "parent/value"
    # Patch the transport-level stub so no real RPC is made.
    with mock.patch.object(type(client.transport.create_backup), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.create_backup(request)
        # The stub must have been invoked with our request object.
        assert len(rpc.mock_calls)
        assert rpc.mock_calls[0].args[0] == request
        # The routing header must have been attached to the call metadata.
        assert (
            "x-goog-request-params",
            "parent=parent/value",
        ) in rpc.mock_calls[0].kwargs["metadata"]
def test_create_backup_flattened():
    """Flattened keyword arguments must be copied into the request message."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Patch the transport-level stub so no real RPC is made.
    with mock.patch.object(type(client.transport.create_backup), "__call__") as rpc:
        # Stub the transport response.
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        # Invoke with a truthy value for every flattened field.
        client.create_backup(
            parent="parent_value",
            backup=cloud_filestore_service.Backup(name="name_value"),
            backup_id="backup_id_value",
        )
        # Exactly one call, with each flattened value folded into the request.
        assert len(rpc.mock_calls) == 1
        sent_request = rpc.mock_calls[0].args[0]
        assert sent_request.parent == "parent_value"
        assert sent_request.backup == cloud_filestore_service.Backup(name="name_value")
        assert sent_request.backup_id == "backup_id_value"
def test_create_backup_flattened_error():
    """Passing both a request object and flattened fields must raise ValueError."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mixing a request object with flattened keyword fields is rejected.
    with pytest.raises(ValueError):
        client.create_backup(
            cloud_filestore_service.CreateBackupRequest(),
            parent="parent_value",
            backup=cloud_filestore_service.Backup(name="name_value"),
            backup_id="backup_id_value",
        )
@pytest.mark.asyncio
async def test_create_backup_flattened_async():
    """Async: flattened keyword arguments must be copied into the request."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_backup), "__call__") as call:
        # Designate the awaitable return value for the call.  (The former
        # plain-Operation assignment preceding this one was dead code: it was
        # immediately overwritten and has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_backup(
            parent="parent_value",
            backup=cloud_filestore_service.Backup(name="name_value"),
            backup_id="backup_id_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].backup
        mock_val = cloud_filestore_service.Backup(name="name_value")
        assert arg == mock_val
        arg = args[0].backup_id
        mock_val = "backup_id_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_backup_flattened_error_async():
    """Async: request object plus flattened fields must raise ValueError."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mixing a request object with flattened keyword fields is rejected.
    with pytest.raises(ValueError):
        await client.create_backup(
            cloud_filestore_service.CreateBackupRequest(),
            parent="parent_value",
            backup=cloud_filestore_service.Backup(name="name_value"),
            backup_id="backup_id_value",
        )
@pytest.mark.parametrize(
    "request_type", [cloud_filestore_service.DeleteBackupRequest, dict,]
)
def test_delete_backup(request_type, transport: str = "grpc"):
    """delete_backup must forward a default request and return an operation future."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional and the API is mocked, so an empty
    # request is sufficient.
    request = request_type()
    # Patch the transport-level stub so no real RPC is made.
    with mock.patch.object(type(client.transport.delete_backup), "__call__") as rpc:
        # Stub a long-running operation as the transport response.
        rpc.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.delete_backup(request)
        # Exactly one stub call, carrying a default DeleteBackupRequest.
        assert len(rpc.mock_calls) == 1
        assert rpc.mock_calls[0].args[0] == cloud_filestore_service.DeleteBackupRequest()
    # The client wraps the proto operation in a future.
    assert isinstance(response, future.Future)
def test_delete_backup_empty_call():
    """A call with no request and no flattened fields still sends a default request."""
    # Coverage failsafe: request == None and no flattened fields passed.
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the transport-level stub so no real RPC is made.
    with mock.patch.object(type(client.transport.delete_backup), "__call__") as rpc:
        client.delete_backup()
        rpc.assert_called()
        assert rpc.mock_calls[0].args[0] == cloud_filestore_service.DeleteBackupRequest()
@pytest.mark.asyncio
async def test_delete_backup_async(
    transport: str = "grpc_asyncio",
    request_type=cloud_filestore_service.DeleteBackupRequest,
):
    """Async delete_backup must forward the request and return an operation future."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional and the API is mocked, so an empty
    # request is sufficient.
    request = request_type()
    # Patch the transport-level stub so no real RPC is made.
    with mock.patch.object(type(client.transport.delete_backup), "__call__") as rpc:
        # Stub an awaitable long-running operation as the transport response.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.delete_backup(request)
        # The stub must have been invoked with a default DeleteBackupRequest.
        assert len(rpc.mock_calls)
        assert rpc.mock_calls[0].args[0] == cloud_filestore_service.DeleteBackupRequest()
    # The client wraps the proto operation in a future.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_backup_async_from_dict():
    """Exercise the async delete_backup path with a plain ``dict`` request."""
    await test_delete_backup_async(request_type=dict)
def test_delete_backup_field_headers():
    """URI-mapped request fields must be propagated as routing headers."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a field that is part of the HTTP/1.1 URI so the routing
    # header carries a non-empty value.
    request = cloud_filestore_service.DeleteBackupRequest()
    request.name = "name/value"
    # Patch the transport-level stub so no real RPC is made.
    with mock.patch.object(type(client.transport.delete_backup), "__call__") as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        client.delete_backup(request)
        # The stub must have been invoked exactly once with our request object.
        assert len(rpc.mock_calls) == 1
        assert rpc.mock_calls[0].args[0] == request
        # The routing header must have been attached to the call metadata.
        assert (
            "x-goog-request-params",
            "name=name/value",
        ) in rpc.mock_calls[0].kwargs["metadata"]
@pytest.mark.asyncio
async def test_delete_backup_field_headers_async():
    """Async variant: URI-mapped request fields must become routing headers."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a field that is part of the HTTP/1.1 URI so the routing
    # header carries a non-empty value.
    request = cloud_filestore_service.DeleteBackupRequest()
    request.name = "name/value"
    # Patch the transport-level stub so no real RPC is made.
    with mock.patch.object(type(client.transport.delete_backup), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.delete_backup(request)
        # The stub must have been invoked with our request object.
        assert len(rpc.mock_calls)
        assert rpc.mock_calls[0].args[0] == request
        # The routing header must have been attached to the call metadata.
        assert (
            "x-goog-request-params",
            "name=name/value",
        ) in rpc.mock_calls[0].kwargs["metadata"]
def test_delete_backup_flattened():
    """Flattened keyword arguments must be copied into the request message."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Patch the transport-level stub so no real RPC is made.
    with mock.patch.object(type(client.transport.delete_backup), "__call__") as rpc:
        # Stub the transport response.
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        # Invoke with a truthy value for each flattened field.
        client.delete_backup(name="name_value",)
        # Exactly one call, with the flattened value folded into the request.
        assert len(rpc.mock_calls) == 1
        sent_request = rpc.mock_calls[0].args[0]
        assert sent_request.name == "name_value"
def test_delete_backup_flattened_error():
    """Passing both a request object and flattened fields must raise ValueError."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mixing a request object with flattened keyword fields is rejected.
    with pytest.raises(ValueError):
        client.delete_backup(
            cloud_filestore_service.DeleteBackupRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_backup_flattened_async():
    """Async: flattened keyword arguments must be copied into the request."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_backup), "__call__") as call:
        # Designate the awaitable return value for the call.  (The former
        # plain-Operation assignment preceding this one was dead code: it was
        # immediately overwritten and has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_backup(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_backup_flattened_error_async():
    """Async: request object plus flattened fields must raise ValueError."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mixing a request object with flattened keyword fields is rejected.
    with pytest.raises(ValueError):
        await client.delete_backup(
            cloud_filestore_service.DeleteBackupRequest(), name="name_value",
        )
@pytest.mark.parametrize(
    "request_type", [cloud_filestore_service.UpdateBackupRequest, dict,]
)
def test_update_backup(request_type, transport: str = "grpc"):
    """update_backup must forward a default request and return an operation future."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional and the API is mocked, so an empty
    # request is sufficient.
    request = request_type()
    # Patch the transport-level stub so no real RPC is made.
    with mock.patch.object(type(client.transport.update_backup), "__call__") as rpc:
        # Stub a long-running operation as the transport response.
        rpc.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.update_backup(request)
        # Exactly one stub call, carrying a default UpdateBackupRequest.
        assert len(rpc.mock_calls) == 1
        assert rpc.mock_calls[0].args[0] == cloud_filestore_service.UpdateBackupRequest()
    # The client wraps the proto operation in a future.
    assert isinstance(response, future.Future)
def test_update_backup_empty_call():
    """A call with no request and no flattened fields still sends a default request."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
    )
    with mock.patch.object(type(client.transport.update_backup), "__call__") as stub:
        client.update_backup()
        stub.assert_called()
        _, call_args, _ = stub.mock_calls[0]
        # An empty call must be materialized as a default request proto.
        assert call_args[0] == cloud_filestore_service.UpdateBackupRequest()
@pytest.mark.asyncio
async def test_update_backup_async(
    transport: str = "grpc_asyncio",
    request_type=cloud_filestore_service.UpdateBackupRequest,
):
    """Async update_backup issues the RPC and yields an LRO future."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport
    )
    # Empty request is fine: proto3 fields are optional and the stub is mocked.
    request = request_type()
    with mock.patch.object(type(client.transport.update_backup), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.update_backup(request)
        # The stub was invoked with a default request proto.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == cloud_filestore_service.UpdateBackupRequest()
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_backup_async_from_dict():
    # Re-run the async test with a plain dict standing in for the request proto.
    await test_update_backup_async(request_type=dict)
def test_update_backup_field_headers():
    """URI-bound request fields must be propagated as routing headers."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # backup.name is part of the HTTP/1.1 URI, so give it a non-empty value.
    request = cloud_filestore_service.UpdateBackupRequest()
    request.backup.name = "backup.name/value"
    with mock.patch.object(type(client.transport.update_backup), "__call__") as stub:
        stub.return_value = operations_pb2.Operation(name="operations/op")
        client.update_backup(request)
        # The request object is forwarded unchanged.
        assert len(stub.mock_calls) == 1
        _, call_args, call_kwargs = stub.mock_calls[0]
        assert call_args[0] == request
        # The routing header must carry the field value.
        expected_header = ("x-goog-request-params", "backup.name=backup.name/value")
        assert expected_header in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_update_backup_field_headers_async():
    """URI-bound fields are propagated as routing headers (async client)."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # backup.name is part of the HTTP/1.1 URI, so give it a non-empty value.
    request = cloud_filestore_service.UpdateBackupRequest()
    request.backup.name = "backup.name/value"
    with mock.patch.object(type(client.transport.update_backup), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.update_backup(request)
        # The request object is forwarded unchanged.
        assert len(stub.mock_calls)
        _, call_args, call_kwargs = stub.mock_calls[0]
        assert call_args[0] == request
        # The routing header must carry the field value.
        expected_header = ("x-goog-request-params", "backup.name=backup.name/value")
        assert expected_header in call_kwargs["metadata"]
def test_update_backup_flattened():
    """Flattened keyword arguments must be copied into the request proto."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.update_backup), "__call__") as stub:
        stub.return_value = operations_pb2.Operation(name="operations/op")
        # Invoke with a truthy value for every flattened field.
        client.update_backup(
            backup=cloud_filestore_service.Backup(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        # Each keyword must land on the matching request field.
        assert call_args[0].backup == cloud_filestore_service.Backup(name="name_value")
        assert call_args[0].update_mask == field_mask_pb2.FieldMask(
            paths=["paths_value"]
        )
def test_update_backup_flattened_error():
    """Mixing a request object with flattened fields raises ValueError."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = cloud_filestore_service.UpdateBackupRequest()
    with pytest.raises(ValueError):
        client.update_backup(
            request,
            backup=cloud_filestore_service.Backup(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_backup_flattened_async():
    """Async flattened kwargs must be copied into the request proto.

    Fix: the generated test assigned ``call.return_value`` twice; the first
    assignment (a plain Operation, not awaitable) was dead code immediately
    overwritten by the FakeUnaryUnaryCall and has been removed.
    """
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_backup), "__call__") as call:
        # Designate the awaitable return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_backup(
            backup=cloud_filestore_service.Backup(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].backup
        mock_val = cloud_filestore_service.Backup(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_backup_flattened_error_async():
    """Mixing a request object with flattened fields raises (async client)."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = cloud_filestore_service.UpdateBackupRequest()
    with pytest.raises(ValueError):
        await client.update_backup(
            request,
            backup=cloud_filestore_service.Backup(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
def test_credentials_transport_error():
    """Conflicting credential/transport constructor combinations must be rejected."""
    # Credentials plus an explicit transport instance is an error.
    transport = transports.CloudFilestoreManagerGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        CloudFilestoreManagerClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
    # A credentials file plus a transport instance is an error.
    transport = transports.CloudFilestoreManagerGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        CloudFilestoreManagerClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # An api_key plus a transport instance is an error.
    transport = transports.CloudFilestoreManagerGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        CloudFilestoreManagerClient(client_options=options, transport=transport)
    # An api_key plus explicit credentials is an error.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        CloudFilestoreManagerClient(
            client_options=options, credentials=ga_credentials.AnonymousCredentials()
        )
    # Scopes plus a transport instance is an error.
    transport = transports.CloudFilestoreManagerGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        CloudFilestoreManagerClient(
            client_options={"scopes": ["1", "2"]}, transport=transport
        )
def test_transport_instance():
    """A client accepts a pre-built transport instance and keeps it as-is."""
    transport = transports.CloudFilestoreManagerGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = CloudFilestoreManagerClient(transport=transport)
    # The exact same object, not a copy, must be retained.
    assert client.transport is transport
def test_transport_get_channel():
    """Both sync and async gRPC transports expose a usable channel."""
    for transport_cls in (
        transports.CloudFilestoreManagerGrpcTransport,
        transports.CloudFilestoreManagerGrpcAsyncIOTransport,
    ):
        transport = transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        assert transport.grpc_channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.CloudFilestoreManagerGrpcTransport,
        transports.CloudFilestoreManagerGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """ADC credentials are looked up when none are supplied."""
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """Clients default to the synchronous gRPC transport."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert isinstance(client.transport, transports.CloudFilestoreManagerGrpcTransport)
def test_cloud_filestore_manager_base_transport_error():
    """Supplying both credentials and a credentials file is rejected."""
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transports.CloudFilestoreManagerTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_cloud_filestore_manager_base_transport():
    """Every base-transport method and the LRO client raise NotImplementedError."""
    # Patch __init__ so the abstract base can be instantiated directly.
    with mock.patch(
        "google.cloud.filestore_v1.services.cloud_filestore_manager.transports.CloudFilestoreManagerTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.CloudFilestoreManagerTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        # Each generated RPC method must refuse to run on the base class.
        rpc_methods = (
            "list_instances",
            "get_instance",
            "create_instance",
            "update_instance",
            "restore_instance",
            "delete_instance",
            "list_backups",
            "get_backup",
            "create_backup",
            "delete_backup",
            "update_backup",
        )
        for rpc_name in rpc_methods:
            with pytest.raises(NotImplementedError):
                getattr(transport, rpc_name)(request=object())
        with pytest.raises(NotImplementedError):
            transport.close()
        # The LRO operations client property is likewise abstract.
        with pytest.raises(NotImplementedError):
            transport.operations_client
def test_cloud_filestore_manager_base_transport_with_credentials_file():
    """A credentials file is loaded with the cloud-platform default scope."""
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.filestore_v1.services.cloud_filestore_manager.transports.CloudFilestoreManagerTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transports.CloudFilestoreManagerTransport(
            credentials_file="credentials.json", quota_project_id="octopus"
        )
        # The file path, default scope, and quota project must all be forwarded.
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
def test_cloud_filestore_manager_base_transport_with_adc():
    """ADC is consulted when neither credentials nor a credentials file is given."""
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.filestore_v1.services.cloud_filestore_manager.transports.CloudFilestoreManagerTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transports.CloudFilestoreManagerTransport()
        adc.assert_called_once()
def test_cloud_filestore_manager_auth_adc():
    """Client construction without credentials falls back to ADC."""
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        CloudFilestoreManagerClient()
        # ADC must be queried with the service's default scope only.
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.CloudFilestoreManagerGrpcTransport,
        transports.CloudFilestoreManagerGrpcAsyncIOTransport,
    ],
)
def test_cloud_filestore_manager_transport_auth_adc(transport_class):
    """Transports resolve ADC with the caller's scopes and quota project."""
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.CloudFilestoreManagerGrpcTransport, grpc_helpers),
        (transports.CloudFilestoreManagerGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_cloud_filestore_manager_transport_create_channel(
    transport_class, grpc_helpers
):
    # Verifies that each transport builds its channel via the matching
    # grpc_helpers.create_channel() with the service host, ADC credentials,
    # the caller's scopes/quota project, and the default message-size options.
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        # The exact keyword set below is the generated transport's contract
        # with grpc_helpers.create_channel (assert_called_with checks the
        # most recent call).
        create_channel.assert_called_with(
            "file.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=["1", "2"],
            default_host="file.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.CloudFilestoreManagerGrpcTransport,
        transports.CloudFilestoreManagerGrpcAsyncIOTransport,
    ],
)
def test_cloud_filestore_manager_grpc_transport_client_cert_source_for_mtls(
    transport_class,
):
    # Verifies mTLS channel setup: explicit ssl_channel_credentials wins;
    # otherwise a client_cert_source_for_mtls callback feeds
    # grpc.ssl_channel_credentials().
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        # The supplied SSL credentials must be passed straight through to
        # create_channel, along with the default message-size options.
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            # The cert/key pair returned by the callback must reach grpc.
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_cloud_filestore_manager_host_no_port():
    """An endpoint without a port gets the default :443 appended."""
    opts = client_options.ClientOptions(api_endpoint="file.googleapis.com")
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=opts
    )
    assert client.transport._host == "file.googleapis.com:443"
def test_cloud_filestore_manager_host_with_port():
    """An endpoint with an explicit port keeps that port."""
    opts = client_options.ClientOptions(api_endpoint="file.googleapis.com:8000")
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=opts
    )
    assert client.transport._host == "file.googleapis.com:8000"
def test_cloud_filestore_manager_grpc_transport_channel():
    """A caller-supplied channel is used verbatim by the sync transport."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.CloudFilestoreManagerGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Fix: compare to None with `is`, not `==` (PEP 8 / flake8 E711).
    assert transport._ssl_channel_credentials is None
def test_cloud_filestore_manager_grpc_asyncio_transport_channel():
    """A caller-supplied channel is used verbatim by the asyncio transport."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.CloudFilestoreManagerGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Fix: compare to None with `is`, not `==` (PEP 8 / flake8 E711).
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.CloudFilestoreManagerGrpcTransport,
        transports.CloudFilestoreManagerGrpcAsyncIOTransport,
    ],
)
def test_cloud_filestore_manager_transport_channel_mtls_with_client_cert_source(
    transport_class,
):
    # Legacy mTLS path: a caller-supplied client_cert_source callback should
    # feed grpc.ssl_channel_credentials(), and the resulting SSL credentials
    # plus the api_mtls_endpoint should be used to create the channel.
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            # The deprecated arguments must still work, but emit a warning.
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            # The cert/key pair produced by the callback reaches grpc.
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            # The mTLS endpoint (not the plain host) is dialed.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.CloudFilestoreManagerGrpcTransport,
        transports.CloudFilestoreManagerGrpcAsyncIOTransport,
    ],
)
def test_cloud_filestore_manager_transport_channel_mtls_with_adc(transport_class):
    # Legacy mTLS path without an explicit client_cert_source: the SSL
    # credentials should come from google.auth.transport.grpc.SslCredentials
    # (ADC-provided mTLS) and be used to dial the api_mtls_endpoint.
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            # The deprecated arguments must still work, but emit a warning.
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            # The mTLS endpoint is dialed with the ADC-derived SSL credentials.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_cloud_filestore_manager_grpc_lro_client():
    """The gRPC transport lazily builds and caches an OperationsClient."""
    client = CloudFilestoreManagerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
    )
    transport = client.transport
    assert isinstance(transport.operations_client, operations_v1.OperationsClient)
    # Repeated property access must return the identical cached object.
    assert transport.operations_client is transport.operations_client
def test_cloud_filestore_manager_grpc_lro_async_client():
    """The asyncio transport lazily builds and caches an OperationsAsyncClient."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio"
    )
    transport = client.transport
    assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient)
    # Repeated property access must return the identical cached object.
    assert transport.operations_client is transport.operations_client
def test_backup_path():
    """backup_path formats the canonical backup resource name."""
    expected = "projects/{project}/locations/{location}/backups/{backup}".format(
        project="squid", location="clam", backup="whelk"
    )
    actual = CloudFilestoreManagerClient.backup_path("squid", "clam", "whelk")
    assert actual == expected
def test_parse_backup_path():
    """parse_backup_path inverts backup_path."""
    parts = {"project": "octopus", "location": "oyster", "backup": "nudibranch"}
    path = CloudFilestoreManagerClient.backup_path(**parts)
    assert CloudFilestoreManagerClient.parse_backup_path(path) == parts
def test_instance_path():
    """instance_path formats the canonical instance resource name."""
    expected = "projects/{project}/locations/{location}/instances/{instance}".format(
        project="cuttlefish", location="mussel", instance="winkle"
    )
    actual = CloudFilestoreManagerClient.instance_path("cuttlefish", "mussel", "winkle")
    assert actual == expected
def test_parse_instance_path():
    """parse_instance_path inverts instance_path."""
    parts = {"project": "nautilus", "location": "scallop", "instance": "abalone"}
    path = CloudFilestoreManagerClient.instance_path(**parts)
    assert CloudFilestoreManagerClient.parse_instance_path(path) == parts
def test_common_billing_account_path():
    """common_billing_account_path formats the billing-account resource name."""
    expected = "billingAccounts/{billing_account}".format(billing_account="squid")
    assert CloudFilestoreManagerClient.common_billing_account_path("squid") == expected
def test_parse_common_billing_account_path():
    """parse_common_billing_account_path inverts common_billing_account_path."""
    parts = {"billing_account": "clam"}
    path = CloudFilestoreManagerClient.common_billing_account_path(**parts)
    assert CloudFilestoreManagerClient.parse_common_billing_account_path(path) == parts
def test_common_folder_path():
    """common_folder_path formats the folder resource name."""
    expected = "folders/{folder}".format(folder="whelk")
    assert CloudFilestoreManagerClient.common_folder_path("whelk") == expected
def test_parse_common_folder_path():
    """parse_common_folder_path inverts common_folder_path."""
    parts = {"folder": "octopus"}
    path = CloudFilestoreManagerClient.common_folder_path(**parts)
    assert CloudFilestoreManagerClient.parse_common_folder_path(path) == parts
def test_common_organization_path():
    """common_organization_path formats the organization resource name."""
    expected = "organizations/{organization}".format(organization="oyster")
    assert CloudFilestoreManagerClient.common_organization_path("oyster") == expected
def test_parse_common_organization_path():
    """parse_common_organization_path inverts common_organization_path."""
    parts = {"organization": "nudibranch"}
    path = CloudFilestoreManagerClient.common_organization_path(**parts)
    assert CloudFilestoreManagerClient.parse_common_organization_path(path) == parts
def test_common_project_path():
    """common_project_path formats the project resource name."""
    expected = "projects/{project}".format(project="cuttlefish")
    assert CloudFilestoreManagerClient.common_project_path("cuttlefish") == expected
def test_parse_common_project_path():
    """parse_common_project_path inverts common_project_path."""
    parts = {"project": "mussel"}
    path = CloudFilestoreManagerClient.common_project_path(**parts)
    assert CloudFilestoreManagerClient.parse_common_project_path(path) == parts
def test_common_location_path():
    """common_location_path formats the location resource name."""
    expected = "projects/{project}/locations/{location}".format(
        project="winkle", location="nautilus"
    )
    assert CloudFilestoreManagerClient.common_location_path("winkle", "nautilus") == expected
def test_parse_common_location_path():
    """parse_common_location_path inverts common_location_path."""
    parts = {"project": "scallop", "location": "abalone"}
    path = CloudFilestoreManagerClient.common_location_path(**parts)
    assert CloudFilestoreManagerClient.parse_common_location_path(path) == parts
def test_client_with_default_client_info():
    """Custom client_info is forwarded to _prep_wrapped_messages."""
    client_info = gapic_v1.client_info.ClientInfo()
    # Once via the client constructor...
    with mock.patch.object(
        transports.CloudFilestoreManagerTransport, "_prep_wrapped_messages"
    ) as prep:
        CloudFilestoreManagerClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info
        )
        prep.assert_called_once_with(client_info)
    # ...and once when instantiating the transport class directly.
    with mock.patch.object(
        transports.CloudFilestoreManagerTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = CloudFilestoreManagerClient.get_transport_class()
        transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """`async with client` closes the underlying channel exactly once on exit."""
    client = CloudFilestoreManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio"
    )
    channel_type = type(client.transport.grpc_channel)
    with mock.patch.object(channel_type, "close") as close:
        async with client:
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """`with client` closes the transport's channel exactly once on exit.

    Fix: the local mapping was renamed so it no longer shadows the imported
    ``transports`` module within this function.
    """
    transport_channel_attrs = {
        "grpc": "_grpc_channel",
    }
    for transport_name, close_name in transport_channel_attrs.items():
        client = CloudFilestoreManagerClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """The client context manager delegates close() to its transport.

    Fix: the local list was renamed so it no longer shadows the imported
    ``transports`` module within this function.
    """
    transport_names = [
        "grpc",
    ]
    for transport_name in transport_names:
        client = CloudFilestoreManagerClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [
        (CloudFilestoreManagerClient, transports.CloudFilestoreManagerGrpcTransport),
        (
            CloudFilestoreManagerAsyncClient,
            transports.CloudFilestoreManagerGrpcAsyncIOTransport,
        ),
    ],
)
def test_api_key_credentials(client_class, transport_class):
    # When client_options carries an api_key, the client should resolve it via
    # google.auth._default.get_api_key_credentials (patched with create=True
    # because the attribute may not exist in older google-auth releases) and
    # hand the resulting credentials to the transport constructor.
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        mock_cred = mock.Mock()
        get_api_key_credentials.return_value = mock_cred
        options = client_options.ClientOptions()
        options.api_key = "api_key"
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            # The api-key credentials (not a credentials file) reach the
            # transport, along with the default endpoint and client info.
            patched.assert_called_once_with(
                credentials=mock_cred,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
|
googleapis/python-filestore
|
tests/unit/gapic/filestore_v1/test_cloud_filestore_manager.py
|
Python
|
apache-2.0
| 157,841
|
[
"Octopus"
] |
efc3f97f5c31130b8fc1c8c124e5cb7a1d9777821aad4ae381fa65ad2df56914
|
#!/usr/bin/env python
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
"""
Classify 16S fragments by mapping them to the GreenGenes DB with BWA.
"""
__author__ = 'Donovan Parks'
__copyright__ = 'Copyright 2013'
__credits__ = ['Donovan Parks']
__license__ = 'GPL3'
__version__ = '1.0.0'
__maintainer__ = 'Donovan Parks'
__email__ = 'donovan.parks@gmail.com'
__status__ = 'Development'
import os
import sys
import argparse
import ntpath
from readConfig import ReadConfig
from bwaUtils import mapPair, mapSingle
from taxonomyUtils import LCA, readTaxonomy
import pysam
class ClassifyBWA(object):
    def __init__(self):
        # Taxonomy returned for reads that are unmapped or fail the filters;
        # one placeholder per taxonomic rank (kingdom..species plus sequence id).
        self.unmappedStr = ['k__unmapped', 'p__unmapped', 'c__unmapped', 'o__unmapped', 'f__unmapped', 'g__unmapped', 's__unmapped', 'id__unmapped']
        # Hard-coded cluster paths to the 16S reference databases:
        # GreenGenes OTUs clustered at 94/97/99% identity, plus SILVA SSURef 111 NR.
        # NOTE(review): these absolute paths tie the script to one filesystem —
        # consider moving them to configuration.
        self.dbFiles = {'GG94': '/srv/whitlam/bio/db/communitym/201305_gg/94_otus.fasta',
                            'GG97': '/srv/whitlam/bio/db/communitym/201305_gg/97_otus.fasta',
                            'GG99': '/srv/whitlam/bio/db/communitym/201305_gg/99_otus.fasta',
                            'SILVA98': '/srv/whitlam/bio/db/communitym/silva/SSURef_111_NR_trunc.acgt.fna'}
        # Taxonomy files keyed identically to dbFiles.
        self.taxonomyFiles = {'GG94': '/srv/whitlam/bio/db/communitym/201305_gg/94_otu_taxonomy.txt',
                            'GG97': '/srv/whitlam/bio/db/communitym/201305_gg/97_otu_taxonomy.txt',
                            'GG99': '/srv/whitlam/bio/db/communitym/201305_gg/99_otu_taxonomy.txt',
                            'SILVA98': '/srv/whitlam/bio/db/communitym/silva/SSURef_111_NR_taxonomy.txt'}
def processRead(self, bam, read, ggIdToTaxonomy, maxEditDistance, minLength, counts=None):
if read.is_unmapped:
if counts != None:
counts['unmapped'] += 1
return self.unmappedStr, False
elif (read.alen < minLength * read.rlen):
if counts != None:
counts['align len'] += 1
return self.unmappedStr, False
elif (read.opt('NM') > maxEditDistance * read.rlen):
if counts != None:
counts['edit dist'] += 1
return self.unmappedStr, False
taxonomy = ggIdToTaxonomy[bam.getrname(read.tid)]
return taxonomy, True
def readPairedBAM(self, bamFile, ggIdToTaxonomy, maxEditDistance, minLength):
# read compressed BAM file and report basic statistics
bam = pysam.Samfile(bamFile, 'rb')
# find primary mappings for each query read
readsMappedTo16S_1 = {}
readsMappedTo16S_2 = {}
editDists = {}
counts = {'unmapped':0, 'edit dist':0, 'align len':0}
numMultiplePrimaryMappings = 0
for read in bam.fetch(until_eof=True):
if not read.is_secondary:
taxonomy, bMapped = self.processRead(bam, read, ggIdToTaxonomy, maxEditDistance, minLength, counts)
if bMapped:
editDist = read.opt('NM')
else:
editDist = -1 # flag as unmapped
if read.is_read1:
qname = read.qname + '/1'
readsMappedTo16S = readsMappedTo16S_1
elif read.is_read2:
qname = read.qname + '/2'
readsMappedTo16S = readsMappedTo16S_2
if qname in readsMappedTo16S and bMapped:
# read has multiple primary alignments for different parts of the query sequence
# which may indicate it is chimeric. For classification purposes, the LCA of
# all primary alignments is taken.
lca = LCA(readsMappedTo16S[qname], taxonomy)
readsMappedTo16S[qname] = lca
editDists[qname] = max(editDist, editDists[qname])
numMultiplePrimaryMappings += 1
else:
readsMappedTo16S[qname] = taxonomy
editDists[qname] = editDist
# process secondary mappings for each query read
numSecondaryMappings = 0
for read in bam.fetch(until_eof=True):
if read.is_secondary:
# process primary read
taxonomy, bMapped = self.processRead(bam, read, ggIdToTaxonomy, maxEditDistance, minLength)
editDist = read.opt('NM')
if read.is_read1:
qname = read.qname + '/1'
readsMappedTo16S = readsMappedTo16S_1
elif read.is_read2:
qname = read.qname + '/2'
readsMappedTo16S = readsMappedTo16S_2
if bMapped and editDist <= editDists[qname]:
numSecondaryMappings = 0
lca = LCA(readsMappedTo16S[qname], taxonomy)
readsMappedTo16S[qname] = lca
bam.close()
if len(readsMappedTo16S_1) != len(readsMappedTo16S_2):
print '[Error] Paired files do not have the same number of reads.'
sys.exit()
numReads = 2 * len(readsMappedTo16S)
print ' Number of paired reads: %d' % numReads
print ' Reads unmapped: %d (%.2f%%)' % (counts['unmapped'], float(counts['unmapped']) * 100 / max(numReads, 1))
print ' Reads failing edit distance threshold: %d (%.2f%%)' % (counts['edit dist'], float(counts['edit dist']) * 100 / max(numReads, 1))
print ' Reads failing alignment length threshold: %d (%.2f%%)' % (counts['align len'], float(counts['align len']) * 100 / max(numReads, 1))
print ' # multiple primary mappings: %d (%.2f%%)' % (numMultiplePrimaryMappings, float(numMultiplePrimaryMappings) * 100 / max(numReads, 1))
print ' # equally good secondary mappings: %d (%.2f%%)' % (numSecondaryMappings, float(numSecondaryMappings) * 100 / max(numReads, 1))
return readsMappedTo16S_1, readsMappedTo16S_2
def readSingleBAM(self, bamFile, ggIdToTaxonomy, maxEditDistance, minLength):
    """Classify single-ended reads from a BAM file of 16S alignments.

    Returns a dict mapping read name -> taxonomy (list of rank strings).
    Two passes over the BAM: primary alignments establish the assignment
    (multiple primaries, a chimera signal, are collapsed to their LCA);
    secondary alignments at least as good by edit distance then further
    relax the assignment via the LCA.
    """
    # read compressed BAM file
    bam = pysam.Samfile(bamFile, 'rb')
    # pass 1: find primary mappings for each query read
    readsMappedTo16S = {}
    editDists = {}  # read name -> best edit distance seen; -1 means unmapped
    counts = {'unmapped':0, 'edit dist':0, 'align len':0}
    numMultiplePrimaryMappings = 0
    for read in bam.fetch(until_eof=True):
        if not read.is_secondary:
            taxonomy, bMapped = self.processRead(bam, read, ggIdToTaxonomy, maxEditDistance, minLength, counts)
            if bMapped:
                editDist = read.opt('NM')
            else:
                editDist = -1 # flag as unmapped
            if read.qname in readsMappedTo16S and editDists[read.qname] != -1:
                # read has multiple primary alignments for different parts of the query sequence
                # which may indicate it is chimeric. For classification purposes, the LCA of
                # all primary alignments is taken.
                lca = LCA(readsMappedTo16S[read.qname], taxonomy)
                readsMappedTo16S[read.qname] = lca
                editDists[read.qname] = max(editDist, editDists[read.qname])
                numMultiplePrimaryMappings += 1
            else:
                readsMappedTo16S[read.qname] = taxonomy
                editDists[read.qname] = editDist
    # pass 2: process secondary mappings for each query read
    # NOTE(review): counts is deliberately not passed here, so the tallies
    # gathered in pass 1 are not inflated by secondary alignments.
    numSecondaryMappings = 0
    for read in bam.fetch(until_eof=True):
        if read.is_secondary:
            # process secondary read
            taxonomy, bMapped = self.processRead(bam, read, ggIdToTaxonomy, maxEditDistance, minLength)
            editDist = read.opt('NM')
            # a secondary hit that is at least as good as the primary one
            # makes the assignment ambiguous -> take the LCA
            if bMapped and editDist <= editDists[read.qname]:
                numSecondaryMappings += 1
                lca = LCA(readsMappedTo16S[read.qname], taxonomy)
                readsMappedTo16S[read.qname] = lca
    bam.close()
    numReads = len(readsMappedTo16S)
    print ' Number of singleton reads: %d' % numReads
    print ' Reads unmapped: %d (%.2f%%)' % (counts['unmapped'], float(counts['unmapped']) * 100 / max(numReads, 1))
    print ' Reads failing edit distance threshold: %d (%.2f%%)' % (counts['edit dist'], float(counts['edit dist']) * 100 / max(numReads, 1))
    print ' Reads failing alignment length threshold: %d (%.2f%%)' % (counts['align len'], float(counts['align len']) * 100 / max(numReads, 1))
    print ' # multiple primary mappings: %d (%.2f%%)' % (numMultiplePrimaryMappings, float(numMultiplePrimaryMappings) * 100 / max(numReads, 1))
    print ' # equally good secondary mappings: %d (%.2f%%)' % (numSecondaryMappings, float(numSecondaryMappings) * 100 / max(numReads, 1))
    return readsMappedTo16S
def writeClassification(self, filename, mappedReads):
    """Write read classifications as tab-separated lines.

    filename: output path (overwritten if it exists).
    mappedReads: dict mapping read name -> taxonomy (sequence of rank
        strings); each entry becomes '<name>\t<rank;rank;...>\n'.
    """
    # 'with' guarantees the handle is closed even if a write fails (the
    # original leaked the handle on error); items() behaves exactly like
    # iteritems() here and also works under Python 3.
    with open(filename, 'w') as fout:
        for refName, taxonomy in mappedReads.items():
            fout.write(refName + '\t' + ';'.join(taxonomy) + '\n')
def processPairs(self, pairs, ggIdToTaxonomy, maxEditDistance, minLength, outputDir, prefix):
    """Classify paired-end samples: for each (pair1, pair2) in 'pairs',
    read the '.intersect' BAM (both ends 16S) and the '.difference' BAM
    (one end 16S) produced by run(), and write one TSV per input file.

    NOTE(review): outputDir is currently unused; paths are derived from
    'prefix', which already points into the classified directory.
    """
    # pairs holds the two mates of each sample consecutively
    for i in xrange(0, len(pairs), 2):
        pair1 = pairs[i]
        pair2 = pairs[i + 1]
        pair1Base = ntpath.basename(pair1)
        pair2Base = ntpath.basename(pair2)
        print 'Identifying 16S sequences in paired-end reads: ' + pair1 + ', ' + pair2
        # write out classifications for paired-end reads with both ends identified as 16S
        bamFile = prefix + '.' + pair1Base[0:pair1Base.rfind('.')] + '.intersect.16S.bam'
        readsMappedTo16S_1, readsMappedTo16S_2 = self.readPairedBAM(bamFile, ggIdToTaxonomy, maxEditDistance, minLength)
        output1 = prefix + '.' + pair1Base[0:pair1Base.rfind('.')] + '.intersect.16S.tsv'
        output2 = prefix + '.' + pair2Base[0:pair2Base.rfind('.')] + '.intersect.16S.tsv'
        print ' Paired results written to: '
        print ' ' + output1
        print ' ' + output2 + '\n'
        self.writeClassification(output1, readsMappedTo16S_1)
        self.writeClassification(output2, readsMappedTo16S_2)
        # write out classifications for paired-ends reads with only one end identified as 16S
        bamFile = prefix + '.' + pair1Base[0:pair1Base.rfind('.')] + '.difference.16S.bam'
        readsMappedTo16S = self.readSingleBAM(bamFile, ggIdToTaxonomy, maxEditDistance, minLength)
        output = prefix + '.' + pair1Base[0:pair1Base.rfind('.')] + '.difference.16S.tsv'
        print ' Singleton results written to: ' + output + '\n'
        self.writeClassification(output, readsMappedTo16S)
def processSingles(self, singles, ggIdToTaxonomy, maxEditDistance, minLength, outputDir, prefix):
    """Classify single-end samples: read each '<prefix>.<base>.16S.bam'
    produced by run() and write the classifications to a matching TSV.

    NOTE(review): outputDir is currently unused; paths come from 'prefix'.
    """
    for i in xrange(0, len(singles)):
        seqFile = singles[i]
        print 'Identifying 16S sequences in single-end reads: ' + seqFile
        singleBase = ntpath.basename(seqFile)
        # BAM/TSV names are derived from the input file name sans extension
        bamFile = prefix + '.' + singleBase[0:singleBase.rfind('.')] + '.16S.bam'
        readsMappedTo16S = self.readSingleBAM(bamFile, ggIdToTaxonomy, maxEditDistance, minLength)
        output = prefix + '.' + singleBase[0:singleBase.rfind('.')] + '.16S.tsv'
        print ' Classification results written to: ' + output + '\n'
        self.writeClassification(output, readsMappedTo16S)
def run(self, projectParams, sampleParams, refDB, threads):
    """Map all samples against the chosen 16S reference DB with BWA, then
    classify the resulting BAM files.

    projectParams: dict with at least 'output_dir'.
    sampleParams: dict sample name -> dict with 'pairs', 'singles',
        'edit_dist', 'min_align_len'.
    refDB: key into self.dbFiles / self.taxonomyFiles (e.g. 'GG97').
    threads: thread count forwarded to bwa.

    Side effects: creates/clears <output_dir>/classified (asks the user
    interactively before clearing), shells out to 'bwa index' when the DB
    is not yet indexed, and exits the process on user refusal.
    """
    # check if classification directory already exists
    dir_path = os.path.join(projectParams['output_dir'], 'classified')
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    else:
        rtn = raw_input('Remove previously classified reads (Y or N)? ')
        if rtn.lower() == 'y' or rtn.lower() == 'yes':
            files = os.listdir(dir_path)
            for f in files:
                os.remove(os.path.join(dir_path, f))
        else:
            sys.exit()
    taxonomyFile = self.taxonomyFiles[refDB]
    ggIdToTaxonomy = readTaxonomy(taxonomyFile)
    dbFile = self.dbFiles[refDB]
    print 'Classifying reads with: ' + dbFile
    print 'Assigning taxonomy with: ' + taxonomyFile
    print 'Threads: ' + str(threads)
    print ''
    # '.amb' is one of the files bwa index produces; its absence means the
    # reference has not been indexed yet
    if not os.path.exists(dbFile + '.amb'):
        print 'Indexing Reference DB:'
        os.system('bwa index -a is ' + dbFile)
        print ''
    # map reads
    for sample in sampleParams:
        print 'Mapping sample: ' + sample
        outputDir = projectParams['output_dir']
        inputPrefix = os.path.join(outputDir, 'extracted', sample)
        outputPrefix = os.path.join(outputDir, 'classified', sample)
        pairs = sampleParams[sample]['pairs']
        singles = sampleParams[sample]['singles']
        # paired files are stored consecutively: (mate1, mate2, mate1, ...)
        for i in xrange(0, len(pairs), 2):
            pair1Base = ntpath.basename(pairs[i])
            pair1File = inputPrefix + '.' + pair1Base[0:pair1Base.rfind('.')] + '.intersect.SSU.fasta'
            pair2Base = ntpath.basename(pairs[i + 1])
            pair2File = inputPrefix + '.' + pair2Base[0:pair2Base.rfind('.')] + '.intersect.SSU.fasta'
            bamPrefix = ntpath.basename(pairs[i])
            bamPrefixFile = outputPrefix + '.' + bamPrefix[0:bamPrefix.rfind('.')] + '.intersect.16S'
            mapPair(dbFile, pair1File, pair2File, bamPrefixFile, threads)
            # reads where only one mate was identified as SSU are mapped single-ended
            diffFile = inputPrefix + '.' + pair1Base[0:pair1Base.rfind('.')] + '.difference.SSU.fasta'
            bamPrefixFile = outputPrefix + '.' + bamPrefix[0:bamPrefix.rfind('.')] + '.difference.16S'
            mapSingle(dbFile, diffFile, bamPrefixFile, threads)
        for i in xrange(0, len(singles)):
            singleBase = ntpath.basename(singles[i])
            singleFile = inputPrefix + '.' + singleBase[0:singleBase.rfind('.')] + '.SSU.fasta'
            bamPrefixFile = outputPrefix + '.' + singleBase[0:singleBase.rfind('.')] + '.16S'
            mapSingle(dbFile, singleFile, bamPrefixFile, threads)
        print '************************************************************'
    # classify reads
    for sample in sampleParams:
        print 'Classifying sample: ' + sample
        outputDir = os.path.join(projectParams['output_dir'], 'classified')
        prefix = os.path.join(outputDir, sample)
        pairs = sampleParams[sample]['pairs']
        singles = sampleParams[sample]['singles']
        maxEditDistance = sampleParams[sample]['edit_dist']
        minLength = sampleParams[sample]['min_align_len']
        # identify 16S sequences in paired-end reads
        self.processPairs(pairs, ggIdToTaxonomy, maxEditDistance, minLength, outputDir, prefix)
        # identify 16S sequences in single-end reads
        self.processSingles(singles, ggIdToTaxonomy, maxEditDistance, minLength, outputDir, prefix)
        print ''
if __name__ == '__main__':
    # Command-line entry point: parse the project config, then map and
    # classify every sample against the chosen reference database.
    parser = argparse.ArgumentParser(description="Classify 16S fragments by mapping them to the GreenGenes DB with BWA.",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('config_file', help='project config file.')
    parser.add_argument('ref_db', help='Reference DB to use for classification (choices: GG94, GG97, GG99, SILVA98)', choices=['GG94', 'GG97', 'GG99', 'SILVA98'])
    parser.add_argument('-t', '--threads', help='number of threads', type=int, default=1)
    args = parser.parse_args()
    classifyBWA = ClassifyBWA()
    rc = ReadConfig()
    # outputDirExists=True: the extraction stage must already have run
    projectParams, sampleParams = rc.readConfig(args.config_file, outputDirExists=True)
    classifyBWA.run(projectParams, sampleParams, args.ref_db, args.threads)
|
dparks1134/CommunityM
|
classifyBWA_16S.py
|
Python
|
gpl-3.0
| 17,166
|
[
"BWA",
"pysam"
] |
7d04091f5d60d760c29b1ed9f8d0f94b2b7d2896fa70b7c3f6284e203663f042
|
import sys, collections, itertools, os.path, optparse
# ---- command-line interface ------------------------------------------------
optParser = optparse.OptionParser(
    usage = "python %prog [options] <in.gtf> <out.gff>",
    description=
    "Script to prepare annotation for DEXSeq." +
    "This script takes an annotation file in Ensembl GTF format" +
    "and outputs a 'flattened' annotation file suitable for use " +
    "with the count_in_exons.py script ",
    epilog =
    "Written by Simon Anders (sanders@fs.tum.de), European Molecular Biology " +
    "Laboratory (EMBL). (c) 2010. Released under the terms of the GNU General " +
    "Public License v3. Part of the 'DEXSeq' package." )

# -r/--aggregate: merge genes that share an exon into one 'aggregate gene'
# (default) or drop exons that cannot be assigned to a single gene.
optParser.add_option( "-r", "--aggregate", type="choice", dest="aggregate",
    choices = ( "no", "yes" ), default = "yes",
    help = "'yes' or 'no'. Indicates whether two or more genes sharing an exon should be merged into an 'aggregate gene'. If 'no', the exons that can not be assiged to a single gene are ignored." )

(opts, args) = optParser.parse_args()

# exactly two positional arguments are required: input GTF and output GFF
if len( args ) != 2:
    sys.stderr.write( "Script to prepare annotation for DEXSeq.\n\n" )
    sys.stderr.write( "Usage: python %s <in.gtf> <out.gff>\n\n" % os.path.basename(sys.argv[0]) )
    sys.stderr.write( "This script takes an annotation file in Ensembl GTF format\n" )
    sys.stderr.write( "and outputs a 'flattened' annotation file suitable for use\n" )
    sys.stderr.write( "with the count_in_exons.py script.\n" )
    sys.exit(1)

# HTSeq is imported lazily so a missing dependency yields a friendly message
# rather than a bare traceback.
try:
    import HTSeq
except ImportError:
    sys.stderr.write( "Could not import HTSeq. Please install the HTSeq Python framework\n" )
    sys.stderr.write( "available from http://www-huber.embl.de/users/anders/HTSeq\n" )
    sys.exit(1)

gtf_file = args[0]
out_file = args[1]
aggregateGenes = opts.aggregate == "yes"
# Step 1: Store all exons with their gene and transcript ID
# in a GenomicArrayOfSets. Each genomic position then knows the set of
# (gene_id, transcript_id) pairs whose exons cover it.
exons = HTSeq.GenomicArrayOfSets( "auto", stranded=True )
for f in HTSeq.GFF_Reader( gtf_file ):
    if f.type != "exon":
        continue
    # NOTE(review): ':' in gene ids is normalised to '_' — presumably to keep
    # the '+'-joined aggregate ids parseable downstream; confirm.
    f.attr['gene_id'] = f.attr['gene_id'].replace( ":", "_" )
    exons[f.iv] += ( f.attr['gene_id'], f.attr['transcript_id'] )
# Step 2: Form sets of overlapping genes
# We produce the dict 'gene_sets', whose values are sets of gene IDs. Each set
# contains IDs of genes that overlap, i.e., share bases (on the same strand).
# The keys of 'gene_sets' are the IDs of all genes, and each key refers to
# the set that contains the gene.
# Each gene set forms an 'aggregate gene'.
if aggregateGenes == True:
    gene_sets = collections.defaultdict( lambda: set() )
    for iv, s in exons.steps():
        # For each step, make a set, 'full_set' of all the gene IDs occuring
        # in the present step, and also add all those gene IDs, whch have been
        # seen earlier to co-occur with each of the currently present gene IDs.
        full_set = set()
        for gene_id, transcript_id in s:
            full_set.add( gene_id )
            full_set |= gene_sets[ gene_id ]
        # Make sure that all genes that are now in full_set get associated
        # with full_set, i.e., get to know about their new partners
        for gene_id in full_set:
            # merging can only grow a gene's set, never shrink it
            assert gene_sets[ gene_id ] <= full_set
            gene_sets[ gene_id ] = full_set
# Step 3: Go through the steps again to get the exonic sections. Each step
# becomes an 'exonic part'. The exonic part is associated with an
# aggregate gene, i.e., a gene set as determined in the previous step,
# and a transcript set, containing all transcripts that occur in the step.
# The results are stored in the dict 'aggregates', which contains, for each
# aggregate ID, a list of all its exonic_part features.
aggregates = collections.defaultdict( lambda: list() )
for iv, s in exons.steps( ):
    # Skip empty steps
    if len(s) == 0:
        continue
    gene_id = list(s)[0][0]
    ## if aggregateGenes=FALSE, ignore the exons associated to more than one gene ID
    if aggregateGenes == False:
        check_set = set()
        for geneID, transcript_id in s:
            check_set.add( geneID )
        if( len( check_set ) > 1 ):
            continue
        else:
            aggregate_id = gene_id
    # Take one of the gene IDs, find the others via gene sets, and
    # form the aggregate ID from all of them
    else:
        assert set( gene_id for gene_id, transcript_id in s ) <= gene_sets[ gene_id ]
        aggregate_id = '+'.join( gene_sets[ gene_id ] )
    # Make the feature and store it in 'aggregates'
    f = HTSeq.GenomicFeature( aggregate_id, "exonic_part", iv )
    # record which script produced this annotation line
    f.source = os.path.basename( sys.argv[0] )
    f.attr = {}
    f.attr[ 'gene_id' ] = aggregate_id
    transcript_set = set( ( transcript_id for gene_id, transcript_id in s ) )
    f.attr[ 'transcripts' ] = '+'.join( transcript_set )
    aggregates[ aggregate_id ].append( f )
# Step 4: For each aggregate, number the exonic parts and build one
# spanning 'aggregate_gene' feature from the first to the last part.
aggregate_features = []
for l in aggregates.values():
    # sanity checks: all parts share one name, are ordered and disjoint,
    # and live on a single chromosome/strand
    for i in xrange( len(l)-1 ):
        assert l[i].name == l[i+1].name, str(l[i+1]) + " has wrong name"
        assert l[i].iv.end <= l[i+1].iv.start, str(l[i+1]) + " starts too early"
        if l[i].iv.chrom != l[i+1].iv.chrom:
            raise ValueError, "Same name found on two chromosomes: %s, %s" % ( str(l[i]), str(l[i+1]) )
        if l[i].iv.strand != l[i+1].iv.strand:
            raise ValueError, "Same name found on two strands: %s, %s" % ( str(l[i]), str(l[i+1]) )
    aggr_feat = HTSeq.GenomicFeature( l[0].name, "aggregate_gene",
        HTSeq.GenomicInterval( l[0].iv.chrom, l[0].iv.start,
            l[-1].iv.end, l[0].iv.strand ) )
    aggr_feat.source = os.path.basename( sys.argv[0] )
    aggr_feat.attr = { 'gene_id': aggr_feat.name }
    # exonic parts are numbered 001, 002, ... in genomic order
    for i in xrange( len(l) ):
        l[i].attr['exonic_part_number'] = "%03d" % ( i+1 )
    aggregate_features.append( aggr_feat )
# Step 5: Sort the aggregates, then write everything out: each
# aggregate_gene line is immediately followed by its exonic_part lines.
aggregate_features.sort( key = lambda f: ( f.iv.chrom, f.iv.start ) )
fout = open( out_file, "w" )
for aggr_feat in aggregate_features:
    fout.write( aggr_feat.get_gff_line() )
    for f in aggregates[ aggr_feat.name ]:
        fout.write( f.get_gff_line() )
fout.close()
|
fchen365/RBP_models
|
python/dexseq_prepare_annotation.py
|
Python
|
mit
| 6,125
|
[
"HTSeq"
] |
74c498ebac5b29f42e56f99f17bd08bd0f4b5f9a09508ba58eaa0b6518609ba5
|
#A* -------------------------------------------------------------------
#B* This file contains source code for the PyMOL computer program
#C* copyright 1998-2000 by Warren Lyford Delano of DeLano Scientific.
#D* -------------------------------------------------------------------
#E* It is unlawful to modify or remove this copyright notice.
#F* -------------------------------------------------------------------
#G* Please see the accompanying LICENSE file for further information.
#H* -------------------------------------------------------------------
#I* Additional authors of this source file include:
#-*
#-*
#-*
#Z* -------------------------------------------------------------------
#
#
#
import bond_amber
import protein_residues
import protein_amber
from chempy.neighbor import Neighbor
from chempy.models import Connected
from chempy import Bond,place,feedback
from chempy.cpv import *
# Distance cutoffs — presumably angstroms (PDB convention); confirm.
MAX_BOND_LEN = 2.2  # max atom-atom distance treated as a covalent bond (incl. S-S)
PEPT_CUTOFF = 1.7   # max N-C distance for an inter-residue peptide bond
# Atom names whose presence marks a residue as N- or C-terminal
N_TERMINAL_ATOMS = ('HT','HT1','HT2','HT3','H1','H2','H3',
                    '1H','2H','3H','1HT','2HT','3HT')
C_TERMINAL_ATOMS = ('OXT','O2','OT1','OT2')
#---------------------------------------------------------------------------------
# NOTE: right now, the only way to get N-terminal residues is to
# submit a structure which contains at least one N_TERMINAL hydrogens
def generate(model, forcefield = protein_amber, histidine = 'HIE',
             skip_sort=None, bondfield = bond_amber ):
    '''
    Full pipeline: strip ATOM-ATOM bonds, assign types/bonds, add missing
    hydrogens, place atoms with unknown coordinates, and return a new
    Indexed model. The input model is modified in place along the way.
    histidine selects the tautomer/protonation state used for HIS residues.
    '''
    strip_atom_bonds(model) # remove bonds between non-hetatms (ATOM)
    # BUGFIX: forward 'histidine' — previously the caller's choice was
    # silently dropped and add_bonds always used its own default ('HIE').
    add_bonds(model,forcefield=forcefield,histidine=histidine)
    connected = model.convert_to_connected()
    add_hydrogens(connected,forcefield=forcefield,skip_sort=skip_sort)
    place.simple_unknowns(connected,bondfield = bondfield)
    return connected.convert_to_indexed()
#---------------------------------------------------------------------------------
def strip_atom_bonds(model):
    '''
    Drop every bond whose two endpoints are both regular ATOM records;
    bonds touching at least one HETATM atom are kept. Modifies
    model.bond in place (rebinds it to a fresh list).
    '''
    atoms = model.atom
    model.bond = [bond for bond in model.bond
                  if atoms[bond.index[0]].hetatm or atoms[bond.index[1]].hetatm]
#---------------------------------------------------------------------------------
def assign_types(model, forcefield = protein_amber, histidine = 'HIE' ):
    '''
    assigns types: takes HIS -> HID,HIE,HIP and CYS->CYX where appropriate
    but does not add any bonds!

    Walks every non-HETATM residue of an Indexed model, normalises atom
    name aliases against the residue templates, stamps forcefield type
    and partial charge onto each atom, and renames disulfide-bonded
    cysteines to CYX based on SG-SG distance.
    '''
    if feedback['actions']:
        print " "+str(__name__)+": assigning types..."
    if str(model.__class__) != 'chempy.models.Indexed':
        raise ValueError('model is not an "Indexed" model object')
    if model.nAtom:
        crd = model.get_coord_list()
        nbr = Neighbor(crd,MAX_BOND_LEN)   # spatial lookup for SG-SG pairing
        res_list = model.get_residues()    # (first, last+1) atom index pairs
        if len(res_list):
            for a in res_list:
                base = model.atom[a[0]]
                if not base.hetatm:
                    resn = base.resn
                    # normalise histidine to the requested tautomer
                    if resn == 'HIS':
                        for c in range(a[0],a[1]): # this residue
                            model.atom[c].resn = histidine
                        resn = histidine
                    if resn == 'N-M': # N-methyl from Insight II,
                        for c in range(a[0],a[1]): # this residue
                            model.atom[c].resn = 'NME'
                        resn = 'NME'
                    # find out if this is n or c terminal residue
                    names = []
                    for b in range(a[0],a[1]):
                        names.append(model.atom[b].name)
                    tmpl = protein_residues.normal
                    if forcefield:
                        ffld = forcefield.normal
                    for b in N_TERMINAL_ATOMS:
                        if b in names:
                            tmpl = protein_residues.n_terminal
                            if forcefield:
                                ffld = forcefield.n_terminal
                            break
                    for b in C_TERMINAL_ATOMS:
                        if b in names:
                            tmpl = protein_residues.c_terminal
                            if forcefield:
                                ffld = forcefield.c_terminal
                            break
                    if not tmpl.has_key(resn):
                        raise RuntimeError("unknown residue type '"+resn+"'")
                    else:
                        # reassign atom names and build dictionary
                        dict = {}
                        aliases = tmpl[resn]['aliases']
                        for b in range(a[0],a[1]):
                            at = model.atom[b]
                            if aliases.has_key(at.name):
                                at.name = aliases[at.name]
                            dict[at.name] = b
                            if forcefield:
                                k = (resn,at.name)
                                if ffld.has_key(k):
                                    at.text_type = ffld[k]['type']
                                    at.partial_charge = ffld[k]['charge']
                                else:
                                    raise RuntimeError("no parameters for '"+str(k)+"'")
                        if dict.has_key('SG'): # cysteine
                            cur = dict['SG']
                            at = model.atom[cur]
                            lst = nbr.get_neighbors(at.coord)
                            for b in lst:
                                if b>cur: # only do this once (only when b>cur - i.e. this is 1st CYS)
                                    at2 = model.atom[b]
                                    if at2.name=='SG':
                                        if not at2.in_same_residue(at):
                                            dst = distance(at.coord,at2.coord)
                                            if dst<=MAX_BOND_LEN:
                                                # disulfide bridge: both residues become CYX
                                                if forcefield:
                                                    for c in range(a[0],a[1]): # this residue
                                                        atx = model.atom[c]
                                                        atx.resn = 'CYX'
                                                        resn = atx.resn
                                                        if (c<=b):
                                                            k = ('CYX',atx.name)
                                                            if ffld.has_key(k):
                                                                atx.text_type = ffld[k]['type']
                                                                atx.partial_charge = ffld[k]['charge']
                                                            else:
                                                                raise RuntimeError("no parameters for '"+str(k)+"'")
                                                for d in res_list: # other residue
                                                    if (b>=d[0]) and (b<d[1]):
                                                        for c in range(d[0],d[1]):
                                                            atx = model.atom[c]
                                                            atx.resn = 'CYX'
                                                        # since b>cur, assume assignment later on
                                                        break
#---------------------------------------------------------------------------------
def add_bonds(model, forcefield = protein_amber, histidine = 'HIE' ):
    '''
    add_bonds(model, forcefield = protein_amber, histidine = 'HIE' )
    (1) fixes aliases, assigns types, makes HIS into HIE,HID, or HIP
    and changes cystine to CYX
    (2) adds bonds between existing atoms

    Bonds come from three sources: the residue template (intra-residue),
    N-to-C distance search (peptide bonds), and SG-SG distance search
    (disulfides). Modifies the Indexed model in place.
    '''
    if feedback['actions']:
        print " "+str(__name__)+": assigning types and bonds..."
    if str(model.__class__) != 'chempy.models.Indexed':
        raise ValueError('model is not an "Indexed" model object')
    if model.nAtom:
        crd = model.get_coord_list()
        nbr = Neighbor(crd,MAX_BOND_LEN)   # spatial lookup for distance-based bonds
        res_list = model.get_residues()    # (first, last+1) atom index pairs
        if len(res_list):
            for a in res_list:
                base = model.atom[a[0]]
                if not base.hetatm:
                    resn = base.resn
                    # normalise histidine to the requested tautomer
                    if resn == 'HIS':
                        for c in range(a[0],a[1]): # this residue
                            model.atom[c].resn = histidine
                        resn = histidine
                    if resn == 'N-M': # N-methyl from Insight II,
                        for c in range(a[0],a[1]): # this residue
                            model.atom[c].resn = 'NME'
                        resn = 'NME'
                    # find out if this is n or c terminal residue
                    names = []
                    for b in range(a[0],a[1]):
                        names.append(model.atom[b].name)
                    tmpl = protein_residues.normal
                    if forcefield:
                        ffld = forcefield.normal
                    for b in N_TERMINAL_ATOMS:
                        if b in names:
                            tmpl = protein_residues.n_terminal
                            if forcefield:
                                ffld = forcefield.n_terminal
                            break
                    for b in C_TERMINAL_ATOMS:
                        if b in names:
                            tmpl = protein_residues.c_terminal
                            if forcefield:
                                ffld = forcefield.c_terminal
                            break
                    if not tmpl.has_key(resn):
                        raise RuntimeError("unknown residue type '"+resn+"'")
                    else:
                        # reassign atom names and build dictionary
                        dict = {}
                        aliases = tmpl[resn]['aliases']
                        for b in range(a[0],a[1]):
                            at = model.atom[b]
                            if aliases.has_key(at.name):
                                at.name = aliases[at.name]
                            dict[at.name] = b
                            if forcefield:
                                k = (resn,at.name)
                                if ffld.has_key(k):
                                    at.text_type = ffld[k]['type']
                                    at.partial_charge = ffld[k]['charge']
                                else:
                                    raise RuntimeError("no parameters for '"+str(k)+"'")
                        # now add bonds for atoms which are present
                        bonds = tmpl[resn]['bonds']
                        mbond = model.bond
                        for b in bonds.keys():
                            if dict.has_key(b[0]) and dict.has_key(b[1]):
                                bnd = Bond()
                                bnd.index = [ dict[b[0]], dict[b[1]] ]
                                bnd.order = bonds[b]['order']
                                mbond.append(bnd)
                        if dict.has_key('N'): # connect residues N-C based on distance
                            cur_n = dict['N']
                            at = model.atom[cur_n]
                            lst = nbr.get_neighbors(at.coord)
                            for b in lst:
                                at2 = model.atom[b]
                                if at2.name=='C':
                                    if not at2.in_same_residue(at):
                                        dst = distance(at.coord,at2.coord)
                                        if dst<=PEPT_CUTOFF:
                                            bnd=Bond()
                                            bnd.index = [cur_n,b]
                                            bnd.order = 1
                                            mbond.append(bnd)
                                            # one peptide bond per N is enough
                                            break
                        if dict.has_key('SG'): # cysteine
                            cur = dict['SG']
                            at = model.atom[cur]
                            lst = nbr.get_neighbors(at.coord)
                            for b in lst:
                                if b>cur: # only do this once (only when b>cur - i.e. this is 1st CYS)
                                    at2 = model.atom[b]
                                    if at2.name=='SG':
                                        if not at2.in_same_residue(at):
                                            dst = distance(at.coord,at2.coord)
                                            if dst<=MAX_BOND_LEN:
                                                # disulfide bridge: add the S-S bond and retype to CYX
                                                bnd=Bond()
                                                bnd.index = [cur,b]
                                                bnd.order = 1
                                                mbond.append(bnd)
                                                if forcefield:
                                                    for c in range(a[0],a[1]): # this residue
                                                        atx = model.atom[c]
                                                        atx.resn = 'CYX'
                                                        resn = atx.resn
                                                        k = ('CYX',atx.name)
                                                        if ffld.has_key(k):
                                                            atx.text_type = ffld[k]['type']
                                                            atx.partial_charge = ffld[k]['charge']
                                                        else:
                                                            raise RuntimeError("no parameters for '"+str(k)+"'")
                                                for d in res_list:
                                                    if (b>=d[0]) and (b<d[1]): # find other residue
                                                        for c in range(d[0],d[1]):
                                                            atx = model.atom[c]
                                                            atx.resn = 'CYX'
                                                        # since b>cur, assume assignment later on
                                                        break
#---------------------------------------------------------------------------------
def add_hydrogens(model,forcefield=protein_amber,skip_sort=None):
    '''
    Add hydrogens listed in the residue templates but absent from the
    model. For every template bond with exactly one endpoint present,
    a new hydrogen atom is created, typed/charged from the forcefield,
    and bonded to the existing heavy atom. Operates on a Connected
    model in place; sorts the model afterwards unless skip_sort is set.
    '''
    # assumes no bonds between non-hetatms
    if feedback['actions']:
        print " "+str(__name__)+": adding hydrogens..."
    if str(model.__class__) != 'chempy.models.Connected':
        raise ValueError('model is not a "Connected" model object')
    if model.nAtom:
        if not model.index:
            model.update_index()
        res_list = model.get_residues()
        if len(res_list):
            for a in res_list:
                base = model.atom[a[0]]
                if not base.hetatm:
                    resn = base.resn
                    # find out if this is n or c terminal residue
                    names = []
                    for b in range(a[0],a[1]):
                        names.append(model.atom[b].name)
                    tmpl = protein_residues.normal
                    if forcefield:
                        ffld = forcefield.normal
                    for b in N_TERMINAL_ATOMS:
                        if b in names:
                            tmpl = protein_residues.n_terminal
                            if forcefield:
                                ffld = forcefield.n_terminal
                            break
                    for b in C_TERMINAL_ATOMS:
                        if b in names:
                            tmpl = protein_residues.c_terminal
                            if forcefield:
                                ffld = forcefield.c_terminal
                            break
                    if not tmpl.has_key(resn):
                        raise RuntimeError("unknown residue type '"+resn+"'")
                    else:
                        # build dictionary
                        dict = {}
                        for b in range(a[0],a[1]):
                            at = model.atom[b]
                            dict[at.name] = b
                        # find missing bonds with hydrogens
                        bonds = tmpl[resn]['bonds']
                        mbond = model.bond
                        for b in bonds.keys():
                            # case 1: first endpoint present, second missing
                            if dict.has_key(b[0]) and (not dict.has_key(b[1])):
                                at = model.atom[dict[b[0]]]
                                if at.symbol != 'H':
                                    name = b[1]
                                    symbol = tmpl[resn]['atoms'][name]['symbol']
                                    if symbol == 'H':
                                        newat = at.new_in_residue()
                                        newat.name = name
                                        newat.symbol = symbol
                                        k = (resn,newat.name)
                                        newat.text_type = ffld[k]['type']
                                        newat.partial_charge = ffld[k]['charge']
                                        idx1 = model.index[id(at)]
                                        idx2 = model.add_atom(newat)
                                        bnd = Bond()
                                        bnd.index = [ idx1, idx2 ]
                                        bnd.order = bonds[b]['order']
                                        # Connected models keep a per-atom bond list
                                        mbond[idx1].append(bnd)
                                        mbond[idx2].append(bnd)
                            # case 2: mirror image — second endpoint present, first missing
                            if (not dict.has_key(b[0])) and dict.has_key(b[1]):
                                at = model.atom[dict[b[1]]]
                                if at.symbol != 'H':
                                    name = b[0]
                                    symbol = tmpl[resn]['atoms'][name]['symbol']
                                    if symbol == 'H':
                                        newat = at.new_in_residue()
                                        newat.name = name
                                        newat.symbol = symbol
                                        k = (resn,newat.name)
                                        newat.text_type = ffld[k]['type']
                                        newat.partial_charge = ffld[k]['charge']
                                        idx1 = model.index[id(at)]
                                        idx2 = model.add_atom(newat)
                                        bnd = Bond()
                                        bnd.index = [ idx1, idx2 ]
                                        bnd.order = bonds[b]['order']
                                        mbond[idx1].append(bnd)
                                        mbond[idx2].append(bnd)
    if not skip_sort:
        model.sort()
|
gratefulfrog/lib
|
python/chempy/protein.py
|
Python
|
gpl-2.0
| 19,224
|
[
"ChemPy",
"PyMOL"
] |
1d25836613348a60d810d54908b0d28e192fe495f7522b4fe9dee937b43d9a54
|
from rdkit import DataStructs
from rdkit import RDConfig
import unittest,os
def feq(a,b,tol=1e-4):
    """Return True when a and b differ by less than tol (absolute)."""
    delta = a - b
    return -tol < delta < tol
class TestCase(unittest.TestCase):
def setUp(self) :
    """Open the 100-entry ZINC FPB test file and initialize the reader."""
    self.dirname = os.path.join(RDConfig.RDBaseDir,'Code','DataStructs','testData')
    self.filename = os.path.join(self.dirname,'zim.head100.fpb')
    reader = DataStructs.FPBReader(self.filename)
    reader.Init()   # memory-maps the file; required before any query
    self.fpbr = reader
def test1Basics(self) :
    """Size, ids, and fingerprint retrieval from an FPBReader."""
    self.assertEqual(len(self.fpbr), 100)
    self.assertEqual(self.fpbr.GetNumBits(), 2048)
    # spot-check a couple of compound ids
    self.assertEqual(self.fpbr.GetId(0), "ZINC00902219")
    self.assertEqual(self.fpbr.GetId(3), "ZINC04803506")
    fp = self.fpbr.GetFP(0)
    self.assertEqual(fp.GetNumBits(), 2048)
    self.assertEqual(fp.GetNumOnBits(), 17)
    expectedOn = (1, 80, 183, 222, 227, 231, 482, 650, 807,
                  811, 831, 888, 1335, 1411, 1664, 1820, 1917)
    self.assertEqual(expectedOn, tuple(fp.GetOnBits()))
    # operator[] returns a (fingerprint, id) pair
    fp2, name = self.fpbr[0]
    self.assertEqual(name, "ZINC00902219")
    self.assertEqual(fp2.GetNumOnBits(), 17)
def test2Tanimoto(self) :
    """Tanimoto similarity against the raw bytes of a stored fingerprint."""
    query = self.fpbr.GetBytes(0)
    # a fingerprint is maximally similar to itself
    self.assertAlmostEqual(self.fpbr.GetTanimoto(0, query), 1.0, 4)
    self.assertAlmostEqual(self.fpbr.GetTanimoto(1, query), 0.3704, 4)
    # default threshold: only the identical fingerprint comes back
    hits = self.fpbr.GetTanimotoNeighbors(query)
    self.assertEqual(len(hits), 1)
    self.assertEqual(hits[0][1], 0)
    self.assertAlmostEqual(hits[0][0], 1., 4)
    # a lower threshold admits more neighbors, best first
    hits = self.fpbr.GetTanimotoNeighbors(query, threshold=0.3)
    self.assertEqual(len(hits), 5)
    self.assertEqual(hits[0][1], 0)
    self.assertAlmostEqual(hits[0][0], 1., 4)
    self.assertEqual(hits[1][1], 1)
    self.assertAlmostEqual(hits[1][0], 0.3704, 4)
def test3Tversky(self) :
    """Tversky similarity; alpha=beta=1 reduces to Tanimoto."""
    query = self.fpbr.GetBytes(0)
    self.assertAlmostEqual(self.fpbr.GetTversky(0, query, 1, 1), 1.0, 4)
    self.assertAlmostEqual(self.fpbr.GetTversky(1, query, 1, 1), 0.3704, 4)
    # default threshold: only the identical fingerprint comes back
    hits = self.fpbr.GetTverskyNeighbors(query, 1, 1)
    self.assertEqual(len(hits), 1)
    self.assertEqual(hits[0][1], 0)
    self.assertAlmostEqual(hits[0][0], 1., 4)
    # a lower threshold admits more neighbors, best first
    hits = self.fpbr.GetTverskyNeighbors(query, 1, 1, threshold=0.3)
    self.assertEqual(len(hits), 5)
    self.assertEqual(hits[0][1], 0)
    self.assertAlmostEqual(hits[0][0], 1., 4)
    self.assertEqual(hits[1][1], 1)
    self.assertAlmostEqual(hits[1][0], 0.3704, 4)
def test4Contains(self):
    """GetContainingNeighbors: stored fps that contain all query bits."""
    # a fingerprint always contains itself
    hits = self.fpbr.GetContainingNeighbors(self.fpbr.GetBytes(0))
    self.assertEqual(len(hits), 1)
    self.assertEqual(hits[0], 0)
    # entry 1's bits are a subset of entries 1-4
    hits = self.fpbr.GetContainingNeighbors(self.fpbr.GetBytes(1))
    self.assertEqual(len(hits), 4)
    self.assertEqual(hits, (1, 2, 3, 4))
def test5Contains(self):
    """A substructure-screening example using a pattern fingerprint file."""
    fpbFile = os.path.join(RDConfig.RDBaseDir, 'Code', 'DataStructs',
                           'testData', 'zinc_all_clean.100.patt1k.fpb')
    reader = DataStructs.FPBReader(fpbFile)
    reader.Init()
    # raw pattern-fingerprint bytes of the query molecule
    query = b'\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x000\x00@\x00 \x00\x00 \x00\x00\x02@\x00\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00`\x07\x00\x04\x00"\x14\x02\x00\x00"\x00\x00\x00\x00\x08\x00\x80\x00\x00@\x00@\x00\x80\x00\x00\x00\x00B\x00\x00\x80\x00\x80\x08\x00\x04\x00@\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x80\x04\x00\x00\x0c\x00\x00\x00@\x88\x10\x10\x00\x00\x88\x00@'
    hits = reader.GetContainingNeighbors(query)
    self.assertEqual(len(hits), 9)
    expected = ['ZINC00000562',
                'ZINC00000843',
                'ZINC00000969',
                'ZINC00001484',
                'ZINC00001585',
                'ZINC00002094',
                'ZINC00004739',
                'ZINC00005235',
                'ZINC00006300']
    self.assertEqual(sorted(reader.GetId(i) for i in hits), expected)
def test6MultiFPBReaderTani(self):
    """Tanimoto search across a MultiFPBReader built from four FPB files;
    single- and multi-threaded runs must return identical results."""
    basen = os.path.join(RDConfig.RDBaseDir,'Code','DataStructs','testData')
    mfpbr = DataStructs.MultiFPBReader()
    # AddReader returns the running count of child readers
    for idx in range(1, 5):
        child = DataStructs.FPBReader(os.path.join(basen, "zinc_random200.%d.patt.fpb" % idx))
        self.assertEqual(mfpbr.AddReader(child), idx)
    mfpbr.Init()
    self.assertEqual(mfpbr.GetNumBits(), 1024)
    self.assertEqual(len(mfpbr), 4)
    fps = "0000000000404000100000001000040000300040222000002004000240000020000000"+\
          "8200010200000090000024040860070044003214820000220401054008018000226000"+\
          "4800800140000042000080008008020482400000200410800000300430200800400000"+\
          "0000080a0000800400010c800200648818100010880040"
    qbytes = DataStructs.BitVectToBinaryText(DataStructs.CreateFromFPSText(fps))
    # (similarity, index-within-reader, reader-index), best first
    expected = ((0.66412, 0, 3),
                (0.65289, 1, 2),
                (0.64341, 2, 1),
                (0.61940, 1, 0),
                (0.61905, 0, 0),
                (0.61344, 0, 1))
    # numThreads is a no-op when the RDKit is built without thread support
    for kwargs in ({}, {'numThreads': 4}):
        nbrs = mfpbr.GetTanimotoNeighbors(qbytes, threshold=0.6, **kwargs)
        self.assertEqual(len(nbrs), len(expected))
        for got, want in zip(nbrs, expected):
            self.assertAlmostEqual(got[0], want[0], 4)
            self.assertEqual(got[1], want[1])
            self.assertEqual(got[2], want[2])
def test7MultiFPBReaderContains(self):
    """Containment (substructure) search across a MultiFPBReader;
    single- and multi-threaded runs must return identical results."""
    basen = os.path.join(RDConfig.RDBaseDir,'Code','DataStructs','testData')
    mfpbr = DataStructs.MultiFPBReader()
    for idx in range(1, 5):
        child = DataStructs.FPBReader(os.path.join(basen, "zinc_random200.%d.patt.fpb" % idx))
        self.assertEqual(mfpbr.AddReader(child), idx)
    mfpbr.Init()
    self.assertEqual(mfpbr.GetNumBits(), 1024)
    self.assertEqual(len(mfpbr), 4)
    fps = "40081010824820021000500010110410003000402b20285000a4040240010030050000"+\
          "080001420040009000003d04086007080c03b31d920004220400074008098010206080"+\
          "00488001080000c64002a00080000200024c2000602410049200340820200002400010"+\
          "02200106090401056801080182006088101000088a0048"
    qbytes = DataStructs.BitVectToBinaryText(DataStructs.CreateFromFPSText(fps))
    # (index-within-reader, reader-index) pairs
    expected = ((160, 0), (163, 0), (170, 0), (180, 2), (182, 3),
                (185, 0), (189, 0), (192, 3), (193, 0))
    for kwargs in ({}, {'numThreads': 4}):
        nbrs = mfpbr.GetContainingNeighbors(qbytes, **kwargs)
        self.assertEqual(len(nbrs), len(expected))
        for got, want in zip(nbrs, expected):
            self.assertEqual(got[0], want[0])
            self.assertEqual(got[1], want[1])
def test8MultiFPBReaderContainsInitOnSearch(self):
basen = os.path.join(RDConfig.RDBaseDir,'Code','DataStructs','testData')
mfpbr = DataStructs.MultiFPBReader(initOnSearch=True)
self.assertEqual(mfpbr.AddReader(DataStructs.FPBReader(os.path.join(basen,"zinc_random200.1.patt.fpb"))),1)
self.assertEqual(mfpbr.AddReader(DataStructs.FPBReader(os.path.join(basen,"zinc_random200.2.patt.fpb"))),2)
self.assertEqual(mfpbr.AddReader(DataStructs.FPBReader(os.path.join(basen,"zinc_random200.3.patt.fpb"))),3)
self.assertEqual(mfpbr.AddReader(DataStructs.FPBReader(os.path.join(basen,"zinc_random200.4.patt.fpb"))),4)
fps = "40081010824820021000500010110410003000402b20285000a4040240010030050000"+\
"080001420040009000003d04086007080c03b31d920004220400074008098010206080"+\
"00488001080000c64002a00080000200024c2000602410049200340820200002400010"+\
"02200106090401056801080182006088101000088a0048";
ebv = DataStructs.CreateFromFPSText(fps)
bytes = DataStructs.BitVectToBinaryText(ebv)
nbrs = mfpbr.GetContainingNeighbors(bytes,numThreads=4)
self.assertEqual(len(nbrs),9)
self.assertEqual(nbrs[0][0],160)
self.assertEqual(nbrs[0][1],0)
self.assertEqual(nbrs[1][0],163)
self.assertEqual(nbrs[1][1],0)
self.assertEqual(nbrs[2][0],170)
self.assertEqual(nbrs[2][1],0)
self.assertEqual(nbrs[3][0],180)
self.assertEqual(nbrs[3][1],2)
self.assertEqual(nbrs[4][0],182)
self.assertEqual(nbrs[4][1],3)
self.assertEqual(nbrs[5][0],185)
self.assertEqual(nbrs[5][1],0)
self.assertEqual(nbrs[6][0],189)
self.assertEqual(nbrs[6][1],0)
self.assertEqual(nbrs[7][0],192)
self.assertEqual(nbrs[7][1],3)
self.assertEqual(nbrs[8][0],193)
self.assertEqual(nbrs[8][1],0)
def test9MultiFPBReaderEdges(self):
basen = os.path.join(RDConfig.RDBaseDir,'Code','DataStructs','testData')
mfpbr = DataStructs.MultiFPBReader()
mfpbr.Init();
fps = "0000000000404000100000001000040000300040222000002004000240000020000000"+\
"8200010200000090000024040860070044003214820000220401054008018000226000"+\
"4800800140000042000080008008020482400000200410800000300430200800400000"+\
"0000080a0000800400010c800200648818100010880040"
ebv = DataStructs.CreateFromFPSText(fps)
bytes = DataStructs.BitVectToBinaryText(ebv)
nbrs = mfpbr.GetTanimotoNeighbors(bytes,threshold=0.6)
self.assertEqual(len(nbrs),0)
# Allow running this test file directly (pytest/nose also discover the class).
if __name__ == '__main__':
  unittest.main()
|
adalke/rdkit
|
Code/DataStructs/Wrap/testFPB.py
|
Python
|
bsd-3-clause
| 12,163
|
[
"RDKit"
] |
6b5ff0f82e38f363792f3e6d50d155338d063cc7151abb51177f390eff896d46
|
from django.apps import apps as django_apps
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
@receiver(post_save, weak=False, dispatch_uid="metadata_create_on_post_save")
def metadata_create_on_post_save(sender, instance, raw, created, using,
                                 update_fields, **kwargs):
    """Create all meta data on post save of a model that uses
    CreatesMetaDataModelMixin (for example, the visit model).

    Skipped entirely for fixture loads (``raw``).
    """
    if raw:
        return
    try:
        instance.reference_creator_cls(model_obj=instance)
    except AttributeError:
        pass
    try:
        instance.metadata_create(sender=sender, instance=instance)
    except AttributeError as err:
        # Models without the mixin simply lack ``metadata_create``; any
        # other AttributeError is a genuine bug and must propagate.
        if 'metadata_create' not in str(err):
            raise
    else:
        app_config = django_apps.get_app_config('edc_metadata_rules')
        if app_config.metadata_rules_enabled:
            instance.run_metadata_rules()
@receiver(post_save, weak=False, dispatch_uid="metadata_update_on_post_save")
def metadata_update_on_post_save(sender, instance, raw, created, using,
                                 update_fields, **kwargs):
    """Update the meta data record on post save of a CRF model.

    Skipped for fixture loads (``raw``) and partial saves
    (``update_fields``).
    """
    if raw or update_fields:
        return
    try:
        instance.reference_updater_cls(model_obj=instance)
    except AttributeError:
        pass
    try:
        instance.metadata_update()
    except AttributeError as err:
        # Only swallow the "model has no metadata_update" case.
        if 'metadata_update' not in str(err):
            raise
    else:
        app_config = django_apps.get_app_config('edc_metadata_rules')
        if app_config.metadata_rules_enabled:
            instance.run_metadata_rules_for_crf()
@receiver(post_delete, weak=False, dispatch_uid="metadata_reset_on_post_delete")
def metadata_reset_on_post_delete(sender, instance, using, **kwargs):
    """Delete a single metadata instance (UpdatesMetadataMixin models).

    Calls reference_deleter_cls explicitly in case this signal fires before
    the post_delete signal in edc_reference.
    """
    try:
        instance.reference_deleter_cls(model_obj=instance)
    except AttributeError:
        pass
    try:
        instance.metadata_reset_on_delete()
    except AttributeError as err:
        # Only swallow the "model has no metadata_reset_on_delete" case.
        if 'metadata_reset_on_delete' not in str(err):
            raise
    else:
        app_config = django_apps.get_app_config('edc_metadata_rules')
        if app_config.metadata_rules_enabled:
            instance.run_metadata_rules_for_crf()
    # Delete all metadata for a visit (CreatesMetadataMixin models).
    try:
        instance.metadata_delete_for_visit()
    except AttributeError as err:
        if 'metadata_delete_for_visit' not in str(err):
            raise
|
botswana-harvard/edc-meta-data
|
edc_metadata/signals.py
|
Python
|
gpl-2.0
| 2,695
|
[
"VisIt"
] |
67d58ffa616effadac9496841de247dba534edb1886e38852353afc0a060be0d
|
from __future__ import print_function
from miasm.expression.expression import *
from pdb import pm
# Banner: this script doubles as a demo and a lightweight regression test.
print("""
Expression simplification demo.
(and regression test)
""")
# 32-bit symbolic identifiers used throughout the demo.
a = ExprId('a', 32)
b = ExprId('b', 32)
c = ExprId('c', 32)
d = ExprId('d', 32)
e = ExprId('e', 32)
# 32-bit memory access at address ``a`` and an 8-bit slice of ``a``.
m = ExprMem(a, 32)
s = a[:8]
# 32-bit integer constants.
i1 = ExprInt(0x1, 32)
i2 = ExprInt(0x2, 32)
# Conditional expression over ``a`` selecting ``b``/``c``.
cc = ExprCond(a, b, c)
# Composition of two byte slices of ``a`` (built twice on purpose so the
# regression part can compare structurally-equal objects).
o = ExprCompose(a[8:16], a[:8])
o2 = ExprCompose(a[8:16], a[:8])
# Assorted 8-bit expressions, plus the same list reversed.
l = [a[:8], b[:8], c[:8], m[:8], s, i1[:8], i2[:8], o[:8]]
l2 = l[::-1]
# Memory access whose address mixes identifiers and a constant; exercised
# below via visit()/replace_expr().
x = ExprMem(a + b + ExprInt(0x42, 32), 32)
def replace_expr(e):
    """Substitution callback for ``Expr.visit``: map the two known
    sub-expressions (``c + 0x42 -> d`` and ``a + b -> c``) and return any
    other expression unchanged.
    """
    substitutions = {
        c + ExprInt(0x42, 32): d,
        a + b: c,
    }
    return substitutions.get(e, e)
print(x)
# visit() applies the callback across sub-expressions, rebuilding x.
y = x.visit(replace_expr)
print(y)
# Copies print identically and compare equal to the originals.
print(x.copy())
print(y.copy())
print(y == y.copy())
print(repr(y), repr(y.copy()))
# Composition mixing a slice of ``a``, of ``b`` and of the memory access.
z = ExprCompose(a[5:5 + 8], b[:16], x[:8])
print(z)
print(z.copy())
print(z[:31].copy().visit(replace_expr))
print('replace')
# replace_expr() on an expression performs the same substitution directly,
# without the callback indirection.
print(x.replace_expr({c + ExprInt(0x42, 32): d,
                      a + b: c, }))
print(z.replace_expr({c + ExprInt(0x42, 32): d,
                      a + b: c, }))
u = z.copy()
print(u)
|
mrphrazer/miasm
|
example/expression/simplification_tools.py
|
Python
|
gpl-2.0
| 1,140
|
[
"VisIt"
] |
7938aed94618325d5864aa05427937af5897cd8fefcb9981a7ae19e09d9bbac8
|
"""
.. module:: VisVTK
:platform: Unix, Windows
:synopsis: VTK visualization component for NURBS-Python
.. moduleauthor:: Onur Rauf Bingol <orbingol@gmail.com>
"""
from random import random
from . import vis
from . import vtk_helpers as vtkh
import numpy as np
from vtk.util.numpy_support import numpy_to_vtk
from vtk import VTK_FLOAT
class VisConfig(vis.VisConfigAbstract):
    """ Configuration class for the VTK visualization module.

    Only needed when the visual defaults of the plots/figure should be
    changed. ``VisVTK`` understands the following configuration variables:

    * ``ctrlpts`` (bool): Control points polygon/grid visibility. *Default: True*
    * ``evalpts`` (bool): Curve/surface points visibility. *Default: True*
    * ``trims`` (bool): Trim curve visibility. *Default: True*
    * ``trim_size`` (int): Size of the trim curves. *Default: 4*
    * ``figure_size`` (list): Size of the figure in (x, y). *Default: (800, 600)*
    * ``line_width`` (int): Thickness of the lines on the figure. *Default: 1.0*
    """
    def __init__(self, **kwargs):
        super(VisConfig, self).__init__(**kwargs)
        # Background colors cycled by the 'b' keypress.
        self._bg = (
            (0.5, 0.5, 0.5), (0.2, 0.2, 0.2), (0.25, 0.5, 0.75), (1.0, 1.0, 0.0),
            (1.0, 0.5, 0.0), (0.5, 0.0, 1.0), (0.0, 0.0, 0.0), (1.0, 1.0, 1.0)
        )
        self._bg_id = 0  # index of the next background color to apply
        self.display_ctrlpts = kwargs.get('ctrlpts', True)
        self.display_evalpts = kwargs.get('evalpts', True)
        self.display_trims = kwargs.get('trims', True)
        self.trim_size = kwargs.get('trim_size', 4)
        self.figure_size = kwargs.get('figure_size', (800, 600))  # render window size
        self.line_width = kwargs.get('line_width', 1.0)

    def keypress_callback(self, obj, ev):
        """ VTK callback for keypress events.

        Handled keys:

        * ``e``: exit the application
        * ``p``: pick object (hover the mouse and then press to pick)
        * ``f``: fly to point (click somewhere in the window and press to fly)
        * ``r``: reset the camera
        * ``s`` and ``w``: switch between solid and wireframe modes
        * ``b``: change background color
        * ``m``: change color of the picked object
        * ``d``: print debug information (of picked object, point, etc.)
        * ``h``: change object visibility
        * ``n``: reset object visibility
        * ``arrow keys``: pan the model

        Keys not listed above are handled by VTK itself; see the
        `vtkInteractorStyle <https://vtk.org/doc/nightly/html/classvtkInteractorStyle.html>`_
        class reference for details.

        :param obj: render window interactor
        :type obj: vtkRenderWindowInteractor
        :param ev: event name
        :type ev: str
        """
        pressed = obj.GetKeySym()  # pressed key (as str)
        win = obj.GetRenderWindow()  # vtkRenderWindow
        ren = win.GetRenderers().GetFirstRenderer()  # vtkRenderer
        picker = obj.GetPicker()  # vtkPropPicker
        picked = picker.GetActor()  # vtkActor (may be None)

        # Custom keypress handling; key values are mutually exclusive.
        if pressed in ('Up', 'Down', 'Left', 'Right'):
            cam = ren.GetActiveCamera()  # vtkCamera
            if pressed == 'Up':
                cam.Pitch(2.5)
            elif pressed == 'Down':
                cam.Pitch(-2.5)
            elif pressed == 'Left':
                cam.Yaw(-2.5)
            else:
                cam.Yaw(2.5)
        elif pressed == 'b':
            # Wrap around the background color cycle.
            if self._bg_id >= len(self._bg):
                self._bg_id = 0
            ren.SetBackground(*self._bg[self._bg_id])
            self._bg_id += 1
        elif pressed == 'm':
            if picked is not None:
                picked.GetProperty().SetColor(random(), random(), random())
        elif pressed == 'd':
            if picked is not None:
                print("Name:", picked.GetMapper().GetArrayName())
                print("Index:", picked.GetMapper().GetArrayId())
                print("Selected point:", picker.GetSelectionPoint()[0:2])
                print("# of visible actors:", ren.VisibleActorCount())
        elif pressed == 'h':
            if picked is not None:
                picked.SetVisibility(not picked.GetVisibility())
        elif pressed == 'n':
            for scene_actor in ren.GetActors():  # vtkActorCollection
                scene_actor.VisibilityOn()

        # Always refresh the render window after handling the key.
        win.Render()
class VisCurve3D(vis.VisAbstract):
    """ VTK visualization module for curves. """
    # NOTE(review): the shared ``config=VisConfig()`` default is evaluated once,
    # so all instances without an explicit config share one VisConfig object --
    # kept as-is since callers may rely on it.
    def __init__(self, config=VisConfig(), **kwargs):
        super(VisCurve3D, self).__init__(config, **kwargs)

    def render(self, **kwargs):
        """ Plots the curve and the control points polygon.

        Builds one VTK actor per plot element (control-point spheres, control
        polygon lines, evaluated curve polyline) and hands them to the render
        window helper. 2-dimensional input points are zero-padded to 3-D.
        """
        # Calling parent function
        super(VisCurve3D, self).render(**kwargs)

        # Initialize a list to store VTK actors
        vtk_actors = []

        # Start plotting
        for plot in self._plots:
            # Plot control points
            if plot['type'] == 'ctrlpts' and self.vconf.display_ctrlpts:
                # Points as spheres. ``np.float`` was removed in NumPy 1.24;
                # the builtin ``float`` is the documented replacement.
                pts = np.array(plot['ptsarr'], dtype=float)
                # Handle 2-dimensional data by appending a zero z-coordinate
                if pts.shape[1] == 2:
                    pts = np.c_[pts, np.zeros(pts.shape[0], dtype=float)]
                # NOTE(review): deep=False keeps VTK pointing at the temporary
                # numpy array -- pre-existing pattern, confirm lifetime is safe.
                vtkpts = numpy_to_vtk(pts, deep=False, array_type=VTK_FLOAT)
                vtkpts.SetName(plot['name'])
                # Was ``idx=plot['idx']``: every other call site in this file
                # passes ``index=``; use the consistent keyword.
                actor1 = vtkh.create_actor_pts(pts=vtkpts, color=vtkh.create_color(plot['color']),
                                               name=plot['name'], index=plot['idx'])
                vtk_actors.append(actor1)
                # Lines
                actor2 = vtkh.create_actor_polygon(pts=vtkpts, color=vtkh.create_color(plot['color']),
                                                   name=plot['name'], index=plot['idx'], size=self.vconf.line_width)
                vtk_actors.append(actor2)
            # Plot evaluated points
            if plot['type'] == 'evalpts' and self.vconf.display_evalpts:
                pts = np.array(plot['ptsarr'], dtype=float)
                # Handle 2-dimensional data
                if pts.shape[1] == 2:
                    pts = np.c_[pts, np.zeros(pts.shape[0], dtype=float)]
                vtkpts = numpy_to_vtk(pts, deep=False, array_type=VTK_FLOAT)
                vtkpts.SetName(plot['name'])
                actor1 = vtkh.create_actor_polygon(pts=vtkpts, color=vtkh.create_color(plot['color']),
                                                   name=plot['name'], index=plot['idx'], size=self.vconf.line_width * 2)
                vtk_actors.append(actor1)

        # Render actors
        return vtkh.create_render_window(vtk_actors, dict(KeyPressEvent=(self.vconf.keypress_callback, 1.0)),
                                         figure_size=self.vconf.figure_size)
# 2-dimensional curves reuse the 3-D module: VisCurve3D.render zero-pads
# 2-D control/evaluated points to 3-D, so a plain alias suffices.
VisCurve2D = VisCurve3D
class VisSurface(vis.VisAbstract):
    """ VTK visualization module for surfaces. """
    def __init__(self, config=VisConfig(), **kwargs):
        super(VisSurface, self).__init__(config, **kwargs)
        # Control points are organized as quads, evaluated points as triangles.
        self._module_config['ctrlpts'] = "quads"
        self._module_config['evalpts'] = "triangles"

    def render(self, **kwargs):
        """ Plots the surface and the control points grid.

        Control points are drawn as spheres joined by a quad mesh; the
        evaluated surface is drawn as a triangulation. Trim curves are drawn
        on top when enabled in the configuration.
        """
        # Calling parent function
        super(VisSurface, self).render(**kwargs)

        # Initialize a list to store VTK actors
        vtk_actors = []

        # Start plotting
        for plot in self._plots:
            # Plot control points
            if plot['type'] == 'ctrlpts' and self.vconf.display_ctrlpts:
                vertices = [v.data for v in plot['ptsarr'][0]]
                faces = [q.data for q in plot['ptsarr'][1]]
                # Points as spheres. ``np.float``/``np.int`` were removed in
                # NumPy 1.24; the builtins are the documented replacements.
                pts = np.array(vertices, dtype=float)
                vtkpts = numpy_to_vtk(pts, deep=False, array_type=VTK_FLOAT)
                vtkpts.SetName(plot['name'])
                actor1 = vtkh.create_actor_pts(pts=vtkpts, color=vtkh.create_color(plot['color']),
                                               name=plot['name'], index=plot['idx'])
                vtk_actors.append(actor1)
                # Quad mesh
                lines = np.array(faces, dtype=int)
                actor2 = vtkh.create_actor_mesh(pts=vtkpts, lines=lines, color=vtkh.create_color(plot['color']),
                                                name=plot['name'], index=plot['idx'], size=self.vconf.line_width)
                vtk_actors.append(actor2)
            # Plot evaluated points
            if plot['type'] == 'evalpts' and self.vconf.display_evalpts:
                vertices = [v.data for v in plot['ptsarr'][0]]
                # Convert to an ndarray first; numpy_to_vtk operates on arrays.
                pts = np.array(vertices, dtype=float)
                vtkpts = numpy_to_vtk(pts, deep=False, array_type=VTK_FLOAT)
                vtkpts.SetName(plot['name'])
                faces = [t.data for t in plot['ptsarr'][1]]
                tris = np.array(faces, dtype=int)
                actor1 = vtkh.create_actor_tri(pts=vtkpts, tris=tris, color=vtkh.create_color(plot['color']),
                                               name=plot['name'], index=plot['idx'])
                vtk_actors.append(actor1)
            # Plot trim curves
            if self.vconf.display_trims:
                if plot['type'] == 'trimcurve':
                    pts = np.array(plot['ptsarr'], dtype=float)
                    vtkpts = numpy_to_vtk(pts, deep=False, array_type=VTK_FLOAT)
                    vtkpts.SetName(plot['name'])
                    actor1 = vtkh.create_actor_polygon(pts=vtkpts, color=vtkh.create_color(plot['color']),
                                                       name=plot['name'], index=plot['idx'], size=self.vconf.trim_size)
                    vtk_actors.append(actor1)

        # Render actors
        return vtkh.create_render_window(vtk_actors, dict(KeyPressEvent=(self.vconf.keypress_callback, 1.0)),
                                         figure_size=self.vconf.figure_size)
class VisVolume(vis.VisAbstract):
    """ VTK visualization module for volumes. """
    def __init__(self, config=VisConfig(), **kwargs):
        super(VisVolume, self).__init__(config, **kwargs)
        # Both control and evaluated points are rendered as plain points.
        self._module_config['ctrlpts'] = "points"
        self._module_config['evalpts'] = "points"

    def render(self, **kwargs):
        """ Plots the volume and the control points.

        Control points and evaluated points are both rendered as point
        (sphere) actors.
        """
        # Calling parent function
        super(VisVolume, self).render(**kwargs)

        # Initialize a list to store VTK actors
        vtk_actors = []

        # Start plotting
        for plot in self._plots:
            # Plot control points
            if plot['type'] == 'ctrlpts' and self.vconf.display_ctrlpts:
                # Points as spheres. ``np.float`` was removed in NumPy 1.24;
                # the builtin ``float`` is the documented replacement.
                pts = np.array(plot['ptsarr'], dtype=float)
                vtkpts = numpy_to_vtk(pts, deep=False, array_type=VTK_FLOAT)
                vtkpts.SetName(plot['name'])
                temp_actor = vtkh.create_actor_pts(pts=vtkpts, color=vtkh.create_color(plot['color']),
                                                   name=plot['name'], index=plot['idx'])
                vtk_actors.append(temp_actor)
            # Plot evaluated points
            if plot['type'] == 'evalpts' and self.vconf.display_evalpts:
                pts = np.array(plot['ptsarr'], dtype=float)
                vtkpts = numpy_to_vtk(pts, deep=False, array_type=VTK_FLOAT)
                vtkpts.SetName(plot['name'])
                temp_actor = vtkh.create_actor_pts(pts=vtkpts, color=vtkh.create_color(plot['color']),
                                                   name=plot['name'], index=plot['idx'])
                vtk_actors.append(temp_actor)

        # Render actors
        return vtkh.create_render_window(vtk_actors, dict(KeyPressEvent=(self.vconf.keypress_callback, 1.0)),
                                         figure_size=self.vconf.figure_size)
class VisVoxel(vis.VisAbstract):
    """ VTK visualization module for voxel representation of the volumes. """
    def __init__(self, config=VisConfig(), **kwargs):
        super(VisVoxel, self).__init__(config, **kwargs)
        self._module_config['ctrlpts'] = "points"
        self._module_config['evalpts'] = "voxels"

    def render(self, **kwargs):
        """ Plots the volume and the control points.

        Control points are rendered as point actors; the evaluated volume is
        rendered as hexahedron (voxel) actors for the filled grid cells only.
        """
        # Calling parent function
        super(VisVoxel, self).render(**kwargs)

        # Initialize a list to store VTK actors
        vtk_actors = []

        # Start plotting
        for plot in self._plots:
            # Plot control points
            if plot['type'] == 'ctrlpts' and self.vconf.display_ctrlpts:
                # Points as spheres. ``np.float`` was removed in NumPy 1.24;
                # the builtin ``float`` is the documented replacement.
                pts = np.array(plot['ptsarr'], dtype=float)
                vtkpts = numpy_to_vtk(pts, deep=False, array_type=VTK_FLOAT)
                vtkpts.SetName(plot['name'])
                temp_actor = vtkh.create_actor_pts(pts=vtkpts, color=vtkh.create_color(plot['color']),
                                                   name=plot['name'], index=plot['idx'])
                vtk_actors.append(temp_actor)
            # Plot evaluated points
            if plot['type'] == 'evalpts' and self.vconf.display_evalpts:
                # ``np.int`` was also removed in NumPy 1.24.
                faces = np.array(plot['ptsarr'][1], dtype=float)
                filled = np.array(plot['ptsarr'][2], dtype=int)
                # Keep only the grid cells flagged as filled.
                grid_filled = faces[filled == 1]
                temp_actor = vtkh.create_actor_hexahedron(grid=grid_filled, color=vtkh.create_color(plot['color']),
                                                          name=plot['name'], index=plot['idx'])
                vtk_actors.append(temp_actor)

        # Render actors
        return vtkh.create_render_window(vtk_actors, dict(KeyPressEvent=(self.vconf.keypress_callback, 1.0)),
                                         figure_size=self.vconf.figure_size)
|
orbingol/NURBS-Python
|
geomdl/visualization/VisVTK.py
|
Python
|
mit
| 14,148
|
[
"VTK"
] |
ac7289b92e12dd125e079bfe9de809bd740168b999aecf4c97bdfbc827300f71
|
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNet-50 on ImageNet using HetSNGP.
Spectral-normalized neural GP (SNGP) [1] is a simple method to improve
a deterministic neural network's uncertainty by applying spectral
normalization to hidden weights, and then replace the dense output layer with
a Gaussian process.
## Note:
Different from the paper, this implementation computes the posterior using the
Laplace approximation based on the Gaussian likelihood (i.e., squared loss)
rather than that based on cross-entropy loss. As a result, the logits for all
classes share the same covariance. In the experiments, this approach is shown to
perform better and computationally more scalable when the number of output
classes are large.
## References:
[1]: Jeremiah Liu et al. Simple and Principled Uncertainty Estimation with
Deterministic Deep Learning via Distance Awareness.
_arXiv preprint arXiv:2006.10108_, 2020.
https://arxiv.org/abs/2006.10108
[2]: Zhiyun Lu, Eugene Ie, Fei Sha. Uncertainty Estimation with Infinitesimal
Jackknife. _arXiv preprint arXiv:2006.07584_, 2020.
https://arxiv.org/abs/2006.07584
[3]: Felix Xinnan Yu et al. Orthogonal Random Features. In _Neural Information
Processing Systems_, 2016.
https://papers.nips.cc/paper/6246-orthogonal-random-features.pdf
"""
import os
import time
from absl import app
from absl import flags
from absl import logging
import robustness_metrics as rm
import tensorflow as tf
import tensorflow_datasets as tfds
import uncertainty_baselines as ub
import utils # local file import from baselines.imagenet
from tensorboard.plugins.hparams import api as hp
# Command-line flags for the HetSNGP ResNet-50 ImageNet baseline.
flags.DEFINE_integer('per_core_batch_size', 128, 'Batch size per TPU core/GPU.')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_float('base_learning_rate', 0.07,
                   'Base learning rate when train batch size is 256.')
flags.DEFINE_float('one_minus_momentum', 0.1, 'Optimizer momentum.')
flags.DEFINE_float('l2', 1e-4, 'L2 coefficient.')
flags.DEFINE_string('data_dir', None, 'Path to training and testing data.')
flags.DEFINE_string('output_dir', '/tmp/imagenet',
                    'The directory where the model weights and '
                    'training/evaluation summaries are stored.')
flags.DEFINE_integer('train_epochs', 270, 'Number of training epochs.')
flags.DEFINE_integer('corruptions_interval', 270,
                     'Number of epochs between evaluating on the corrupted '
                     'test data. Use -1 to never evaluate.')
flags.DEFINE_integer(
    'checkpoint_interval', -1,
    'Number of epochs between saving checkpoints. Use -1 to '
    'only save the last checkpoints.')
flags.DEFINE_string('alexnet_errors_path', None,
                    'Path to AlexNet corruption errors file.')
flags.DEFINE_integer('num_bins', 15, 'Number of bins for ECE computation.')
flags.DEFINE_float('train_proportion', default=1.0,
                   help='only use a proportion of training set and use the'
                   'rest for validation instead of the test set.')
flags.register_validator('train_proportion',
                         lambda tp: tp > 0.0 and tp <= 1.0,
                         message='--train_proportion must be in (0, 1].')

# Dropout flags.
flags.DEFINE_bool('use_mc_dropout', False,
                  'Whether to use Monte Carlo dropout during inference.')
flags.DEFINE_float('dropout_rate', 0., 'Dropout rate.')
flags.DEFINE_bool(
    'filterwise_dropout', True, 'Dropout whole convolutional'
    'filters instead of individual values in the feature map.')
flags.DEFINE_integer('num_dropout_samples', 1,
                     'Number of samples to use for MC Dropout prediction.')

# Spectral normalization flags.
flags.DEFINE_bool('use_spec_norm', True,
                  'Whether to apply spectral normalization.')
flags.DEFINE_integer(
    'spec_norm_iteration', 1,
    'Number of power iterations to perform for estimating '
    'the spectral norm of weight matrices.')
flags.DEFINE_float('spec_norm_bound', 6.,
                   'Upper bound to spectral norm of weight matrices.')

# Gaussian process flags.
flags.DEFINE_bool('use_gp_layer', True,
                  'Whether to use Gaussian process as the output layer.')
flags.DEFINE_float('gp_bias', 0., 'The bias term for GP layer.')
flags.DEFINE_float(
    'gp_scale', 1.,
    'The length-scale parameter for the RBF kernel of the GP layer.')
flags.DEFINE_integer(
    'gp_hidden_dim', 1024,
    'The hidden dimension of the GP layer, which corresponds to the number of '
    'random features used for the approximation.')
flags.DEFINE_bool(
    'gp_input_normalization', False,
    'Whether to normalize the input for GP layer using LayerNorm. This is '
    'similar to applying automatic relevance determination (ARD) in the '
    'classic GP literature.')
flags.DEFINE_string(
    'gp_random_feature_type', 'orf',
    'The type of random feature to use. One of "rff" (random Fourier feature), '
    '"orf" (orthogonal random feature) [3].')
flags.DEFINE_float('gp_cov_ridge_penalty', 1.,
                   'Ridge penalty parameter for GP posterior covariance.')
flags.DEFINE_float(
    'gp_cov_discount_factor', -1.,
    'The discount factor to compute the moving average of precision matrix.'
    'If -1 then instead compute the exact covariance at the lastest epoch.')
flags.DEFINE_bool(
    'gp_output_imagenet_initializer', True,
    'Whether to initialize GP output layer using Gaussian with small '
    'standard deviation (sd=0.01).')

# heteroscedastic flags
flags.DEFINE_integer('num_factors', 15,
                     'Num factors to approximate full rank covariance matrix.')
flags.DEFINE_float('temperature', 1.25,
                   'Temperature for heteroscedastic head.')
flags.DEFINE_integer('num_mc_samples', 5000,
                     'Num MC samples for heteroscedastic layer.')

# HetSNGP-specific flags
flags.DEFINE_float('sngp_var_weight', 1., 'Weight for the SNGP variance.')
flags.DEFINE_float('het_var_weight', 1., 'Weight for the het. variance.')

# Accelerator flags.
flags.DEFINE_bool('use_gpu', False, 'Whether to run on GPU or otherwise TPU.')
# TODO(jereliu): Support use_bfloat16=True which currently raises error with
# spectral normalization.
flags.DEFINE_bool('use_bfloat16', True, 'Whether to use mixed precision.')
flags.DEFINE_integer('num_cores', 32, 'Number of TPU cores or number of GPUs.')
flags.DEFINE_string('tpu', None,
                    'Name of the TPU. Only used if use_gpu is False.')
FLAGS = flags.FLAGS

# Number of images in ImageNet-1k train dataset.
# NOTE(review): FLAGS is read here at module import time; absl flags are
# normally only readable after app.run() parses argv. Confirm this module is
# imported post-parsing (or that the flag default is intended here).
APPROX_IMAGENET_TRAIN_IMAGES = int(1281167 * FLAGS.train_proportion)
# Number of images in eval dataset.
if FLAGS.train_proportion != 1.:
  # Held-out slice of the train split used as validation.
  IMAGENET_VALIDATION_IMAGES = 1281167 - APPROX_IMAGENET_TRAIN_IMAGES
else:
  # Standard ImageNet validation split.
  IMAGENET_VALIDATION_IMAGES = 50000
NUM_CLASSES = 1000
def main(argv):
del argv # unused arg
tf.io.gfile.makedirs(FLAGS.output_dir)
logging.info('Saving checkpoints at %s', FLAGS.output_dir)
tf.random.set_seed(FLAGS.seed)
batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
steps_per_epoch = APPROX_IMAGENET_TRAIN_IMAGES // batch_size
steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size
data_dir = FLAGS.data_dir
if FLAGS.use_gpu:
logging.info('Use GPU')
strategy = tf.distribute.MirroredStrategy()
else:
logging.info('Use TPU at %s',
FLAGS.tpu if FLAGS.tpu is not None else 'local')
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
train_builder = ub.datasets.ImageNetDataset(
split=tfds.Split.TRAIN,
use_bfloat16=FLAGS.use_bfloat16,
validation_percent=1. - FLAGS.train_proportion,
data_dir=data_dir)
train_dataset = train_builder.load(batch_size=batch_size, strategy=strategy)
if FLAGS.train_proportion != 1.:
test_builder = ub.datasets.ImageNetDataset(
split=tfds.Split.VALIDATION,
use_bfloat16=FLAGS.use_bfloat16,
validation_percent=1. - FLAGS.train_proportion,
data_dir=data_dir)
else:
test_builder = ub.datasets.ImageNetDataset(
split=tfds.Split.TEST,
use_bfloat16=FLAGS.use_bfloat16,
data_dir=data_dir)
clean_test_dataset = test_builder.load(
batch_size=batch_size, strategy=strategy)
test_datasets = {
'clean': clean_test_dataset
}
if FLAGS.corruptions_interval > 0:
corruption_types, max_intensity = utils.load_corrupted_test_info()
for name in corruption_types:
for intensity in range(1, max_intensity + 1):
dataset_name = '{0}_{1}'.format(name, intensity)
dataset = utils.load_corrupted_test_dataset(
batch_size=batch_size,
corruption_name=name,
corruption_intensity=intensity,
use_bfloat16=FLAGS.use_bfloat16)
test_datasets[dataset_name] = (
strategy.experimental_distribute_dataset(dataset))
if FLAGS.use_bfloat16:
tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')
with strategy.scope():
logging.info('Building Keras HetSNGP ResNet-50 model')
model = ub.models.resnet50_hetsngp(
input_shape=(224, 224, 3),
batch_size=None,
num_classes=NUM_CLASSES,
num_factors=FLAGS.num_factors,
use_mc_dropout=FLAGS.use_mc_dropout,
dropout_rate=FLAGS.dropout_rate,
filterwise_dropout=FLAGS.filterwise_dropout,
use_gp_layer=FLAGS.use_gp_layer,
gp_hidden_dim=FLAGS.gp_hidden_dim,
gp_scale=FLAGS.gp_scale,
gp_bias=FLAGS.gp_bias,
gp_input_normalization=FLAGS.gp_input_normalization,
gp_random_feature_type=FLAGS.gp_random_feature_type,
gp_cov_discount_factor=FLAGS.gp_cov_discount_factor,
gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty,
gp_output_imagenet_initializer=FLAGS.gp_output_imagenet_initializer,
use_spec_norm=FLAGS.use_spec_norm,
spec_norm_iteration=FLAGS.spec_norm_iteration,
spec_norm_bound=FLAGS.spec_norm_bound,
temperature=FLAGS.temperature,
num_mc_samples=FLAGS.num_mc_samples,
sngp_var_weight=FLAGS.sngp_var_weight,
het_var_weight=FLAGS.het_var_weight)
logging.info('Model input shape: %s', model.input_shape)
logging.info('Model output shape: %s', model.output_shape)
logging.info('Model number of weights: %s', model.count_params())
# Scale learning rate and decay epochs by vanilla settings.
base_lr = FLAGS.base_learning_rate * batch_size / 256
decay_epochs = [
(FLAGS.train_epochs * 30) // 90,
(FLAGS.train_epochs * 60) // 90,
(FLAGS.train_epochs * 80) // 90,
]
learning_rate = ub.schedules.WarmUpPiecewiseConstantSchedule(
steps_per_epoch=steps_per_epoch,
base_learning_rate=base_lr,
decay_ratio=0.1,
decay_epochs=decay_epochs,
warmup_epochs=5)
optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate,
momentum=1.0 - FLAGS.one_minus_momentum,
nesterov=True)
metrics = {
'train/negative_log_likelihood': tf.keras.metrics.Mean(),
'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
'train/loss': tf.keras.metrics.Mean(),
'train/ece': rm.metrics.ExpectedCalibrationError(
num_bins=FLAGS.num_bins),
'test/negative_log_likelihood': tf.keras.metrics.Mean(),
'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
'test/ece': rm.metrics.ExpectedCalibrationError(
num_bins=FLAGS.num_bins),
'test/stddev': tf.keras.metrics.Mean(),
}
if FLAGS.corruptions_interval > 0:
corrupt_metrics = {}
for intensity in range(1, max_intensity + 1):
for corruption in corruption_types:
dataset_name = '{0}_{1}'.format(corruption, intensity)
corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
tf.keras.metrics.Mean())
corrupt_metrics['test/accuracy_{}'.format(dataset_name)] = (
tf.keras.metrics.SparseCategoricalAccuracy())
corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
corrupt_metrics['test/stddev_{}'.format(dataset_name)] = (
tf.keras.metrics.Mean())
logging.info('Finished building Keras ResNet-50 model')
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
initial_epoch = 0
if latest_checkpoint:
# checkpoint.restore must be within a strategy.scope() so that optimizer
# slot variables are mirrored.
checkpoint.restore(latest_checkpoint)
logging.info('Loaded checkpoint %s', latest_checkpoint)
initial_epoch = optimizer.iterations.numpy() // steps_per_epoch
summary_writer = tf.summary.create_file_writer(
os.path.join(FLAGS.output_dir, 'summaries'))
@tf.function
def train_step(iterator):
"""Training StepFn."""
def step_fn(inputs, step):
"""Per-Replica StepFn."""
images = inputs['features']
labels = inputs['labels']
if tf.equal(step, 0) and FLAGS.gp_cov_discount_factor < 0:
# Reset covaraince estimator at the begining of a new epoch.
model.get_layer('SNGP_layer').reset_covariance_matrix()
with tf.GradientTape() as tape:
logits = model(images, training=True)
if isinstance(logits, (list, tuple)):
# If model returns a tuple of (logits, covmat), extract logits
logits, _ = logits
if FLAGS.use_bfloat16:
logits = tf.cast(logits, tf.float32)
negative_log_likelihood = tf.reduce_mean(
tf.keras.losses.sparse_categorical_crossentropy(labels,
logits,
from_logits=True))
filtered_variables = []
for var in model.trainable_variables:
# Apply l2 on the weights. This excludes BN parameters and biases, but
# pay caution to their naming scheme.
if 'kernel' in var.name or 'bias' in var.name:
filtered_variables.append(tf.reshape(var, (-1,)))
l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
tf.concat(filtered_variables, axis=0))
# Scale the loss given the TPUStrategy will reduce sum all gradients.
loss = negative_log_likelihood + l2_loss
scaled_loss = loss / strategy.num_replicas_in_sync
grads = tape.gradient(scaled_loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
probs = tf.nn.softmax(logits)
metrics['train/ece'].add_batch(probs, label=labels)
metrics['train/loss'].update_state(loss)
metrics['train/negative_log_likelihood'].update_state(
negative_log_likelihood)
metrics['train/accuracy'].update_state(labels, logits)
for step in tf.range(tf.cast(steps_per_epoch, tf.int32)):
strategy.run(step_fn, args=(next(iterator), step))
@tf.function
def test_step(iterator, dataset_name):
  """Evaluation StepFn: averages predictions over MC samples on one dataset.

  dataset_name is 'clean' for the in-distribution test set, otherwise a
  corruption name; metrics go to `metrics` or `corrupt_metrics` accordingly.
  Closure variables (model, metrics, corrupt_metrics, strategy, FLAGS,
  steps_per_eval) come from the enclosing main().
  """
  def step_fn(inputs):
    """Per-Replica StepFn."""
    images = inputs['features']
    labels = inputs['labels']
    logits_list = []
    stddev_list = []
    for _ in range(FLAGS.num_dropout_samples):
      logits = model(images, training=False)
      if isinstance(logits, (list, tuple)):
        # If model returns a tuple of (logits, covmat), extract both
        logits, covmat = logits
      else:
        # No covariance output: fall back to the identity (unit variance).
        covmat = tf.eye(FLAGS.per_core_batch_size)
      if FLAGS.use_bfloat16:
        logits = tf.cast(logits, tf.float32)
      stddev = tf.sqrt(tf.linalg.diag_part(covmat))
      stddev_list.append(stddev)
      logits_list.append(logits)
    # Logits dimension is (num_samples, batch_size, num_classes).
    logits_list = tf.stack(logits_list, axis=0)
    stddev_list = tf.stack(stddev_list, axis=0)
    stddev = tf.reduce_mean(stddev_list, axis=0)
    probs_list = tf.nn.softmax(logits_list)
    probs = tf.reduce_mean(probs_list, axis=0)
    labels_broadcasted = tf.broadcast_to(
        labels, [FLAGS.num_dropout_samples, tf.shape(labels)[0]])
    log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
        labels_broadcasted, logits_list, from_logits=True)
    # NLL of the sample-averaged predictive: -log mean_s p_s = -logsumexp + log S.
    negative_log_likelihood = tf.reduce_mean(
        -tf.reduce_logsumexp(log_likelihoods, axis=[0]) +
        tf.math.log(float(FLAGS.num_dropout_samples)))
    if dataset_name == 'clean':
      metrics['test/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['test/accuracy'].update_state(labels, probs)
      metrics['test/ece'].add_batch(probs, label=labels)
      metrics['test/stddev'].update_state(stddev)
    else:
      corrupt_metrics['test/nll_{}'.format(dataset_name)].update_state(
          negative_log_likelihood)
      corrupt_metrics['test/accuracy_{}'.format(dataset_name)].update_state(
          labels, probs)
      corrupt_metrics['test/ece_{}'.format(dataset_name)].add_batch(
          probs, label=labels)
      corrupt_metrics['test/stddev_{}'.format(dataset_name)].update_state(
          stddev)
  for _ in tf.range(tf.cast(steps_per_eval, tf.int32)):
    strategy.run(step_fn, args=(next(iterator),))
metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})
train_iterator = iter(train_dataset)
start_time = time.time()
for epoch in range(initial_epoch, FLAGS.train_epochs):
logging.info('Starting to run epoch: %s', epoch)
train_step(train_iterator)
current_step = (epoch + 1) * steps_per_epoch
max_steps = steps_per_epoch * FLAGS.train_epochs
time_elapsed = time.time() - start_time
steps_per_sec = float(current_step) / time_elapsed
eta_seconds = (max_steps - current_step) / steps_per_sec
message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
current_step / max_steps,
epoch + 1,
FLAGS.train_epochs,
steps_per_sec,
eta_seconds / 60,
time_elapsed / 60))
logging.info(message)
datasets_to_evaluate = {'clean': test_datasets['clean']}
if (FLAGS.corruptions_interval > 0 and
(epoch + 1) % FLAGS.corruptions_interval == 0):
datasets_to_evaluate = test_datasets
for dataset_name, test_dataset in datasets_to_evaluate.items():
test_iterator = iter(test_dataset)
logging.info('Testing on dataset %s', dataset_name)
logging.info('Starting to run eval at epoch: %s', epoch)
test_start_time = time.time()
test_step(test_iterator, dataset_name)
ms_per_example = (time.time() - test_start_time) * 1e6 / batch_size
metrics['test/ms_per_example'].update_state(ms_per_example)
logging.info('Done with testing on %s', dataset_name)
corrupt_results = {}
if (FLAGS.corruptions_interval > 0 and
(epoch + 1) % FLAGS.corruptions_interval == 0):
corrupt_results = utils.aggregate_corrupt_metrics(
corrupt_metrics, corruption_types, max_intensity,
FLAGS.alexnet_errors_path)
logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
metrics['train/loss'].result(),
metrics['train/accuracy'].result() * 100)
logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
metrics['test/negative_log_likelihood'].result(),
metrics['test/accuracy'].result() * 100)
total_results = {name: metric.result() for name, metric in metrics.items()}
total_results.update(corrupt_results)
# Metrics from Robustness Metrics (like ECE) will return a dict with a
# single key/value, instead of a scalar.
total_results = {
k: (list(v.values())[0] if isinstance(v, dict) else v)
for k, v in total_results.items()
}
with summary_writer.as_default():
for name, result in total_results.items():
tf.summary.scalar(name, result, step=epoch + 1)
for metric in metrics.values():
metric.reset_states()
if (FLAGS.checkpoint_interval > 0 and
(epoch + 1) % FLAGS.checkpoint_interval == 0):
checkpoint_name = checkpoint.save(os.path.join(
FLAGS.output_dir, 'checkpoint'))
logging.info('Saved checkpoint to %s', checkpoint_name)
# Save final checkpoint.
final_checkpoint_name = checkpoint.save(
os.path.join(FLAGS.output_dir, 'checkpoint'))
logging.info('Saved last checkpoint to %s', final_checkpoint_name)
# Export final model as SavedModel.
final_save_name = os.path.join(FLAGS.output_dir, 'model')
model.save(final_save_name)
logging.info('Saved model to %s', final_save_name)
with summary_writer.as_default():
hp.hparams({
'base_learning_rate': FLAGS.base_learning_rate,
'one_minus_momentum': FLAGS.one_minus_momentum,
'l2': FLAGS.l2,
})
if __name__ == '__main__':
app.run(main)
|
google/uncertainty-baselines
|
baselines/imagenet/hetsngp.py
|
Python
|
apache-2.0
| 22,003
|
[
"Gaussian"
] |
727017073b26a4442baa986e803a71d061ff1a7248da12043d84b93350ebbaa4
|
#!/usr/bin/python2.5
################################################################################
# A script for getting the inchi file from KEGG and translating it to a giant
# SDF (a multi-molfile) so that it can be used with WebGCM to get the
# free energy of formation for each compound in KEGG
################################################################################
import kegg
import pybel

# Renamed from `kegg` so the instance no longer shadows the imported module.
kegg_db = kegg.Kegg()
kegg_db.prepare_database()

inchi_file = open(kegg_db.INCHI_FILE, 'r')
sd_file = open('../kegg/compounds.sdf', 'w')
try:
    # Iterate the file directly instead of materializing readlines().
    for line in inchi_file:
        fields = line.strip().split()
        if len(fields) != 2:
            # Skip blank/malformed lines instead of crashing on unpacking.
            print("ERROR: malformed line: " + line.strip())
            continue
        cid, inchi = fields
        molecule = pybel.readstring('inchi', inchi)
        molfile_lines = molecule.write('sdf').split('\n')
        if len(molfile_lines) < 4:
            print("ERROR: " + cid)
            continue
        # Stamp the KEGG compound id into the molfile's second (comment) line.
        molfile_lines[1] = " " + cid
        sd_file.write('\n'.join(molfile_lines))
finally:
    # The original leaked inchi_file; close both handles explicitly.
    inchi_file.close()
    sd_file.close()
|
eladnoor/carbofix
|
src/gibbs.py
|
Python
|
mit
| 934
|
[
"Pybel"
] |
ff771db5c32bacbea9f9fa77ffc82bc9f04698cc17ce26614ac38853d2fc5769
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import math
from bisect import bisect_left
import logging
import json
import bpy
import bmesh
import blf
from bpy.types import Operator
from bpy.props import (StringProperty,
BoolProperty,
IntProperty,
FloatProperty,
FloatVectorProperty,
BoolVectorProperty,
PointerProperty,
CollectionProperty,
EnumProperty)
from bpy_extras import view3d_utils
from mathutils import Vector, Matrix
from molblend.elements_default import ELEMENTS as ELEMENTS_DEFAULT
logger = logging.getLogger(__name__)
class enums():
    """Namespace for the (identifier, name, description) item lists used by
    the add-on's EnumProperty definitions."""
    # Role a Blender object plays in a molecule.
    object_types = [
        ('NONE', "None", "None"),
        ('ATOM', "Atom", "Atom"),
        ('BOND', "Bond", "Bond"),
        ('UC', "Unit cell", "Unit cell"),
        ('DIPOLE', "Dipole", "Dipole"),
        ('MODE_ARROW', "Mode arrow", "Mode arrow"),
        ('PARENT', "Parent", "Parent"),
        ]
    # Role a shared mesh datablock plays.
    mesh_types = [
        ('NONE', "None", "None"),
        ('ELEMENT', "Element", "Element"),
        ('BOND', "Bond", "Bond"),
        ]
    # Role of a text datablock (e.g. stored vibrational-mode data).
    text_types = [
        ('NONE', "None", "None"),
        ('MODES', "Modes", "Modes"),
        ]
    # Which per-element radius drives atom size (see set_atom_drivers).
    radius_types = [
        ('covalent', 'covalent', 'covalent'),
        ('vdw', 'van der Waals', 'van der Waals'),
        ('constant', 'constant', 'constant'),
        ]
    # Drawing styles (see update_draw_style).
    molecule_styles = [
        ('BALLS', 'Balls', 'Space filling'),
        ('BAS', 'Balls and Sticks', 'Balls and Sticks'),
        ('STICKS', 'Sticks', 'Sticks'),
        ]
    bond_material = [
        ('ATOMS', "Atoms", "Same as atoms"),
        ('GENERIC', "Generic" , "Single bond color"),
        ]
    # CONSTRAINT bonds follow their atoms via constraints; STATIC bonds don't.
    bond_types = [
        ('CONSTRAINT', "constraint", "constrained by the two bonded atoms"),
        ('STATIC', "static", "independent bonds, don't move with atoms"),
        ]
    # Geometry snapping presets for interactive atom placement.
    geometries = [
        ('NONE', "None", "No geometry constraints"),
        ('GENERAL', "General",
         "Angles are multiples of 30 and 45 deg. in the view plane"),
        ('LINEAR', "Linear", "Linear or sp"),
        ('TRIGONAL', "Trig. planar", "Trigonal planar or sp2"),
        ]
    # Unit conversion factors (identifier is the Angstrom-per-unit value).
    angstrom_per_unit = [
        ('1.0', "Angstrom", "Angstrom"),
        ('0.529177249', "Bohr", "Bohr"),
        ('0.01', "pm", "Picometer"),
        ('OTHER', "Other", "Custom Unit"),
        ]
    # Supported export formats.
    file_types = [
        ('XYZ', "xyz", "xyz format"),
        ('PDB', "pdb", "Protein Databank format"),
        ('B4W', "b4w", "standalone HTML with embedded 3D viewer")
        ]
    # Supported vibrational-mode input formats.
    mode_file_format = [
        ('ANADDB', "anaddb", "Abinit/anaddb output"),
        ('QE_DYNMAT', "QE dynmat", "Quantum ESPRESSO output"),
        ('XYZ', "xyz", "xyz-style format"),
        ('PHONOPY', "phonopy", "phonopy/v_sim ascii format"),
        ]
    # How an isosurface level is specified.
    iso_val = [
        ('VOLFRAC', "volume fraction", "iso value by fraction of volume"),
        ('ABSOLUTE', "absolute value", "iso value by absolute value in cube file"),
        ]
#--- Update functions --------------------------------------------------------#
def update_all_meshes(self, context):
    """Property-update callback: re-tag every mesh datablock for a redraw."""
    # TODO this callback might be too heavy for scenes with lots of meshes
    if not context.scene.mb.is_initialized:
        return
    for mesh in bpy.data.meshes:
        mesh.update()
def update_active_mode(self, context):
    """Property-update callback for the active vibrational mode.

    Loads the selected mode from the q-point's text datablock, refreshes all
    atom actions and starts/stops the playback animation. NOTE: assigning to
    self.active_mode inside this function re-triggers this callback, hence
    the early returns after each such assignment.
    """
    if len(self.qvecs) == 0:
        # No mode data loaded: make sure nothing is playing.
        if context.screen.is_animation_playing:
            bpy.ops.screen.animation_play()
        context.scene.frame_current = 1
        return
    if self.active_mode == 0:
        # Mode 0 is the equilibrium (no displacement) pseudo-mode.
        self['mode']['freq'] = 'equilibrium'
    else:
        try:
            qpt = json.loads(self.qvecs[self.active_nqpt].mode_txt.as_string())
            self['mode'] = qpt['modes'][self.active_mode-1]
        except:
            # Broad except kept deliberately: any parse failure falls back to
            # the equilibrium mode instead of leaving the UI in a bad state.
            logger.error("Problem loading mode from text object.")
            logger.exception("")
            if self.active_mode == 0:
                return
            else:
                self.active_mode = 0
                return
        if self.active_mode > 0 and self.active_mode > len(qpt['modes']):
            self.active_mode = len(qpt['modes']) - 1
            # since this calls this callback again, return
            return
    for atom in self.objects.atoms:
        update_mode_action(atom, self)
    if (self.active_mode == 0 or not self.autoplay_mode_animation):
        # stop animation
        if context.screen.is_animation_playing:
            bpy.ops.screen.animation_play()
        context.scene.frame_current = 1
    else:
        # start animation
        if not context.screen.is_animation_playing:
            context.scene.frame_end = 20
            bpy.ops.screen.animation_play()
def update_show_mode_arrows(self, context):
    """Property-update callback: toggle display of vibrational-mode arrows."""
    bpy.ops.mb.toggle_mode_arrows('INVOKE_DEFAULT')
def update_show_unit_cell_arrows(self, context):
    """Property-update callback: toggle display of unit-cell axis arrows."""
    bpy.ops.mb.toggle_unit_cell_arrows('INVOKE_DEFAULT')
def update_show_unit_cell_frame(self, context):
    """Property-update callback: toggle display of the unit-cell wireframe."""
    bpy.ops.mb.toggle_unit_cell_frame('INVOKE_DEFAULT')
def update_atom_element(self, context):
    '''
    Property-update callback for an atom's element string:
    assign a mesh, give a new object name etc.

    NOTE: assigning to self.element inside this function re-triggers this
    callback, hence the early returns after each normalizing assignment.
    '''
    # remove all spaces
    if self.element.strip() != self.element:
        self.element = self.element.strip()
        return
    # Comparison is case insensitive
    # get dictionary that maps all lower case elements to exact upper/lower
    # case as it appears in the list
    elements_map = dict([(element.name.lower(), element.name)
                         for element in context.scene.mb.elements])
    # if element is not yet in scene elements list, add new element.
    # Use default settings
    if self.element.lower() not in elements_map:
        add_element(context, self.element, ELEMENTS_DEFAULT["Default"])
    # adjust case of entered element to match already existing element
    elif self.element not in elements_map.values():
        self.element = elements_map[self.element.lower()]
        # because this assignment calls this function again, just return
        return
    # get object and molecule to change all element specific properties
    atom_ob = self.object
    molecule = self.get_molecule()
    # update mesh and material
    me = get_atom_data(self.element, molecule)
    atom_ob.data = me
    assign_atom_material(atom_ob, molecule)
    set_atom_drivers(context, atom_ob, molecule)
    # update bond materials
    for bond in self.bonds:
        assign_bond_material(bond)
    # assign type last, to be able to check if element is newly assigned or
    # just updated
    self.type = 'ATOM'
def update_bond_material(self, context):
    """Property-update callback: reassign materials on all bonds of this molecule."""
    for bond in self.objects.bonds:
        assign_bond_material(bond)
def update_refine_atoms(self, context):
    """Property-update callback: rebuild all element sphere meshes at the new
    subdivision level, editing the shared datablocks in place so every atom
    object updates at once.

    NOTE: assigning self.refine_atoms re-triggers this callback (clamp below).
    """
    if self.refine_atoms < 2:
        self.refine_atoms = 2
        return
    replaced_elements = set()
    for mesh in self.meshes:
        if mesh.data.mb.type == "ELEMENT":
            element = mesh.name
            #if not element in replaced_elements:
            data = mesh.data
            # get new temporary atom mesh with new refine value
            new_data = get_atom_data(element, self, type='MESH',
                                     mesh_name="tmp_mesh")
            # replace mesh data in place via bmesh round-trip
            bm = bmesh.new()
            bm.from_mesh(new_data)
            bm.to_mesh(data)
            bm.free()
            replaced_elements.add(element)
            # delete temporary mesh
            bpy.data.meshes.remove(new_data)
def update_refine_bonds(self, context):
    """Property-update callback: rebuild the shared bond mesh at the new
    subdivision level, editing the datablock in place so every bond object
    updates at once.

    NOTE: assigning self.refine_bonds re-triggers this callback (clamp below).
    """
    if self.refine_bonds < 2:
        self.refine_bonds = 2
        return
    mesh = self.meshes.get("bond")
    if mesh:
        data = mesh.data
        # Get a new temporary bond mesh with the new refine value.
        # (Removed an unused local `name = "tmp_mesh"` from the original.)
        new_data = get_bond_data(self, type='MESH', mesh_name="tmp_mesh")
        # Replace the mesh data in place via a bmesh round-trip.
        bm = bmesh.new()
        bm.from_mesh(new_data)
        bm.to_mesh(data)
        bm.free()
        # Delete the temporary mesh datablock.
        bpy.data.meshes.remove(new_data)
def update_export_file_type(self, context):
    """
    Keep the export filepath's extension in sync with the chosen file type.

    Replaces a known foreign extension, appends the extension to anything
    else, and — unlike the original — leaves the path untouched when it
    already ends with the correct extension (the original appended a second
    copy, e.g. "mol.xyz" -> "mol.xyz.xyz"). File types without a mapped
    extension (e.g. 'B4W') are ignored instead of raising KeyError.
    """
    if self.filepath:
        filetypes = {'XYZ': ".xyz",
                     'PDB': ".pdb"}
        ext = filetypes.get(self.file_type)
        if ext is None or self.filepath.endswith(ext):
            # Unknown type, or extension already correct: nothing to do.
            return
        other_ext = [f for f in filetypes.values() if f != ext]
        if self.filepath[-4:] in other_ext:
            self.filepath = self.filepath[:-4] + ext
        else:
            self.filepath = self.filepath + ext
def update_show_bond_lengths(self, context):
    """Property-update callback: start the bond-length overlay operator when enabled."""
    if self.show_bond_lengths:
        bpy.ops.mb.show_bond_lengths()
def update_show_bond_angles(self, context):
    """Property-update callback: start the bond-angle overlay operator when enabled."""
    if self.show_bond_angles:
        bpy.ops.mb.show_bond_angles()
def update_radius_type(self, context):
    """Property-update callback: refresh atom scale drivers after the radius type changed."""
    for atom in self.objects.atoms:
        set_atom_drivers(context, atom, self)
def update_draw_style(self, context):
    """Property-update callback for the molecule draw style: refresh atom
    scale drivers and hide bonds in space-filling ('BALLS') mode."""
    for atom in self.objects.atoms:
        set_atom_drivers(context, atom, self)
    bonds_hidden = (self.draw_style == 'BALLS')
    for bond in self.objects.bonds:
        bond.hide = bonds_hidden
#--- General functions -------------------------------------------------------#
def callback_draw_length(self, context):
    """Viewport draw callback: overlay the length of every selected bond.

    Projects each bond's midpoint into window coordinates and draws the
    distance between its two atoms with blf. On any error, disables the
    overlay flag so the callback doesn't spam exceptions every redraw.
    """
    try:
        font_id = 0
        blf.size(font_id, context.scene.mb.globals.bond_length_font_size, 72)
        offset = 0
        rv3d = context.space_data.region_3d
        width = context.region.width
        height = context.region.height
        persp_mat = rv3d.perspective_matrix
        persinv = persp_mat.inverted()
        for ob in context.selected_objects:
            if ob.mb.type == "BOND":
                locs = [o.mb.world_location for o in ob.mb.bonded_atoms]
                co_3d = (locs[0] + locs[1]) / 2.
                # Perspective-divide the projected midpoint into pixels.
                prj = persp_mat * co_3d.to_4d()
                x = width/2 + width/2 * (prj.x / prj.w)
                y = height/2 + height/2 * (prj.y / prj.w)
                blf.position(font_id, x, y, 0)
                blf.draw(font_id, "{:6.4f}".format((locs[1]-locs[0]).length))
        #ob = context.object
        #if ob.type == "MESH":
            #for v in ob.data.vertices:
                #prj = persp_mat * v.co.to_4d()
                #x = width/2 + width/2 * (prj.x / prj.w)
                #y = height/2 + height/2 * (prj.y / prj.w)
                #blf.position(font_id, x, y, 0)
                #blf.draw(font_id, "{}".format(v.index))
    except:
        logger.exception('')
        context.scene.mb.globals.show_bond_lengths = False
def add_element(context, element, element_dict):
    '''
    Register *element* in the scene's element collection and return the new
    entry. Missing keys in element_dict fall back to the "Default" preset.
    '''
    fallback = ELEMENTS_DEFAULT["Default"]
    entry = context.scene.mb.elements.add()
    entry.name = element
    entry.element = element
    entry.element_name = element_dict.get("element name",
                                          fallback["element name"])
    entry.atomic_number = element_dict.get("atomic number",
                                           fallback["atomic number"])
    entry.color = element_dict.get("color", fallback["color"])
    entry.covalent = element_dict.get("covalent", fallback["covalent"])
    if "vdw1" in element_dict or "vdw2" in element_dict:
        # Prefer a non-zero vdw1, then vdw2, then the covalent radius.
        if element_dict["vdw1"] != 0.0:
            entry.vdw = element_dict["vdw1"]
        elif element_dict["vdw2"] != 0.0:
            entry.vdw = element_dict["vdw2"]
        else:
            entry.vdw = element_dict["covalent"]
    else:
        entry.vdw = element_dict.get("covalent", fallback["covalent"])
    entry.constant = 1.0
    return entry
def initialize_elements(context):
    """Populate the scene's element collection from the bundled defaults."""
    for element, data in ELEMENTS_DEFAULT.items():
        add_element(context, element, data)
#--- Viewport functions ------------------------------------------------------#
def get_region_data(context, x, y):
    """Return the (region, rv3d) pair of the 3D viewport under window
    coordinates (x, y), or (None, None) if the point is not over a 3D view.

    Handles quad-view layouts by counting WINDOW regions so the matching
    region_quadviews entry can be picked.
    """
    for area in context.screen.areas:
        if area.type != 'VIEW_3D':
            continue
        is_quadview = len(area.spaces.active.region_quadviews) != 0
        i = -1
        for region in area.regions:
            if region.type == 'WINDOW':
                # i indexes WINDOW regions only, matching region_quadviews.
                i += 1
                if (x > region.x and
                        y > region.y and
                        x < region.width + region.x and
                        y < region.height + region.y):
                    if is_quadview:
                        rv3d = area.spaces.active.region_quadviews[i]
                    else:
                        rv3d = area.spaces.active.region_3d
                    return (region, rv3d)
    return (None, None)
def mouse_2d_to_location_3d(context, mouse2d, depth=Vector((0, 0, 0)),
                            region=None, rv3d=None):
    """Convert 2D window mouse coordinates into a 3D location.

    depth gives the plane depth for the un-projection. NOTE: the default
    zero vector is falsy, so by default the 3D cursor location is used —
    kept as-is since callers may rely on it. (Fixed `== None` -> `is None`.)
    """
    x, y = mouse2d
    # Get region and region data from mouse position.
    # If region is given, passed rv3d is ignored.
    if region is None:
        region, rv3d = get_region_data(context, x, y)
    # mouse coordinates relative to region
    coord2d = (x - region.x, y - region.y)
    if depth:
        depth_location = depth
    else:
        depth_location = context.scene.cursor_location.copy()
    return view3d_utils.region_2d_to_location_3d(region, rv3d, coord2d,
                                                 depth_location)
def return_cursor_object(context, event, ray_max=10000.0, exclude=None,
                         mb_type=''):
    """ This is a function that can be run from a modal operator
    to select the 3D object the mouse is hovered over.

    Casts a ray from the view through the mouse position and returns the
    closest MESH object hit (optionally filtered by mb_type and minus the
    objects in *exclude*), or None.
    """
    exclude = exclude or []
    # get the context arguments
    scene = context.scene
    x, y = event.mouse_x, event.mouse_y
    region, rv3d = get_region_data(context, x, y)
    if not rv3d:
        return None
    coord2d = (x - region.x, y - region.y)
    # get the ray from the viewport and mouse
    view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord2d)
    # have ray origin a little in front of actual origin, otherwise it might
    # not get all of the objects
    ray_origin = (view3d_utils.region_2d_to_origin_3d(region, rv3d, coord2d) +
                  0.5 * view_vector * ray_max)
    ray_target = ray_origin - (view_vector * ray_max)
    def visible_objects_and_duplis():
        """Loop over (object, matrix) pairs (mesh only)"""
        for obj in context.visible_objects:
            if obj.type == 'MESH':
                if (mb_type and mb_type == obj.mb.type) or mb_type == '':
                    yield (obj, obj.matrix_world.copy())
    def obj_ray_cast(obj, matrix):
        """Wrapper for ray casting that moves the ray into object space"""
        try:
            # get the ray relative to the object
            matrix_inv = matrix.inverted()
            ray_origin_obj = matrix_inv * ray_origin
            ray_target_obj = matrix_inv * ray_target
            # cast the ray
            result, hit, normal, face_index = obj.ray_cast(ray_origin_obj,
                                                           ray_target_obj)
            if face_index != -1:
                return hit, normal, face_index
            else:
                return None, None, None
        except ValueError as e:
            # e.g. non-manifold or empty meshes can make ray_cast choke
            logger.error("in obj_ray_cast: {}: {}".format(obj.name, e))
            return None, None, None
        finally:
            pass
    # cast rays and find the closest object
    best_length_squared = ray_max * ray_max
    best_obj = None
    for obj, matrix in visible_objects_and_duplis():
        if obj not in exclude and obj.type == 'MESH':
            if len(obj.data.vertices) >= 4:
                hit, normal, face_index = obj_ray_cast(obj, matrix)
                if hit is not None:
                    hit_world = matrix * hit
                    length_squared = (hit_world - ray_origin).length_squared
                    if length_squared < best_length_squared:
                        best_length_squared = length_squared
                        best_obj = obj
    # now we have the object under the mouse cursor,
    # we could do lots of stuff but for the example just select.
    return best_obj
def check_ob_dimensions(ob):
    """Flip the stretch constraint's keep-axis when a bond object collapses
    to (near) zero length, avoiding a degenerate orientation."""
    if not ob.dimensions.x < 0.0001:  #< ob.mb.get_molecule().bond_radius:
        return
    stretch = ob.constraints.get("mb.stretch", None)
    if stretch:
        flipped = {'PLANE_X': 'PLANE_Z', 'PLANE_Z': 'PLANE_X'}
        stretch.keep_axis = flipped[stretch.keep_axis]
#--- Add object functions ----------------------------------------------------#
def get_atom_id(mol_name, atom_index):
    """Return the unique atom id "<molecule>.<zero-padded index>"."""
    padded_index = "{:>04d}".format(atom_index)
    return ".".join((mol_name, padded_index))
def add_atom(context, location, element, atom_name, atom_index, molecule):
    """Create and link a new atom object in *molecule* and return it."""
    # get new unique name for object
    name = "atom_{}".format(get_atom_id(molecule.name, atom_index))
    mesh_data = get_atom_data(element, molecule)
    new_atom = bpy.data.objects.new(name, mesh_data)
    context.scene.objects.link(new_atom)
    new_atom.location = location
    # set mb properties
    new_atom.mb.name = new_atom.name
    new_atom.mb.atom_name = atom_name
    # parent to molecule origin
    new_atom.parent = molecule.objects.parent
    new_atom.mb.parent = molecule.objects.parent
    # updating the element will call update_atom_element, which assigns a mesh,
    # and sets all the drivers
    new_atom.mb.element = element
    # add atom object and mesh to molecule collections
    molecule.add_object(new_atom)
    return new_atom
def add_bond(context, first_atom, second_atom, bond_type="CONSTRAINT"):
    """Create a bond object between two atoms and return it.

    Returns None for a self-bond or an already existing bond. CONSTRAINT
    bonds follow their atoms via COPY_LOCATION + STRETCH_TO constraints;
    STATIC bonds are placed/rotated once and don't move with the atoms.
    """
    if first_atom == second_atom:
        logger.warning('add_bond: first_atom == second_atom')
        return None
    for b in first_atom.mb.bonds:
        if b != None:
            for ba in b.mb.bonded_atoms:
                if ba == second_atom:
                    logger.warning(
                        "add_bond: Bond {}-{} already exists".format(
                            first_atom.mb.index, second_atom.mb.index),
                        )
                    return None
    # get new unique name for bond
    first_mol = first_atom.mb.get_molecule()
    second_mol = second_atom.mb.get_molecule()
    name = "bond_{}-{}".format(
        get_atom_id(first_mol.name, first_atom.mb.index),
        get_atom_id(second_mol.name, second_atom.mb.index)
        )
    bond_mesh = get_bond_data(first_mol)
    new_bond = bpy.data.objects.new(name, bond_mesh)
    context.scene.objects.link(new_bond)
    new_bond.hide = (first_mol.draw_style == 'BALLS')
    # set mb properties
    new_bond.mb.type = 'BOND'
    new_bond.mb.name = new_bond.name
    new_bond.mb.add_bonded_atom(first_atom)
    new_bond.mb.add_bonded_atom(second_atom)
    # add bond to atoms mb props
    first_atom.mb.add_bond(new_bond)
    second_atom.mb.add_bond(new_bond)
    # add it to first molecule collection (inter-molecule bonds live in the
    # first atom's molecule)
    first_mol.add_object(new_bond)
    new_bond.mb.parent = first_mol.objects.parent
    if bond_type == "CONSTRAINT":
        # don't parent, as parenting also affects the scale
        c = new_bond.constraints.new('COPY_LOCATION')
        c.name = "mb.parent"
        c.target = first_atom
        c = new_bond.constraints.new('STRETCH_TO')
        c.name = "mb.stretch"
        c.rest_length = 1.0
        c.volume = 'NO_VOLUME'
        c.target = second_atom
    elif bond_type == "STATIC":
        y_axis = Vector((0,1,0))
        loc1 = first_atom.location
        loc2 = second_atom.location
        # Location
        location = loc1
        vec = (loc2 - loc1)
        angle = vec.angle(y_axis, 0)
        # vector of rotation
        axis = y_axis.cross(vec)
        new_bond.rotation_euler = Matrix.Rotation(angle, 4, axis).to_euler()
        new_bond.location = loc1
        # unit cylinder is stretched along its local y to span the atoms
        new_bond.scale[1] = vec.length
    assign_bond_material(new_bond)
    set_bond_drivers(context, new_bond, new_bond.mb.get_molecule())
    new_bond.parent = first_mol.objects.parent
    return new_bond
#--- Get Mesh functions ------------------------------------------------------#
def get_atom_data(element, molecule, type='MESH', mesh_name=""):
    """
    Retrieve mesh for a certain element. If mesh_name is given, the mesh is
    retrieved from bpy.data.meshes if it exists, or created and assigned that
    name. Otherwise the mesh is retrieved from molecule.meshes.

    NOTE(review): for type != 'MESH' this falls through to `return me` with
    `me` unbound (NameError) — presumably only ever called with 'MESH'.
    """
    if type == 'MESH':
        if mesh_name:
            me = bpy.context.blend_data.meshes.get(mesh_name)
        else:
            element = element.capitalize()
            atom_name = "atom_mesh_{}.{}".format(molecule.name, element)
            item = molecule.meshes.get(element)
            if item:
                me = item.data
            else:
                me = None
        if not me:
            # save last selection to restore later
            selected = bpy.context.selected_objects
            last_active = bpy.context.object
            refine = molecule.refine_atoms
            # create uv sphere and get mesh data
            bpy.ops.mesh.primitive_uv_sphere_add(
                location=(0,0,0), segments=refine*2, ring_count=refine)
            new_atom = bpy.context.object
            bpy.ops.object.shade_smooth()
            me = new_atom.data
            me.name = mesh_name or atom_name
            me.mb.type = "ELEMENT"
            if not mesh_name:
                # register the shared mesh under the element name
                item = molecule.meshes.add()
                item.name = element
                item.data = me
            # adds material slot to mesh, but don't assign material yet
            new_atom.data.materials.append(None)
            # finally delete object and return mesh data
            bpy.context.scene.objects.unlink(new_atom)
            bpy.data.objects.remove(new_atom)
            # restore old selection
            for o in selected:
                o.select = True
            bpy.context.scene.objects.active = last_active
    return me
def get_bond_data(molecule, type='MESH', mesh_name=""):
    """Retrieve (or lazily build) the shared two-material bond cylinder mesh.

    The cylinder is rotated onto the local y axis, split at its midpoint and
    its faces re-created so the two halves get material slots 0 and 1 (one
    half per bonded atom). If mesh_name is given, that named mesh datablock
    is used/created; otherwise the mesh is cached in molecule.meshes["bond"].
    """
    new_bond = None
    if type == 'MESH':
        if mesh_name:
            me = bpy.context.blend_data.meshes.get(mesh_name)
        else:
            bond_name = "bond"
            item = molecule.meshes.get(bond_name)
            if item:
                me = item.data
            else:
                me = None
        if not me:
            # save last selection to restore later
            selected = bpy.context.selected_objects
            last_active = bpy.context.object
            bpy.ops.mesh.primitive_cylinder_add(
                location=(0,0,0), vertices=molecule.refine_bonds*2,
                depth=1, radius=1.0)#, end_fill_type="NOTHING")
            new_bond = bpy.context.object
            for i in range(2):
                # two material slots: one per bond half
                new_bond.data.materials.append(None)
            me = new_bond.data
            me.name = mesh_name or "bond_mesh_{}".format(molecule.name)
            me.mb.type = "BOND"
            bm = bmesh.new()
            bm.from_mesh(me)
            # rotate and shrink first, then add another row of vertices
            for vert in bm.verts:
                # rotate 90 degrees around x, and shift along y axis
                tmp_co = vert.co.copy()
                vert.co.y = -tmp_co.z + .5
                vert.co.z = -tmp_co.y
                if vert.co.y > 0.01:
                    vert.select = False
            new_verts = []
            bm.edges.ensure_lookup_table()
            for edge in bm.edges:
                # split only the long side edges (spanning both cylinder caps)
                if abs(edge.verts[0].co.y - edge.verts[1].co.y) > .5:
                    #if hasattr(edge, "ensure_lookup_table"):
                        #edge.ensure_lookup_table()
                    e, v = bmesh.utils.edge_split(edge, edge.verts[0], 0.5)
                    new_verts.append(v)
            n_verts = len(new_verts)
            # bad hack, but don't understand how bmesh.utils.face_split works
            # remove all faces
            for f in bm.faces:
                bm.faces.remove(f)
            # now sort bm.verts
            # v.co.y is either 0, 0.5, or 1.0.
            # So multiply y with at least 4pi to sort by y value first
            key = lambda v: v.co.y * 15 + math.atan2(v.co.x, v.co.z)
            verts_sorted = sorted((v for v in bm.verts), key=key)
            for i, v in enumerate(verts_sorted):
                v.index = i
            # add new faces and assign the two different material slots
            for i in range(2*n_verts):
                v1 = verts_sorted[i]
                v2 = verts_sorted[(i + 1)%n_verts + n_verts*(i//n_verts)]
                v3 = verts_sorted[(i + 1)%n_verts + n_verts*(i//n_verts + 1)]
                v4 = verts_sorted[i + n_verts]
                f = bm.faces.new((v1, v2, v3, v4))
                f.material_index = i//n_verts # gives 0 or 1
                f.smooth = True
            # again, sort by center.y first, than angle
            key = lambda f: (f.calc_center_median().y * 15 +
                             math.atan2(f.normal.x, f.normal.z))
            half_faces = len(bm.faces)/2
            for i, f in enumerate(sorted((f for f in bm.faces), key=key)):
                f.index = i
            bm.to_mesh(me)
            bm.free()
            me.update()
            # finally delete object and reselect old selection
            bpy.context.scene.objects.unlink(new_bond)
            bpy.data.objects.remove(new_bond)
            for o in selected:
                o.select = True
            bpy.context.scene.objects.active = last_active
            if not mesh_name:
                # cache the shared bond mesh on the molecule
                item = molecule.meshes.add()
                item.name = bond_name
                item.data = me
    return me
def get_arrow_data(type='MESH', name="arrow",
                   radius = 0.1, ring_y = 0.9, ring_scale = 2):
    """Retrieve (or lazily build) the shared arrow mesh used for mode/dipole
    arrows: a unit-length cylinder along +y with a flared cone head.

    ring_y is where the head starts; ring_scale widens the head's base ring.
    NOTE(review): for type != 'MESH' this returns an unbound `data` —
    presumably only ever called with 'MESH'.
    """
    if type == 'MESH':
        data = bpy.context.blend_data.meshes.get(name)
        if not data:
            # Make arrow mesh
            bpy.ops.mesh.primitive_cylinder_add(
                location=(0,0,0), radius=radius, vertices=8, depth=1,
                end_fill_type="TRIFAN")
            ob = bpy.context.object
            ob.data.materials.append(None)
            # convert cylinder to arrow
            bm = bmesh.new()
            bm.from_mesh(ob.data)
            # rotate and shrink first, then add another row of vertices
            for vert in bm.verts:
                # rotate 90 degrees around x, and shift along y axis
                tmp_co = vert.co.copy()
                vert.co.y = -tmp_co.z + .5
                vert.co.z = tmp_co.y
                if vert.co.y > 0.01:
                    vert.select = False
            new_verts = []
            for edge in bm.edges:
                # side edges have length 1 (the cylinder depth)
                if edge.calc_length() == 1.0:
                    if hasattr(edge, "ensure_lookup_table"):
                        edge.ensure_lookup_table()
                    e, v = bmesh.utils.edge_split(edge, edge.verts[0], 0.5)
                    new_verts.append(v)
            n_verts = len(new_verts)
            # bad hack, but don't understand how bmesh.utils.face_split works
            # remove faces with 6 verts
            for f in bm.faces:
                if len(f.verts) == 6:
                    bm.faces.remove(f)
            # now sort bm.verts
            # v.co.y is either 0, 0.5, or 1.0.
            # So multiply y with at least 4pi to sort by y value first
            key = lambda v: v.co.y * 15 + math.atan2(v.co.x, v.co.z)
            verts_sorted = sorted(
                (v for v in bm.verts if (0 < v.co.length and v.co.length != 1.0)),
                key=key
                )
            # add new faces
            for i in range(2*n_verts):
                v1 = verts_sorted[i]
                v2 = verts_sorted[(i + 1)%n_verts + n_verts*(i//n_verts)]
                v3 = verts_sorted[(i + 1)%n_verts + n_verts*(i//n_verts + 1)]
                v4 = verts_sorted[i + n_verts]
                f = bm.faces.new((v1, v2, v3, v4))
            # now shape the arrow head
            for vert in bm.verts:
                if vert.co.y == 1.0 and not vert.co.length == 1.0:
                    vert.co.y = ring_y
                    vert.co.x = vert.co.x * ring_scale
                    vert.co.z = vert.co.z * ring_scale
                elif vert.co.y == 0.5:
                    vert.co.y = ring_y
            # make everything smooth
            for f in bm.faces:
                f.smooth = True
            bm.to_mesh(ob.data)
            bm.free()
            data = ob.data
            data.name = name
            bpy.context.scene.objects.unlink(ob)
    return data
def get_arrow_head_data(name="arrow_head",
                        radius=0.2, depth=0.5):
    """Retrieve (or lazily build) a cone mesh for arrow heads, shifted so its
    base sits at the local origin (tip pointing along -z after the shift)."""
    data = bpy.context.blend_data.meshes.get(name)
    if not data:
        bpy.ops.mesh.primitive_cone_add(
            location=(0,0,0), radius1=radius,
            vertices=16, depth=depth,
            end_fill_type="NGON")
        ob = bpy.context.active_object
        bpy.ops.object.shade_smooth()
        data = ob.data
        for v in data.vertices:
            # shift so the cone's base is at z = -depth, tip at z = 0
            v.co[2] -= depth/2.
        data.name = name
        bpy.context.scene.objects.unlink(ob)
    return data
def get_arrow_bevel_circle(name="arrow_bevel",
                           radius=0.1):
    """Retrieve (or lazily create) the bezier-circle object used as a curve
    bevel profile for arrow shafts. Returns the object, not its data."""
    ob = bpy.context.blend_data.objects.get(name)
    if not ob:
        bpy.ops.curve.primitive_bezier_circle_add(
            location=(0,0,0), radius=radius)
        ob = bpy.context.active_object
        ob.name = name
    return ob
#--- Driver setting functions ------------------------------------------------#
def set_atom_drivers(context, atom, molecule):
    """Add/refresh the scale drivers of an atom object.

    The drivers make the object's scale follow the element radius (a scene
    property), the per-style atom scale and the molecule's bond radius,
    with the expression selected by molecule.draw_style. Targets are
    re-pointed on every call so element/radius-type changes take effect.
    """
    fc_list = atom.driver_add('scale', -1) # add new driver (all three axes)
    for fcurve in fc_list:
        drv = fcurve.driver
        drv.type = 'SCRIPTED'
        drv.show_debug_info = True
        var = drv.variables.get('atom_radius')
        if not var:
            var = drv.variables.new()
            var.name = 'atom_radius' # name to use in scripting
            var.type = 'SINGLE_PROP'
        targ = var.targets[0]
        targ.id_type = 'SCENE'
        targ.id = context.scene
        targ.data_path = 'mb.elements["{}"].{}'.format(atom.mb.element,
                                                       molecule.radius_type)
        var = drv.variables.get('atom_scale')
        if not var:
            var = drv.variables.new()
            var.name = 'atom_scale' # name to use in scripting
            var.type = 'SINGLE_PROP'
        targ = var.targets[0]
        targ.id_type = 'OBJECT'
        targ.id = atom
        targ.data_path = 'mb.parent.mb.molecule.atom_scales["{}"].val'.format(
            molecule.draw_style)
        var = drv.variables.get('bond_radius')
        if not var:
            var = drv.variables.new()
            var.name = 'bond_radius' # name to use in scripting
            var.type = 'SINGLE_PROP'
        targ = var.targets[0]
        targ.id_type = 'OBJECT'
        targ.id = atom
        # Fixed: the original called .format(molecule.name) on this literal,
        # which has no placeholder — a confusing no-op.
        targ.data_path = 'mb.parent.mb.molecule.bond_radius'
        if molecule.draw_style == 'BALLS':
            drv.expression = "atom_radius * atom_scale"
        elif molecule.draw_style == 'BAS':
            drv.expression = "max(atom_radius * atom_scale, bond_radius)"
        elif molecule.draw_style == 'STICKS':
            drv.expression = "bond_radius"
def set_bond_drivers(context, bond, molecule):
    """Add/refresh the x/z scale drivers of a bond object so its thickness
    follows the molecule's bond_radius property (y scale spans the atoms)."""
    fc_x = bond.driver_add('scale', 0)
    fc_z = bond.driver_add('scale', 2)
    for fcurve in (fc_x, fc_z):
        drv = fcurve.driver
        drv.type = 'AVERAGE'
        drv.show_debug_info = True
        var = drv.variables.get('bond_radius')
        if not var:
            var = drv.variables.new()
            var.name = 'bond_radius' # name to use in scripting
            var.type = 'SINGLE_PROP'
        targ = var.targets[0]
        targ.id_type = 'OBJECT'
        targ.id = bond
        # Fixed: the original called .format(molecule.name) on this literal,
        # which has no placeholder — a confusing no-op.
        targ.data_path = 'mb.parent.mb.molecule.bond_radius'
def calculate_displacement_t0(qvec, sc, evec):
    """Return (vector of per-axis maximum displacement, per-axis times of
    maximum displacement) for a complex mode eigenvector.

    qvec is the q-point, sc the atom's supercell offset, evec a pair
    (real part, imaginary part) of 3-component displacement vectors.
    The animation period is 20 frames.
    """
    period = 20
    q_dot_r = qvec[0]*sc[0] + qvec[1]*sc[1] + qvec[2]*sc[2]
    re_part = evec[0]
    im_part = evec[1]
    components = []
    t0s = []
    for axis in range(3):
        # time of maximum (positive) displacement along this axis
        t_max = period * (q_dot_r -
                          math.atan2(im_part[axis], re_part[axis])/(2*math.pi))
        t0s.append(t_max)
        phase = 2*math.pi*(q_dot_r - t_max/period)
        components.append(re_part[axis]*math.cos(phase) -
                          im_part[axis]*math.sin(phase))
    return Vector(components), t0s
def update_mode_action(atom_ob, mol, nmode=None):
    """Rewrite the keyframes of an atom's existing mode action to match the
    currently active q-point and mode (or *nmode* if given explicitly).

    Logs an error when the atom object has no action, e.g. because the
    molecule was changed after the modes were imported.
    """
    # T == animation period in frames; must match create_mode_action()
    T = 20
    action = atom_ob.animation_data.action
    # (an unused phase computation from mol.mode_arrows_phase was removed here)
    if action:
        iq = mol.active_nqpt
        qvec = mol.qvecs[iq].qvec
        sc = atom_ob.mb.supercell
        if nmode is None:
            nmode = mol.active_mode
        if nmode == 0:
            # mode 0 == equilibrium: no displacement
            realvec = (0,0,0)
            t_max = (0,0,0)
        else:
            disp = mol['mode']['displacements']
            evec = disp[atom_ob.mb.index%len(disp)]
            realvec, t_max = calculate_displacement_t0(qvec, sc, evec)
        for dim in range(3):
            # t0 == start time of animation, needs to be a negative number,
            # so that the animation is well underway at frame 0
            t0 = (t_max[dim] - T/4)
            t0 = t0 - T*(t0//T) - T
            fcu = action.fcurves[dim]
            vec = realvec[dim]
            for p in range(11):
                frame = 1 + t0 + 5*p - 5
                # alternate sign every two keyframes; odd keyframes carry the
                # amplitude, even ones sit at equilibrium
                disp = pow(-1, p//2)*vec * mol.mode_scale
                coords = disp if p%2 else 0.
                fcu.keyframe_points[p].co = (frame, coords)
    else:
        msg = "Trying to update mode action on "
        msg += "object {}, but it has no existing action.".format(atom_ob.name)
        msg += " Did you change the molecule after importing the modes?"
        logger.error(msg)
def create_mode_action(context, atom_ob, molecule):
    """Create (replacing any previous one) a ``delta_location`` action on
    *atom_ob* with the 11 keyframes of one phonon-mode oscillation.

    The keyframe values are all zero here; update_mode_action() fills in the
    actual displacements later.
    """
    anim_data = atom_ob.animation_data_create()
    atom_id = get_atom_id(molecule.name, atom_ob.mb.index)
    acname = "mode_{}".format(atom_id)
    # replace a stale action from a previous import
    oldaction = bpy.data.actions.get(acname)
    if oldaction:
        bpy.data.actions.remove(oldaction)
    action = bpy.data.actions.new(acname)
    anim_data.action = action
    # make new group
    atom_ob.update_tag(refresh={'OBJECT'})
    ag = action.groups.new("Delta_location")
    for dim in range(3):
        fcu = action.fcurves.new(data_path="delta_location", index=dim)
        fcu.group = ag
        fcu.keyframe_points.add(11)
        # We need two revolutions so that one full period is within 20 frames
        # The offset of -5 puts the equilibrium for q=0 to frame 1
        for p in range(11):
            fcu.keyframe_points[p].co = 1.0 + 5*p - 5, 0.
            fcu.keyframe_points[p].interpolation = 'SINE'
            # BUGFIX: the original used two independent ifs, so the
            # EASE_IN_OUT set for p == 0 was immediately overwritten by the
            # p % 2 == 0 branch (0 % 2 == 0); elif keeps the intended easing.
            if p == 0:
                fcu.keyframe_points[p].easing = "EASE_IN_OUT"
            elif p%2 == 0:
                fcu.keyframe_points[p].easing = "EASE_OUT"
            else:
                fcu.keyframe_points[p].easing = "AUTO"
        fcu.update()
def create_mode_arrow(context, atom_ob, mol, type='3D'):
    """Create (or reuse) an arrow object parented to *atom_ob* that
    visualizes the atom's animated phonon-mode displacement.

    Scale and orientation are driven from the atom's mode-action f-curves,
    evaluated one frame ahead/behind to get the displacement direction.
    Returns the arrow object, or None when *type* is unknown or the atom has
    no mode action.
    """
    if type not in ("3D", ):
        logger.error("type '{}' not recognized in draw_mode_arrow".format(type))
        return None
    try:
        # probe that the atom has an action with at least 3 f-curves
        atom_ob.animation_data.action.fcurves[2]
    except (AttributeError, IndexError):
        # animation_data/action is None, or fewer than 3 f-curves exist.
        # BUGFIX: the original formatted atom_ob.format (no such attribute,
        # which would itself raise inside the handler) instead of the name.
        msg = "atom object '{}' has no action to drive mode arrow."
        logger.error(msg.format(atom_ob.name))
        return None
    material = bpy.data.materials.get('mode_arrows_{}'.format(mol.name))
    if not material:
        material = new_material('mode_arrows_{}'.format(mol.name),
                                color=(.8,0,0))
    if type == '3D':
        mesh_name = "mode_arrow_{}".format(mol.name)
        arrow_ob = atom_ob.mb.mode_arrow
        if not arrow_ob:
            me = get_arrow_data(type='MESH', name=mesh_name,
                                radius = 0.05, ring_y = 0.75, ring_scale = 2)
            ob_name = "mode_vec_{}".format(get_atom_id(mol.name, atom_ob.mb.index))
            arrow_ob = bpy.data.objects.new(ob_name, me)
            arrow_ob.parent = atom_ob
            arrow_ob.material_slots[0].material = material
            context.scene.objects.link(arrow_ob)
            atom_ob.mb.mode_arrow = arrow_ob
            mol.add_object(arrow_ob, parent_to_mol=False, type='MODE_ARROW')
        arrow_ob.hide = not mol.show_mode_arrows
        arrow_ob.hide_render = not mol.show_mode_arrows
        arrow_ob.rotation_mode = 'QUATERNION'
        # scale follows the magnitude of the animated displacement
        fc_list = arrow_ob.driver_add('scale', -1)
        for dim, fcurve in enumerate(fc_list):
            drv = fcurve.driver
            drv.type = 'SCRIPTED'
            drv.show_debug_info = True
            drv.use_self = True
            var = drv.variables.get('arrow_scale')
            if not var:
                var = drv.variables.new()
            var.name = 'arrow_scale'
            var.type = 'SINGLE_PROP'
            targ = var.targets[0]
            targ.id_type = 'OBJECT'
            targ.id = atom_ob
            targ.data_path = "mb.parent.mb.molecule.mode_arrows_scale"
            expr = ("(Vector([fcu.evaluate(frame+1) for fcu in {fcus}])"
                    "-Vector([fcu.evaluate(frame-1) for fcu in {fcus}])).length"
                    "* arrow_scale")
            expr = expr.format(fcus="self.parent.animation_data.action.fcurves")
            drv.expression = expr
        # rotation aligns the arrow's +y axis with the displacement direction
        fc_list = arrow_ob.driver_add('rotation_quaternion', -1) # add new driver
        for dim, fcurve in enumerate(fc_list):
            drv = fcurve.driver
            drv.type = 'SCRIPTED'
            drv.show_debug_info = True
            drv.use_self = True
            expr = ("Vector((0,1,0)).rotation_difference("
                    "(Vector([fcu.evaluate(frame+1) for fcu in {fcus}])"
                    "-Vector([fcu.evaluate(frame-1) for fcu in {fcus}]))"
                    "[{dim}]")
            expr = expr.format(fcus="self.parent.animation_data.action.fcurves",
                               dim=dim)
            drv.expression = expr
    return arrow_ob
def remove_mode_arrows(mol, context):
    """Delete all mode-arrow objects of *mol*, unlinking them from the scene
    first."""
    # Iterate over a snapshot: mol.remove_object() presumably mutates the
    # collection we would otherwise be iterating (TODO confirm), and
    # bpy.data.objects.remove() certainly invalidates the object.
    for ob in list(mol.objects.mode_arrows.objects):
        mol.remove_object(ob)
        if ob.name in context.scene.objects:
            context.scene.objects.unlink(ob)
        bpy.data.objects.remove(ob)
def draw_dipole(mol, dipole_vec, context):
    """Create the dipole visualization for *mol*: an origin empty, a target
    empty placed at *dipole_vec*, and an arrow (bevelled curve + head)
    between them.

    Any previous dipole objects are removed first.  Returns the list
    [origin_ob, dipole_ob, curve_ob, head_ob].
    """
    remove_dipole(mol, context)
    radius = 0.08
    head_radius = .2
    head_depth = .5
    # NOTE(review): all_obs is appended to below but never read afterwards —
    # candidate for removal.
    all_obs = []
    # add empty as origin
    origin_ob = bpy.data.objects.new("Dipole_{}".format(mol.name),
                                     None)
    origin_ob.empty_draw_size = 0.5
    context.scene.objects.link(origin_ob)
    origin_ob.parent = mol.objects.parent
    mol.objects.dipole.origin = origin_ob
    origin_ob.mb.parent = mol.objects.parent
    all_obs.append(origin_ob)
    # add empty as stretch target
    dipole_ob = bpy.data.objects.new(
        "dipole_target_{}".format(mol.name), None)
    dipole_ob.empty_draw_type = 'SINGLE_ARROW'
    dipole_ob.empty_draw_size = 0.5
    context.scene.objects.link(dipole_ob)
    mol.objects.dipole.target = dipole_ob
    dipole_ob.location = dipole_vec
    dipole_ob.parent = origin_ob
    dipole_ob.mb.parent = mol.objects.parent
    all_obs.append(dipole_ob)
    # get or create the per-molecule dipole material (default red)
    material = bpy.data.materials.get('dipole_{}'.format(mol.name))
    if not material:
        material = new_material('dipole_{}'.format(mol.name),
                                color=(.8,0,0))
    head_mesh = get_arrow_head_data(
        name='dipole_head_{}'.format(mol.name),
        radius=head_radius, depth=head_depth
        )
    bevel_ob = get_arrow_bevel_circle(
        radius=radius,
        name="dipole_bevel_{}".format(mol.name)
        )
    # add arrow object
    curve_ob, head_ob = draw_arrow(
        context, origin_ob, dipole_ob, head_mesh, bevel_ob, material=material,
        name_pre="dipole", name_post=mol.name
        )
    # register the helper objects with the molecule (not parented to it)
    for ob in (bevel_ob, curve_ob, head_ob):
        ob.parent = origin_ob
        all_obs.append(ob)
        mol.add_object(ob, parent_to_mol=False, type='DIPOLE')
    return [origin_ob, dipole_ob, curve_ob, head_ob]
#--- Unit cell functions -----------------------------------------------------#
def draw_arrow(context, origin, target, head_mesh, bevel_ob,
               material=None,
               name_pre="", name_post=""):
    """Create an arrow from *origin* to *target*: a driver-controlled Bezier
    curve as the shaft plus a mesh arrowhead that follows the curve's end.

    origin, target: objects whose (local) locations span the arrow.
    head_mesh: mesh datablock used for the arrowhead object.
    bevel_ob: curve object used as bevel profile for the shaft.
    material: optional material linked (per-object) to shaft and head.
    name_pre, name_post: decorations for the created datablock names.

    Returns [curve_ob, head_ob].
    """
    if name_pre:
        name_pre = "{}_".format(name_pre)
    if name_post:
        name_post = "_{}".format(name_post)
    cu = bpy.data.curves.new(
        "{}bz{}".format(name_pre, name_post),
        type='CURVE'
        )
    cu.dimensions = '3D'
    cu.resolution_u = 1
    cu.use_fill_caps = True
    bz = cu.splines.new('BEZIER')
    # ensure the spline has exactly two control points
    bz.bezier_points.add(2-len(bz.bezier_points))
    bz.bezier_points[0].co = (0, 0, 0)
    # add driver to second point
    bp = bz.bezier_points[1]
    fc_list = bp.driver_add("co", -1)
    for dim, fcurve in enumerate(fc_list):
        drv = fcurve.driver
        drv.type = 'AVERAGE'
        drv.show_debug_info = True
        var_name = name_pre
        var = drv.variables.get(var_name)
        if not var:
            var = drv.variables.new()
        var.name = var_name
        var.type = 'TRANSFORMS'
        targ = var.targets[0]
        targ.id = target
        # follow the target's local location on this axis
        targ.transform_type = "LOC_{}".format('XYZ'[dim])
        targ.transform_space = 'LOCAL_SPACE'
    curve_ob = bpy.data.objects.new(
        "{}bz{}".format(name_pre, name_post), cu)
    context.scene.objects.link(curve_ob)
    if material:
        curve_ob.data.materials.append(None)
        curve_ob.material_slots[0].link = 'OBJECT'
        curve_ob.material_slots[0].material = material
    head_ob = bpy.data.objects.new(
        "{}arrowhead{}".format(name_pre, name_post),
        head_mesh)
    context.scene.objects.link(head_ob)
    if material:
        head_ob.data.materials.append(None)
        head_ob.material_slots[0].link = 'OBJECT'
        head_ob.material_slots[0].material = material
    mod = head_ob.modifiers.new(name='mb.bevel', type='BEVEL')
    mod.width = 0.01
    mod.segments = 3
    mod.limit_method = 'ANGLE'
    # NOTE(review): the Blender API expects angle_limit in radians; 90 here
    # is ~5157 degrees — confirm whether radians(90) was intended.
    mod.angle_limit = 90
    # The next two constraints are needed to keep arrowhead in place
    # even if unit cell is moved
    c = head_ob.constraints.new('COPY_LOCATION')
    c.name = "mb.loc"
    c.target = curve_ob
    c.target_space = 'LOCAL'
    c.owner_space = 'WORLD'
    c = head_ob.constraints.new('COPY_ROTATION')
    c.name = "mb.rot"
    c.target = curve_ob
    c.target_space = 'LOCAL'
    c.owner_space = 'WORLD'
    # puts the arrow head at the end of the path
    c = head_ob.constraints.new('FOLLOW_PATH')
    c.name = "mb.head"
    c.target = curve_ob
    c.use_curve_follow = True
    c.use_fixed_location = True
    c.offset_factor = 1.0
    c.forward_axis = 'FORWARD_Z'
    c.up_axis = 'UP_Y'
    cu.bevel_object = bevel_ob
    cu.bevel_factor_mapping_start = 'SPLINE'
    cu.bevel_factor_mapping_end = 'SPLINE'
    # add driver to bevel end so that bevel ends just after head begins
    fcurve = cu.driver_add('bevel_factor_end')
    drv = fcurve.driver
    drv.type = 'SCRIPTED'
    drv.show_debug_info = True
    var_name = 'length'
    var = drv.variables.get(var_name)
    if not var:
        var = drv.variables.new()
    var.name = var_name
    # distance between origin and target supplies the normalization
    var.type = 'LOC_DIFF'
    targ = var.targets[0]
    targ.id = origin
    targ = var.targets[1]
    targ.id = target
    drv.expression = "1 - {}/length + 0.01".format(head_ob.dimensions[2])
    return [curve_ob, head_ob]
def draw_unit_cell(molecule, context, draw_style='ARROWS'):
    """Create the unit-cell visualization for *molecule*: an origin empty, a
    wireframe cell frame whose corner vertices are driven by three axis
    empties (a, b, c), and — when 'ARROWS' is in *draw_style* — arrows along
    the three lattice vectors.

    With more than one stored unit cell the axis empties get a keyframed
    location animation (one keyframe per frame).  Returns the list of created
    objects, or None when no unit-cell information is present.
    """
    mol_uc = molecule.objects.unit_cell
    # first remove old unit cell if present
    remove_unit_cell(molecule, context)
    all_obs = []
    special_obs = []
    if not "unit_cells" in molecule or not molecule["unit_cells"]:
        logger.error("No unit cell information present")
        return None
    unit_vectors = Matrix(molecule["unit_cells"][0])
    # first create new empty as origin
    uc_origin = bpy.data.objects.new("Unit_cell_{}".format(molecule.name),
                                     None)
    uc_origin.empty_draw_type = "CUBE"
    uc_origin.empty_draw_size = 0.3
    context.scene.objects.link(uc_origin)
    uc_origin.parent = molecule.objects.parent
    special_obs.append(uc_origin)
    mol_uc.origin = uc_origin
    # now create the unit cell frame
    # upper case so it shows up at the top of the outliner list
    me = bpy.data.meshes.new("Unit_cell_{}".format(molecule.name))
    # setting the coordinates is technically unneccesary, since I'm adding
    # drivers later. It does look nicer though then setting everything to 0.
    coords = (
        (0,0,0), #0, O
        unit_vectors[0], #1, x
        unit_vectors[0] + unit_vectors[1], #2, xy
        unit_vectors[1], #3, y
        unit_vectors[2], #4, z
        unit_vectors[0] + unit_vectors[2], #5, xz
        unit_vectors[1] + unit_vectors[2], #6, yz
        unit_vectors[0] + unit_vectors[1] + unit_vectors[2], #7, xyz
        )
    faces = (
        (0, 3, 2, 1),
        (0, 4, 6, 3),
        (0, 1, 5, 4),
        (7, 6, 4, 5),
        (7, 5, 1, 2),
        (7, 2, 3, 6),
        )
    me.from_pydata(coords, [], faces)
    uc_cube = bpy.data.objects.new("uc_frame_{}".format(molecule.name), me)
    context.scene.objects.link(uc_cube)
    uc_cube.parent = uc_origin
    # instead of drawing in wireframe, add wireframe modifier, so it's ready
    # to be rendered
    mod = uc_cube.modifiers.new("mb.wireframe", 'WIREFRAME')
    mod.thickness = 0.1
    mod.crease_weight = 1.0
    mod.offset = -1
    all_obs.append(uc_cube)
    # add empties
    for axdim, ax in enumerate(unit_vectors):
        uc_empty = bpy.data.objects.new(
            "{}_uc_{}".format("abc"[axdim], molecule.name), None)
        uc_empty.empty_draw_type = 'ARROWS'
        uc_empty.empty_draw_size = 0.5
        context.scene.objects.link(uc_empty)
        uc_empty.location = ax
        uc_empty.parent = uc_origin
        special_obs.append(uc_empty)
        # store as mol_uc.a / .b / .c
        setattr(mol_uc, "abc"[axdim], uc_empty)
        if len(molecule["unit_cells"]) > 1:
            # animated lattice: keyframe the empty's location for every frame
            anim_data = uc_empty.animation_data_create()
            action = bpy.data.actions.new(
                name="frames_{}".format(uc_empty.name)
                )
            anim_data.action = action
            ag = action.groups.new("Location")
            for dim in range(3):
                fcu = action.fcurves.new(data_path="location", index=dim)
                fcu.group = ag
                for nf in range(len(molecule["unit_cells"])):
                    loc = molecule["unit_cells"][nf][axdim][dim]
                    fcu.keyframe_points.add(1)
                    fcu.keyframe_points[-1].co = nf + 1, loc
                    fcu.keyframe_points[-1].interpolation = 'LINEAR'
    # Now set drivers of vertex coordinates to empties
    for i, axes in enumerate((
            #[], #0, O
            [0], #1, x
            [0, 1], #2, xy
            [1], #3, y
            [2], #4, z
            [0, 2], #5, xz
            [1, 2], #6, yz
            [0, 1, 2], #7, xyz
            )):
        # vertex 0 (the origin) stays fixed, hence i+1
        v = me.vertices[i+1]
        fc_list = v.driver_add("co", -1)
        for dim, fcurve in enumerate(fc_list):
            drv = fcurve.driver
            drv.type = 'SCRIPTED'
            drv.show_debug_info = True
            expr = []
            for axdim in axes:
                var_name = "{}_{}".format('abc'[axdim], 'xyz'[dim])
                var = drv.variables.get(var_name)
                if not var:
                    var = drv.variables.new()
                var.name = var_name
                var.type = 'TRANSFORMS'
                targ = var.targets[0]
                #targ.id_type = 'OBJECT'
                targ.id = getattr(mol_uc, "abc"[axdim])
                targ.transform_type = "LOC_{}".format('XYZ'[dim])
                targ.transform_space = 'LOCAL_SPACE'
                expr.append(var_name)
            # e.g. vertex 7 gets "a_x + b_x + c_x" on its x channel
            drv.expression = " + ".join(expr)
    # vertex groups marking the corner vertex of each axis
    vg = []
    vg.append(uc_cube.vertex_groups.new('a'))
    vg[-1].add([1], 1, 'REPLACE')
    vg.append(uc_cube.vertex_groups.new('b'))
    vg[-1].add([3], 1, 'REPLACE')
    vg.append(uc_cube.vertex_groups.new('c'))
    vg[-1].add([4], 1, 'REPLACE')
    if 'ARROWS' in draw_style:
        radius = 0.08
        head_radius = .2
        head_depth = .5
        # get material
        material = bpy.data.materials.get('axes_{}'.format(molecule.name))
        if not material:
            material = new_material('axes_{}'.format(molecule.name),
                                    color=(1,0,0))
        # add sphere at origin
        bpy.ops.mesh.primitive_uv_sphere_add(location=(0,0,0), size=radius,
                                             segments=8, ring_count=8)
        sphere_ob = context.object
        sphere_ob.name = "unit_cell_origin_{}".format(molecule.name)
        #ob.parent_type = 'VERTEX'
        sphere_ob.data.materials.append(None)
        sphere_ob.material_slots[0].link = 'OBJECT'
        sphere_ob.material_slots[0].material = material
        all_obs.append(sphere_ob)
        head_mesh = get_arrow_head_data(
            name='arrow_head_{}'.format(molecule.name),
            radius=head_radius, depth=head_depth
            )
        bevel_ob = get_arrow_bevel_circle(
            radius=radius,
            name="arrow_bevel_{}".format(molecule.name)
            )
        all_obs.append(bevel_ob)
        # one arrow per lattice vector, from the origin to the axis empty
        for axdim, vec in enumerate(unit_vectors):
            origin = uc_origin
            target = getattr(mol_uc, "abc"[axdim])
            arrow_obs = draw_arrow(context, origin, target,
                                   head_mesh, bevel_ob, material=material,
                                   name_pre="{}_uc".format("abc"[axdim]),
                                   name_post=molecule.name)
            all_obs.extend(arrow_obs)
    for ob in all_obs:
        ob.parent = uc_origin
        ob.mb.parent = molecule.objects.parent
        molecule.add_object(ob, parent_to_mol=False, type='UC')
    for ob in special_obs:
        ob.mb.type = 'UC'
        ob.mb.parent = molecule.objects.parent
    return all_obs + special_obs
def remove_dipole(mol, context):
    """Delete the dipole visualization of *mol*: the origin/target empties
    and all registered dipole helper objects."""
    for o in ("origin", "target"):
        ob = getattr(mol.objects.dipole, o, None)
        if ob:
            if ob.name in context.scene.objects:
                context.scene.objects.unlink(ob)
            bpy.data.objects.remove(ob)
            # clear the dangling pointer
            setattr(mol.objects.dipole, o, None)
    # Iterate over a snapshot: mol.remove_object() presumably mutates the
    # collection being iterated (TODO confirm).
    for ob in list(mol.objects.dipole.objects):
        mol.remove_object(ob)
        if ob.name in context.scene.objects:
            context.scene.objects.unlink(ob)
        bpy.data.objects.remove(ob)
def remove_unit_cell(mol, context):
    """Delete the unit-cell visualization of *mol*: origin empty, the three
    axis empties (a, b, c) and all registered helper objects."""
    mol_uc = mol.objects.unit_cell
    ob = mol_uc.origin
    if ob:
        if ob.name in context.scene.objects:
            context.scene.objects.unlink(ob)
        bpy.data.objects.remove(ob)
    for ax in "abc":
        ob = getattr(mol_uc, ax, None)
        if ob:
            if ob.name in context.scene.objects:
                context.scene.objects.unlink(ob)
            bpy.data.objects.remove(ob)
            # clear the dangling pointer
            setattr(mol_uc, ax, None)
    # Iterate over a snapshot: mol.remove_object() presumably mutates the
    # collection being iterated (TODO confirm).
    for ob in list(mol_uc.objects):
        mol.remove_object(ob)
        if ob.name in context.scene.objects:
            context.scene.objects.unlink(ob)
        bpy.data.objects.remove(ob)
#--- Material functions ------------------------------------------------------#
def new_material(name, color=(0.8, 0.8, 0.8, 1.0)):
    '''
    Create and return a new material *name* with diffuse color *color*.

    color may have 3 (RGB) or 4 (RGBA) components.  Under Cycles a
    Principled BSDF node tree is built and the viewport diffuse_color is
    driven by the node's base color, so editing the node color updates the
    viewport color as well.  Under other engines only diffuse_color is set.
    '''
    material = bpy.data.materials.new(name)
    if bpy.context.scene.render.engine == 'CYCLES':
        material.use_nodes = True
        # add driver to rendered color to be the same as display color
        nodes = material.node_tree.nodes
        links = material.node_tree.links
        nodes.clear()
        principled = nodes.new(type='ShaderNodeBsdfPrincipled')
        principled.name = "mb.principled"
        try:
            principled.inputs[0].default_value = color
        except (ValueError, TypeError):
            # color had only 3 components; pad with opaque alpha
            color_alpha = (color[0], color[1], color[2], 1.0)
            principled.inputs[0].default_value = color_alpha
        output_mat = nodes.new(type='ShaderNodeOutputMaterial')
        links.new(principled.outputs[0], output_mat.inputs[0])
        for i in range(3): # not for alpha channel
            fcurve = material.driver_add('diffuse_color', i)
            drv = fcurve.driver
            drv.type = 'AVERAGE'
            drv.show_debug_info = True
            var = drv.variables.new()
            var.name = 'diffuse_color' # name to use in scripting
            var.type = 'SINGLE_PROP'
            targ = var.targets[0]
            targ.id_type = 'MATERIAL'
            targ.id = material
            targ.data_path = ('node_tree.nodes["mb.principled"].inputs[0]'
                              + '.default_value[{}]'.format(i))
    else:
        material.diffuse_color = color[:3]
    return material
def assign_atom_material(ob, molecule):
    """Assign the per-molecule element material to atom object *ob*,
    creating the material on first use for this element/molecule pair."""
    # guarantee at least one material slot exists
    if not ob.material_slots:
        ob.data.materials.append(None)
    # material name is unique per element and molecule
    mat_name = "mat_{}_{}".format(ob.mb.element, molecule.name)
    material = bpy.data.materials.get(mat_name)
    if material is None:
        # color comes from the scene-wide element table, falling back to
        # the "Default" entry for unknown elements
        scn_elements = bpy.context.scene.mb.elements
        entry = scn_elements.get(ob.mb.element, scn_elements["Default"])
        material = new_material(mat_name, color=entry.color)
    # finally, link the material through the mesh data on slot 0
    ob.material_slots[0].link = 'DATA'
    ob.material_slots[0].material = material
def _bond_end_material(mol, atom):
    """Return the material for one end of a bond, following the molecule's
    bond_material setting: 'ATOMS' uses the atom's own material, 'GENERIC'
    uses (and lazily creates) the molecule's generic bond material.
    Returns None for any other setting."""
    mat = None
    if mol.bond_material == 'ATOMS':
        mat = atom.material_slots[0].material
    elif mol.bond_material == 'GENERIC':
        mat = mol.bond_generic_material
        if not mat:
            mat_name = "mat_bond_{}".format(mol.name)
            mat = bpy.data.materials.get(mat_name)
            if not mat:
                mat = new_material(mat_name)
            mol.bond_generic_material = mat
    return mat

def assign_bond_material(ob):
    """Assign materials to both halves of bond object *ob*.

    Slot 0 follows the molecule of the first bonded atom, slot 1 the
    molecule of the second, so half-bonds between molecules can differ.
    (Unused locals bond_type/bond_mol from the original were removed; the
    bond's own molecule properties are deliberately not consulted.)
    """
    first_atom = ob.mb.bonded_atoms[0]
    second_atom = ob.mb.bonded_atoms[1]
    first_mat = _bond_end_material(first_atom.mb.get_molecule(), first_atom)
    second_mat = _bond_end_material(second_atom.mb.get_molecule(), second_atom)
    # make sure to have at least two material slots
    for i in range(2 - len(ob.material_slots)):
        ob.data.materials.append(None)
    ob.material_slots[0].link = 'OBJECT'
    ob.material_slots[1].link = 'OBJECT'
    ob.material_slots[0].material = first_mat
    ob.material_slots[1].material = second_mat
def is_inside_of_planes(planes, loc, flip=False):
    """Test whether *loc* lies inside the convex region bounded by *planes*.

    planes: iterable of (normal, point_on_plane) pairs.
    flip: inverts the result, i.e. returns True when the point is outside.

    Returns ``not flip`` when the point is on the inner side of every plane,
    ``flip`` as soon as one plane is violated.
    """
    point = Vector(loc)
    # the point is outside as soon as one plane normal faces away from it
    outside = any((anchor - point).dot(normal) < 0
                  for normal, anchor in planes)
    return flip if outside else not flip
|
floaltvater/molblend
|
mb_utils.py
|
Python
|
gpl-2.0
| 57,132
|
[
"ABINIT",
"Quantum ESPRESSO",
"phonopy"
] |
15a9760aee70794b22c1e3c418086cd6ceaf7f6f9787b3209308b2d0fa592588
|
import os
import subprocess
import sys
import threading
import functools
import contextlib
import logging
import re
import time
from StringIO import StringIO
from test import test_support
from concurrent import futures
from concurrent.futures._base import (
PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future)
try:
import unittest2 as unittest
except ImportError:
import unittest
def reap_threads(func):
    """Wrap *func* so that any threads it starts are cleaned up afterwards,
    even when the wrapped test fails.  Does nothing extra when threading is
    unavailable.
    """
    @functools.wraps(func)
    def wrapper(*args):
        # snapshot the thread state before, restore/cleanup after
        snapshot = test_support.threading_setup()
        try:
            return func(*args)
        finally:
            test_support.threading_cleanup(*snapshot)
    return wrapper
# Executing the interpreter in a subprocess
def _assert_python(expected_success, *args, **env_vars):
    """Run sys.executable with *args* in a subprocess and check its exit
    status against *expected_success*.

    Extra keyword arguments become environment variables; the special
    __cleanenv=True starts from an empty environment instead of a copy of
    os.environ.  Returns (returncode, stdout, stderr) with stderr stripped
    of interpreter debug output; raises AssertionError on a status mismatch.
    """
    cmd_line = [sys.executable]
    if not env_vars:
        # -E ignores PYTHON* env vars — only safe when the caller did not
        # ask for a customized environment
        cmd_line.append('-E')
    # Need to preserve the original environment, for in-place testing of
    # shared library builds.
    env = os.environ.copy()
    # But a special flag that can be set to override -- in this case, the
    # caller is responsible to pass the full environment.
    if env_vars.pop('__cleanenv', None):
        env = {}
    env.update(env_vars)
    cmd_line.extend(args)
    p = subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         env=env)
    try:
        out, err = p.communicate()
    finally:
        # NOTE(review): subprocess._cleanup() is a private CPython helper
        # (present in py2 / early py3) — reaps leftover child processes
        subprocess._cleanup()
        p.stdout.close()
        p.stderr.close()
    rc = p.returncode
    err = strip_python_stderr(err)
    if (rc and expected_success) or (not rc and not expected_success):
        raise AssertionError(
            "Process return code is %d, "
            "stderr follows:\n%s" % (rc, err.decode('ascii', 'ignore')))
    return rc, out, err
def assert_python_ok(*args, **env_vars):
    """
    Run the interpreter with *args* (plus optional environment variables
    *env_vars*), assert that it exits successfully, and return a
    (return code, stdout, stderr) tuple.
    """
    result = _assert_python(True, *args, **env_vars)
    return result
def strip_python_stderr(stderr):
    """Strip interpreter debug output from the stderr bytes of a Python
    process (the trailing "[NNN refs]" marker printed by debug builds),
    then strip surrounding whitespace.

    Typically applied to the stderr returned by Popen.communicate().
    """
    refs_marker = br"\[\d+ refs\]\r?\n?$"
    cleaned = re.sub(refs_marker, b"", stderr)
    return cleaned.strip()
@contextlib.contextmanager
def captured_stderr():
    """Capture output routed through the ``logging`` module (not
    ``sys.stderr`` itself, despite the name): a StreamHandler writing into a
    StringIO is attached to the root logger for the duration of the block,
    and that StringIO is yielded to the caller.
    """
    logging_stream = StringIO()
    handler = logging.StreamHandler(logging_stream)
    logging.root.addHandler(handler)
    try:
        yield logging_stream
    finally:
        # always detach, even if the block raised
        logging.root.removeHandler(handler)
def create_future(state=PENDING, exception=None, result=None):
    """Build a Future fixture pre-set to the given internal state, with an
    optional stored exception or result (test helper)."""
    future = Future()
    # poke the private attributes directly — only valid in tests
    future._state = state
    future._exception = exception
    future._result = result
    return future
# Shared Future fixtures, one per interesting state, used by the wait,
# as_completed and repr tests below.
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=IOError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
def mul(x, y):
    """Return the product of *x* and *y* (simple picklable executor task)."""
    product = x * y
    return product
def sleep_and_raise(t):
    """Sleep for *t* seconds, then unconditionally raise a plain Exception
    (used to exercise executor error propagation)."""
    time.sleep(t)
    raise Exception('this is an exception')
def sleep_and_print(t, msg):
    """Sleep for *t* seconds, print *msg* and flush stdout.

    Flushing makes the output reliable when run inside a subprocess worker.
    """
    time.sleep(t)
    print(msg)
    sys.stdout.flush()
class ExecutorMixin:
    """Shared setUp/tearDown for executor test cases.

    Subclasses supply ``executor_type``; each test gets a fresh executor
    with ``worker_count`` workers, shut down again in tearDown.
    """
    worker_count = 5

    def setUp(self):
        self.t1 = time.time()
        try:
            self.executor = self.executor_type(max_workers=self.worker_count)
        except NotImplementedError:
            # sys.exc_info() instead of "except ... as e" for old-Python
            # compatibility
            e = sys.exc_info()[1]
            self.skipTest(str(e))
        self._prime_executor()

    def tearDown(self):
        self.executor.shutdown(wait=True)
        dt = time.time() - self.t1
        if test_support.verbose:
            print("%.2fs" % dt)
        self.assertLess(dt, 60, "synchronization issue: test lasted too long")

    def _prime_executor(self):
        # Make sure that the executor is ready to do work before running the
        # tests. This should reduce the probability of timeouts in the tests.
        futures = [self.executor.submit(time.sleep, 0.1)
                   for _ in range(self.worker_count)]
        for f in futures:
            f.result()
class ThreadPoolMixin(ExecutorMixin):
    """Run the inherited executor tests against ThreadPoolExecutor."""
    executor_type = futures.ThreadPoolExecutor
class ProcessPoolMixin(ExecutorMixin):
    """Run the inherited executor tests against ProcessPoolExecutor."""
    executor_type = futures.ProcessPoolExecutor
class ExecutorShutdownTest(unittest.TestCase):
    """Shutdown behaviour shared by both executor types.

    Combined with ThreadPoolMixin/ProcessPoolMixin, which provide
    ``self.executor`` and ``executor_type``.
    """
    def test_run_after_shutdown(self):
        # submit() after shutdown must be rejected with RuntimeError
        self.executor.shutdown()
        self.assertRaises(RuntimeError,
                          self.executor.submit,
                          pow, 2, 5)

    def test_interpreter_shutdown(self):
        # Test the atexit hook for shutdown of worker threads and processes
        rc, out, err = assert_python_ok('-c', """if 1:
            from concurrent.futures import %s
            from time import sleep
            from test_futures import sleep_and_print
            t = %s(5)
            t.submit(sleep_and_print, 1.0, "apple")
            """ % (self.executor_type.__name__, self.executor_type.__name__))
        # Errors in atexit hooks don't change the process exit code, check
        # stderr manually.
        self.assertFalse(err)
        self.assertEqual(out.strip(), "apple".encode())

    def test_hang_issue12364(self):
        # futures submitted before shutdown() must still complete afterwards
        fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
        self.executor.shutdown()
        for f in fs:
            f.result()
class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest):
    """Thread-pool specific shutdown behaviour."""
    def _prime_executor(self):
        # shutdown tests control worker startup themselves
        pass

    def test_threads_terminate(self):
        self.executor.submit(mul, 21, 2)
        self.executor.submit(mul, 6, 7)
        self.executor.submit(mul, 3, 14)
        # one worker thread is spawned lazily per outstanding task
        self.assertEqual(len(self.executor._threads), 3)
        self.executor.shutdown()
        for t in self.executor._threads:
            t.join()

    def test_context_manager_shutdown(self):
        with futures.ThreadPoolExecutor(max_workers=5) as e:
            executor = e
            self.assertEqual(list(e.map(abs, range(-5, 5))),
                             [5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
        # leaving the with-block must have stopped all worker threads
        for t in executor._threads:
            t.join()

    def test_del_shutdown(self):
        executor = futures.ThreadPoolExecutor(max_workers=5)
        executor.map(abs, range(-5, 5))
        threads = executor._threads
        del executor
        # garbage collection of the executor must also stop its threads
        for t in threads:
            t.join()
class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTest):
    """Process-pool specific shutdown behaviour."""
    def _prime_executor(self):
        # shutdown tests control worker startup themselves
        pass

    def test_processes_terminate(self):
        self.executor.submit(mul, 21, 2)
        self.executor.submit(mul, 6, 7)
        self.executor.submit(mul, 3, 14)
        # the process pool spawns all max_workers processes up front
        self.assertEqual(len(self.executor._processes), 5)
        processes = self.executor._processes
        self.executor.shutdown()
        for p in processes:
            p.join()

    def test_context_manager_shutdown(self):
        with futures.ProcessPoolExecutor(max_workers=5) as e:
            processes = e._processes
            self.assertEqual(list(e.map(abs, range(-5, 5))),
                             [5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
        # leaving the with-block must have stopped all worker processes
        for p in processes:
            p.join()

    def test_del_shutdown(self):
        executor = futures.ProcessPoolExecutor(max_workers=5)
        list(executor.map(abs, range(-5, 5)))
        queue_management_thread = executor._queue_management_thread
        processes = executor._processes
        del executor
        # garbage collection must stop the management thread and the workers
        queue_management_thread.join()
        for p in processes:
            p.join()
class WaitTests(unittest.TestCase):
    """futures.wait() behaviour under the different return_when modes.

    Combined with ThreadPoolMixin/ProcessPoolMixin, which provide
    ``self.executor``.
    """
    def test_first_completed(self):
        future1 = self.executor.submit(mul, 21, 2)
        future2 = self.executor.submit(time.sleep, 1.5)
        done, not_done = futures.wait(
            [CANCELLED_FUTURE, future1, future2],
            return_when=futures.FIRST_COMPLETED)
        self.assertEqual(set([future1]), done)
        # a merely-cancelled (not notified) future does not count as done
        self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)

    def test_first_completed_some_already_completed(self):
        future1 = self.executor.submit(time.sleep, 1.5)
        finished, pending = futures.wait(
            [CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
            return_when=futures.FIRST_COMPLETED)
        self.assertEqual(
            set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
            finished)
        self.assertEqual(set([future1]), pending)

    def test_first_exception(self):
        future1 = self.executor.submit(mul, 2, 21)
        future2 = self.executor.submit(sleep_and_raise, 1.5)
        future3 = self.executor.submit(time.sleep, 3)
        finished, pending = futures.wait(
            [future1, future2, future3],
            return_when=futures.FIRST_EXCEPTION)
        self.assertEqual(set([future1, future2]), finished)
        self.assertEqual(set([future3]), pending)

    def test_first_exception_some_already_complete(self):
        future1 = self.executor.submit(divmod, 21, 0)
        future2 = self.executor.submit(time.sleep, 1.5)
        finished, pending = futures.wait(
            [SUCCESSFUL_FUTURE,
             CANCELLED_FUTURE,
             CANCELLED_AND_NOTIFIED_FUTURE,
             future1, future2],
            return_when=futures.FIRST_EXCEPTION)
        self.assertEqual(set([SUCCESSFUL_FUTURE,
                              CANCELLED_AND_NOTIFIED_FUTURE,
                              future1]), finished)
        self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)

    def test_first_exception_one_already_failed(self):
        future1 = self.executor.submit(time.sleep, 2)
        finished, pending = futures.wait(
            [EXCEPTION_FUTURE, future1],
            return_when=futures.FIRST_EXCEPTION)
        self.assertEqual(set([EXCEPTION_FUTURE]), finished)
        self.assertEqual(set([future1]), pending)

    def test_all_completed(self):
        future1 = self.executor.submit(divmod, 2, 0)
        future2 = self.executor.submit(mul, 2, 21)
        finished, pending = futures.wait(
            [SUCCESSFUL_FUTURE,
             CANCELLED_AND_NOTIFIED_FUTURE,
             EXCEPTION_FUTURE,
             future1,
             future2],
            return_when=futures.ALL_COMPLETED)
        self.assertEqual(set([SUCCESSFUL_FUTURE,
                              CANCELLED_AND_NOTIFIED_FUTURE,
                              EXCEPTION_FUTURE,
                              future1,
                              future2]), finished)
        self.assertEqual(set(), pending)

    def test_timeout(self):
        future1 = self.executor.submit(mul, 6, 7)
        future2 = self.executor.submit(time.sleep, 3)
        # the 3s sleep cannot finish within the 1.5s timeout
        finished, pending = futures.wait(
            [CANCELLED_AND_NOTIFIED_FUTURE,
             EXCEPTION_FUTURE,
             SUCCESSFUL_FUTURE,
             future1, future2],
            timeout=1.5,
            return_when=futures.ALL_COMPLETED)
        self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
                              EXCEPTION_FUTURE,
                              SUCCESSFUL_FUTURE,
                              future1]), finished)
        self.assertEqual(set([future2]), pending)
class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests):
    """WaitTests against a thread pool, plus a thread-specific race test."""
    def test_pending_calls_race(self):
        # Issue #14406: multi-threaded race condition when waiting on all
        # futures.
        event = threading.Event()

        def future_func():
            event.wait()

        # Lower the bytecode check interval so thread switches are frequent,
        # maximizing the chance of hitting the race.
        # NOTE(review): sys.get/setcheckinterval exist only on Python 2 and
        # early Python 3 — this file targets the futures backport.
        oldswitchinterval = sys.getcheckinterval()
        sys.setcheckinterval(1)
        try:
            fs = set(self.executor.submit(future_func) for i in range(100))
            event.set()
            futures.wait(fs, return_when=futures.ALL_COMPLETED)
        finally:
            sys.setcheckinterval(oldswitchinterval)
class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests):
    """Run WaitTests against a process pool."""
    pass
class AsCompletedTests(unittest.TestCase):
    """futures.as_completed() behaviour (mixed in with an executor mixin)."""
    # TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.
    def test_no_timeout(self):
        future1 = self.executor.submit(mul, 2, 21)
        future2 = self.executor.submit(mul, 7, 6)
        completed = set(futures.as_completed(
            [CANCELLED_AND_NOTIFIED_FUTURE,
             EXCEPTION_FUTURE,
             SUCCESSFUL_FUTURE,
             future1, future2]))
        self.assertEqual(set(
            [CANCELLED_AND_NOTIFIED_FUTURE,
             EXCEPTION_FUTURE,
             SUCCESSFUL_FUTURE,
             future1, future2]),
            completed)

    def test_zero_timeout(self):
        future1 = self.executor.submit(time.sleep, 2)
        completed_futures = set()
        try:
            for future in futures.as_completed(
                    [CANCELLED_AND_NOTIFIED_FUTURE,
                     EXCEPTION_FUTURE,
                     SUCCESSFUL_FUTURE,
                     future1],
                    timeout=0):
                completed_futures.add(future)
        except futures.TimeoutError:
            # expected: future1 cannot finish within a zero timeout
            pass
        self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
                              EXCEPTION_FUTURE,
                              SUCCESSFUL_FUTURE]),
                         completed_futures)

    def test_duplicate_futures(self):
        # Issue 20367. Duplicate futures should not raise exceptions or give
        # duplicate responses.
        future1 = self.executor.submit(time.sleep, 2)
        completed = [f for f in futures.as_completed([future1,future1])]
        self.assertEqual(len(completed), 1)
class ThreadPoolAsCompletedTests(ThreadPoolMixin, AsCompletedTests):
    """Run AsCompletedTests against a thread pool."""
    pass
class ProcessPoolAsCompletedTests(ProcessPoolMixin, AsCompletedTests):
    """Run AsCompletedTests against a process pool."""
    pass
class ExecutorTest(unittest.TestCase):
    """submit()/map() behaviour shared by both executor types."""
    # Executor.shutdown() and context manager usage is tested by
    # ExecutorShutdownTest.
    def test_submit(self):
        future = self.executor.submit(pow, 2, 8)
        self.assertEqual(256, future.result())

    def test_submit_keyword(self):
        future = self.executor.submit(mul, 2, y=8)
        self.assertEqual(16, future.result())

    def test_map(self):
        # map over the pool must match the built-in map
        self.assertEqual(
            list(self.executor.map(pow, range(10), range(10))),
            list(map(pow, range(10), range(10))))

    def test_map_exception(self):
        # the worker exception is re-raised when its result is reached
        i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
        self.assertEqual(next(i), (0, 1))
        self.assertEqual(next(i), (0, 1))
        self.assertRaises(ZeroDivisionError, next, i)

    def test_map_timeout(self):
        results = []
        try:
            for i in self.executor.map(time.sleep,
                                       [0, 0, 3],
                                       timeout=1.5):
                results.append(i)
        except futures.TimeoutError:
            pass
        else:
            self.fail('expected TimeoutError')
        # only the two fast tasks produced results before the timeout
        self.assertEqual([None, None], results)
class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest):
    """ExecutorTest against a thread pool, plus a map()-laziness check."""
    def test_map_submits_without_iteration(self):
        """Tests verifying issue 11777."""
        finished = []

        def record_finished(n):
            finished.append(n)

        # tasks must be submitted even if the map result is never iterated
        self.executor.map(record_finished, range(10))
        self.executor.shutdown(wait=True)
        self.assertEqual(len(finished), 10)
class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTest):
    """Run ExecutorTest against a process pool."""
    pass
class FutureTests(unittest.TestCase):
    """Tests for Future: done-callback semantics, state predicates,
    repr formatting, cancellation, and result/exception retrieval."""
    def test_done_callback_with_result(self):
        callback_result = [None]
        def fn(callback_future):
            callback_result[0] = callback_future.result()
        f = Future()
        f.add_done_callback(fn)
        f.set_result(5)
        self.assertEqual(5, callback_result[0])
    def test_done_callback_with_exception(self):
        callback_exception = [None]
        def fn(callback_future):
            callback_exception[0] = callback_future.exception()
        f = Future()
        f.add_done_callback(fn)
        f.set_exception(Exception('test'))
        self.assertEqual(('test',), callback_exception[0].args)
    def test_done_callback_with_cancel(self):
        was_cancelled = [None]
        def fn(callback_future):
            was_cancelled[0] = callback_future.cancelled()
        f = Future()
        f.add_done_callback(fn)
        self.assertTrue(f.cancel())
        self.assertTrue(was_cancelled[0])
    def test_done_callback_raises(self):
        with captured_stderr() as stderr:
            raising_was_called = [False]
            fn_was_called = [False]
            def raising_fn(callback_future):
                raising_was_called[0] = True
                raise Exception('doh!')
            def fn(callback_future):
                fn_was_called[0] = True
            f = Future()
            f.add_done_callback(raising_fn)
            f.add_done_callback(fn)
            f.set_result(5)
            # BUGFIX: assert on the stored flags, not on the one-element
            # lists themselves -- a non-empty list is always truthy, so the
            # previous asserts could never fail.
            self.assertTrue(raising_was_called[0])
            self.assertTrue(fn_was_called[0])
            # the exception from the first callback must not prevent the
            # second callback from running; it is only logged to stderr
            self.assertIn('Exception: doh!', stderr.getvalue())
    def test_done_callback_already_successful(self):
        # callbacks added after completion run immediately
        callback_result = [None]
        def fn(callback_future):
            callback_result[0] = callback_future.result()
        f = Future()
        f.set_result(5)
        f.add_done_callback(fn)
        self.assertEqual(5, callback_result[0])
    def test_done_callback_already_failed(self):
        callback_exception = [None]
        def fn(callback_future):
            callback_exception[0] = callback_future.exception()
        f = Future()
        f.set_exception(Exception('test'))
        f.add_done_callback(fn)
        self.assertEqual(('test',), callback_exception[0].args)
    def test_done_callback_already_cancelled(self):
        was_cancelled = [None]
        def fn(callback_future):
            was_cancelled[0] = callback_future.cancelled()
        f = Future()
        self.assertTrue(f.cancel())
        f.add_done_callback(fn)
        self.assertTrue(was_cancelled[0])
    def test_repr(self):
        self.assertRegexpMatches(repr(PENDING_FUTURE),
                                 '<Future at 0x[0-9a-f]+ state=pending>')
        self.assertRegexpMatches(repr(RUNNING_FUTURE),
                                 '<Future at 0x[0-9a-f]+ state=running>')
        self.assertRegexpMatches(repr(CANCELLED_FUTURE),
                                 '<Future at 0x[0-9a-f]+ state=cancelled>')
        self.assertRegexpMatches(repr(CANCELLED_AND_NOTIFIED_FUTURE),
                                 '<Future at 0x[0-9a-f]+ state=cancelled>')
        self.assertRegexpMatches(
                repr(EXCEPTION_FUTURE),
                '<Future at 0x[0-9a-f]+ state=finished raised IOError>')
        self.assertRegexpMatches(
                repr(SUCCESSFUL_FUTURE),
                '<Future at 0x[0-9a-f]+ state=finished returned int>')
    def test_cancel(self):
        # only PENDING (and already-cancelled) futures can be cancelled
        f1 = create_future(state=PENDING)
        f2 = create_future(state=RUNNING)
        f3 = create_future(state=CANCELLED)
        f4 = create_future(state=CANCELLED_AND_NOTIFIED)
        f5 = create_future(state=FINISHED, exception=IOError())
        f6 = create_future(state=FINISHED, result=5)
        self.assertTrue(f1.cancel())
        self.assertEqual(f1._state, CANCELLED)
        self.assertFalse(f2.cancel())
        self.assertEqual(f2._state, RUNNING)
        self.assertTrue(f3.cancel())
        self.assertEqual(f3._state, CANCELLED)
        self.assertTrue(f4.cancel())
        self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)
        self.assertFalse(f5.cancel())
        self.assertEqual(f5._state, FINISHED)
        self.assertFalse(f6.cancel())
        self.assertEqual(f6._state, FINISHED)
    def test_cancelled(self):
        self.assertFalse(PENDING_FUTURE.cancelled())
        self.assertFalse(RUNNING_FUTURE.cancelled())
        self.assertTrue(CANCELLED_FUTURE.cancelled())
        self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
        self.assertFalse(EXCEPTION_FUTURE.cancelled())
        self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
    def test_done(self):
        self.assertFalse(PENDING_FUTURE.done())
        self.assertFalse(RUNNING_FUTURE.done())
        self.assertTrue(CANCELLED_FUTURE.done())
        self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
        self.assertTrue(EXCEPTION_FUTURE.done())
        self.assertTrue(SUCCESSFUL_FUTURE.done())
    def test_running(self):
        self.assertFalse(PENDING_FUTURE.running())
        self.assertTrue(RUNNING_FUTURE.running())
        self.assertFalse(CANCELLED_FUTURE.running())
        self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
        self.assertFalse(EXCEPTION_FUTURE.running())
        self.assertFalse(SUCCESSFUL_FUTURE.running())
    def test_result_with_timeout(self):
        self.assertRaises(futures.TimeoutError,
                          PENDING_FUTURE.result, timeout=0)
        self.assertRaises(futures.TimeoutError,
                          RUNNING_FUTURE.result, timeout=0)
        self.assertRaises(futures.CancelledError,
                          CANCELLED_FUTURE.result, timeout=0)
        self.assertRaises(futures.CancelledError,
                          CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
        self.assertRaises(IOError, EXCEPTION_FUTURE.result, timeout=0)
        self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
    def test_result_with_success(self):
        # TODO(brian@sweetapp.com): This test is timing dependant.
        def notification():
            # Wait until the main thread is waiting for the result.
            time.sleep(1)
            f1.set_result(42)
        f1 = create_future(state=PENDING)
        t = threading.Thread(target=notification)
        t.start()
        self.assertEqual(f1.result(timeout=5), 42)
    def test_result_with_cancel(self):
        # TODO(brian@sweetapp.com): This test is timing dependant.
        def notification():
            # Wait until the main thread is waiting for the result.
            time.sleep(1)
            f1.cancel()
        f1 = create_future(state=PENDING)
        t = threading.Thread(target=notification)
        t.start()
        self.assertRaises(futures.CancelledError, f1.result, timeout=5)
    def test_exception_with_timeout(self):
        self.assertRaises(futures.TimeoutError,
                          PENDING_FUTURE.exception, timeout=0)
        self.assertRaises(futures.TimeoutError,
                          RUNNING_FUTURE.exception, timeout=0)
        self.assertRaises(futures.CancelledError,
                          CANCELLED_FUTURE.exception, timeout=0)
        self.assertRaises(futures.CancelledError,
                          CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
        self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
                                   IOError))
        self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
    def test_exception_with_success(self):
        def notification():
            # Wait until the main thread is waiting for the exception.
            time.sleep(1)
            with f1._condition:
                f1._state = FINISHED
                f1._exception = IOError()
                f1._condition.notify_all()
        f1 = create_future(state=PENDING)
        t = threading.Thread(target=notification)
        t.start()
        self.assertTrue(isinstance(f1.exception(timeout=5), IOError))
@reap_threads
def test_main():
    # run the full futures test suite; reap_threads plus reap_children keep
    # the test environment clean of leftover worker threads/processes even
    # when a test fails
    try:
        test_support.run_unittest(ProcessPoolExecutorTest,
                                  ThreadPoolExecutorTest,
                                  ProcessPoolWaitTests,
                                  ThreadPoolWaitTests,
                                  ProcessPoolAsCompletedTests,
                                  ThreadPoolAsCompletedTests,
                                  FutureTests,
                                  ProcessPoolShutdownTest,
                                  ThreadPoolShutdownTest)
    finally:
        test_support.reap_children()
if __name__ == "__main__":
    test_main()
|
cstipkovic/spidermonkey-research
|
python/futures/test_futures.py
|
Python
|
mpl-2.0
| 24,730
|
[
"Brian"
] |
3b63404fbc48ac1b9756192b8d4d5494e0151f9896be2616caedbe2425e3d345
|
#!/usr/bin/env python
# Script to build windows installer packages for LAMMPS
# (c) 2017,2018,2019,2020 Axel Kohlmeyer <akohlmey@gmail.com>
from __future__ import print_function
import sys,os,shutil,glob,re,subprocess,tarfile,gzip,time,inspect
# urlretrieve lives in urllib.request on Python 3; fall back for Python 2.
# Catch only ImportError here -- a bare except would also hide real errors.
try: from urllib.request import urlretrieve as geturl
except ImportError: from urllib import urlretrieve as geturl
# detect the number of CPUs for parallel make; best-effort, fall back to serial
try:
    import multiprocessing
    numcpus = multiprocessing.cpu_count()
except Exception:
    # deliberately broad: any failure just disables parallel builds
    numcpus = 1
# helper functions
def error(str=None):
    """Print a message and terminate the script.

    With no (or an empty) message the global help text is printed;
    otherwise the message is prefixed with the script name.
    """
    if not str:
        print(helpmsg)
    else:
        print(sys.argv[0], "ERROR:", str)
    sys.exit()
def getbool(arg, keyword):
    """Convert a yes/no style command line value to a boolean.

    Unrecognized values abort the script via error().
    """
    truthy = ('yes', 'Yes', 'Y', 'y', 'on', '1', 'True', 'true')
    falsy = ('no', 'No', 'N', 'n', 'off', '0', 'False', 'false')
    if arg in truthy:
        return True
    if arg in falsy:
        return False
    error("Unknown %s option: %s" % (keyword, arg))
def fullpath(path):
    """Return the absolute form of *path* with a leading '~' expanded."""
    expanded = os.path.expanduser(path)
    return os.path.abspath(expanded)
def getexe(url, name):
    """Download a gzip-compressed file from *url* and store the
    decompressed result under *name*.

    The temporary ``<name>.gz`` download is removed afterwards.
    """
    gzname = name + ".gz"
    geturl(url, gzname)
    # the context managers close both files; the explicit close() calls in
    # the previous version were redundant no-ops after the with blocks
    with gzip.open(gzname, 'rb') as gz_in:
        with open(name, 'wb') as f_out:
            shutil.copyfileobj(gz_in, f_out)
    os.remove(gzname)
def system(cmd):
    """Run *cmd* through the shell and return its combined output as text.

    On a non-zero exit status the captured output is handed to error(),
    which terminates the script.
    """
    try:
        out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as e:
        print("Command '%s' returned non-zero exit status" % e.cmd)
        error(e.output.decode('UTF-8'))
    return out.decode('UTF-8')
def which(program):
    """Locate *program* like the Unix which(1) utility.

    Returns the program itself when it is given as a path to an executable
    file, the first executable match on PATH for a bare name, or None.
    """
    def runnable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    directory, _ = os.path.split(program)
    if directory:
        # an explicit path was given: accept it only if it is executable
        return program if runnable(program) else None
    for folder in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(folder.strip('"'), program)
        if runnable(candidate):
            return candidate
    return None
# record location and name of python script
# (inspect.getsourcefile(lambda:0) resolves this file even when run via exec)
homedir, exename = os.path.split(os.path.abspath(inspect.getsourcefile(lambda:0)))
# default settings help message and default settings
bitflag = '64'        # target Windows bitness: '32' or '64'
parflag = 'no'        # message passing: 'no' or 'mpi'
pythonflag = False    # bundle python support into the installer
thrflag = 'omp'       # threading: 'omp' or 'no'
revflag = 'stable'    # LAMMPS revision to build
verbose = False       # echo output of build commands
gitdir = os.path.join(homedir,"lammps")   # location of LAMMPS git checkout
adminflag = True      # installer requires admin privileges
msixflag = False      # build an MSIX package variant
helpmsg = """
Usage: python %s -b <bits> -j <cpus> -p <mpi> -t <thread> -y <yes|no> -r <rev> -v <yes|no> -g <folder> -a <yes|no>
Flags (all flags are optional, defaults listed below):
-b : select Windows variant (default value: %s)
-b 32 : build for 32-bit Windows
-b 64 : build for 64-bit Windows
-j : set number of CPUs for parallel make (default value: %d)
-j <num> : set to any reasonable number or 1 for serial make
-p : select message passing parallel build (default value: %s)
-p mpi : build an MPI parallel version with MPICH2 v1.4.1p1
-p no : build a serial version using MPI STUBS library
-t : select thread support (default value: %s)
-t omp : build with threads via OpenMP enabled
-t no : build with thread support disabled
-y : select python support (default value: %s)
-y yes : build with python included
-y no : build without python
-r : select LAMMPS source revision to build (default value: %s)
-r stable : download and build the latest stable LAMMPS version
-r release : download and build the latest patch release LAMMPS version
-r develop : download and build the latest development snapshot
-r patch_<date> : download and build a specific patch release
-r maintenance_<date> : download and build a specific maintenance branch
-r <sha256> : download and build a specific snapshot version
-v : select output verbosity
-v yes : print progress messages and output of make commands
-v no : print only progress messages
-g : select folder with git checkout of LAMMPS sources
-g <folder> : use LAMMPS checkout in <folder> (default value: %s)
-a : select admin level installation (default value: yes)
-a yes : the created installer requires to be run at admin level
and LAMMPS is installed to be accessible by all users
-a no : the created installer runs without admin privilege and
LAMMPS is installed into the current user's appdata folder
-a msix : same as "no" but adjust for creating an MSIX package
Example:
python %s -r release -t omp -p mpi
""" % (exename,bitflag,numcpus,parflag,thrflag,pythonflag,revflag,gitdir,exename)
# parse arguments
# hand-rolled flag parser: every flag takes exactly one value, so argv is
# consumed two items at a time
argv = sys.argv
argc = len(argv)
i = 1
while i < argc:
    if i+1 >= argc:
        print("\nMissing argument to flag:",argv[i])
        error()
    if argv[i] == '-b':
        bitflag = argv[i+1]
    elif argv[i] == '-j':
        numcpus = int(argv[i+1])
    elif argv[i] == '-p':
        parflag = argv[i+1]
    elif argv[i] == '-t':
        thrflag = argv[i+1]
    elif argv[i] == '-y':
        pythonflag = getbool(argv[i+1],"python")
    elif argv[i] == '-r':
        revflag = argv[i+1]
    elif argv[i] == '-v':
        verbose = getbool(argv[i+1],"verbose")
    elif argv[i] == '-a':
        # '-a msix' implies a non-admin install tailored for MSIX packaging
        if argv[i+1] in ['msix','MSIX']:
            adminflag = False
            msixflag = True
        else:
            msixflag = False
            adminflag = getbool(argv[i+1],"admin")
    elif argv[i] == '-g':
        gitdir = fullpath(argv[i+1])
    else:
        print("\nUnknown flag:",argv[i])
        error()
    i+=2
# checks
if bitflag != '32' and bitflag != '64':
    error("Unsupported bitness flag %s" % bitflag)
if parflag != 'no' and parflag != 'mpi':
    error("Unsupported parallel flag %s" % parflag)
if thrflag != 'no' and thrflag != 'omp':
    error("Unsupported threading flag %s" % thrflag)
# test for valid revision name format: branch names, release tags, or commit hashes
rev1 = re.compile("^(stable|release|develop)$")
rev2 = re.compile(r"^(patch|stable)_\d+(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\d{4}$")
rev3 = re.compile(r"^[a-f0-9]{40}$")   # full-length git commit hash
rev4 = re.compile(r"^maintenance-\d+-\d+-\d+")
if not rev1.match(revflag) and not rev2.match(revflag) and not rev3.match(revflag) and not rev4.match(revflag):
    error("Unsupported revision flag %s" % revflag)
# create working directory; the folder name encodes the build configuration
# so different configurations do not clobber each other
if adminflag:
    builddir = os.path.join(fullpath('.'),"tmp-%s-%s-%s-%s" % (bitflag,parflag,thrflag,revflag))
else:
    if pythonflag:
        builddir = os.path.join(fullpath('.'),"tmp-%s-%s-%s-%s-python" % (bitflag,parflag,thrflag,revflag))
    elif msixflag:
        builddir = os.path.join(fullpath('.'),"tmp-%s-%s-%s-%s-msix" % (bitflag,parflag,thrflag,revflag))
    else:
        builddir = os.path.join(fullpath('.'),"tmp-%s-%s-%s-%s-noadmin" % (bitflag,parflag,thrflag,revflag))
# remove any stale build folder from a previous run (ignore_errors=True)
shutil.rmtree(builddir,True)
try:
    os.mkdir(builddir)
except OSError:
    # catch only filesystem errors; the previous bare except would also
    # have swallowed SystemExit/KeyboardInterrupt
    error("Cannot create temporary build folder: %s" % builddir)
# check for prerequisites and set up build environment
# select the matching MinGW cross-compiler toolchain for the target bitness
if bitflag == '32':
    cc_cmd = which('i686-w64-mingw32-gcc')
    cxx_cmd = which('i686-w64-mingw32-g++')
    fc_cmd = which('i686-w64-mingw32-gfortran')
    ar_cmd = which('i686-w64-mingw32-ar')
    size_cmd = which('i686-w64-mingw32-size')
    nsis_cmd = which('makensis')
    lmp_size = 'smallsmall'
else:
    cc_cmd = which('x86_64-w64-mingw32-gcc')
    cxx_cmd = which('x86_64-w64-mingw32-g++')
    fc_cmd = which('x86_64-w64-mingw32-gfortran')
    ar_cmd = which('x86_64-w64-mingw32-ar')
    size_cmd = which('x86_64-w64-mingw32-size')
    nsis_cmd = which('makensis')
    lmp_size = 'smallbig'
# print a summary of the resolved build configuration
print("""
Settings: building LAMMPS revision %s for %s-bit Windows
Message passing : %s
Multi-threading : %s
Home folder : %s
Source folder : %s
Build folder : %s
C compiler : %s
C++ compiler : %s
Fortran compiler : %s
Library archiver : %s
""" % (revflag,bitflag,parflag,thrflag,homedir,gitdir,builddir,cc_cmd,cxx_cmd,fc_cmd,ar_cmd))
# create/update git checkout
if not os.path.exists(gitdir):
    txt = system("git clone https://github.com/lammps/lammps.git %s" % gitdir)
    if verbose: print(txt)
os.chdir(gitdir)
txt = system("git fetch origin")
if verbose: print(txt)
txt = system("git checkout %s" % revflag)
if verbose: print(txt)
# branches (unlike tags/hashes) may have moved upstream: pull in updates
if revflag == "develop" or revflag == "stable" or revflag == "release" or rev4.match(revflag):
    txt = system("git pull")
    if verbose: print(txt)
# switch to build folder
os.chdir(builddir)
# download what is not automatically downloaded by CMake
print("Downloading third party tools")
url='http://download.lammps.org/thirdparty'
print("FFMpeg")
getexe("%s/ffmpeg-win%s.exe.gz" % (url,bitflag),"ffmpeg.exe")
print("gzip")
getexe("%s/gzip.exe.gz" % url,"gzip.exe")
# translate the user-facing flags into CMake on/off switches
if parflag == "mpi":
    mpiflag = "on"
else:
    mpiflag = "off"
if thrflag == "omp":
    ompflag = "on"
else:
    ompflag = "off"
print("Configuring build with CMake")
cmd = "mingw%s-cmake -G Ninja -D CMAKE_BUILD_TYPE=Release" % bitflag
cmd += " -D ADD_PKG_CONFIG_PATH=%s/mingw%s-pkgconfig" % (homedir,bitflag)
cmd += " -C %s/mingw%s-pkgconfig/addpkg.cmake" % (homedir,bitflag)
cmd += " -C %s/cmake/presets/mingw-cross.cmake %s/cmake" % (gitdir,gitdir)
cmd += " -DBUILD_SHARED_LIBS=on -DBUILD_MPI=%s -DBUILD_OPENMP=%s" % (mpiflag,ompflag)
cmd += " -DWITH_GZIP=on -DWITH_FFMPEG=on -DLAMMPS_EXCEPTIONS=on"
cmd += " -DINTEL_LRT_MODE=c++11 -DBUILD_LAMMPS_SHELL=on"
cmd += " -DCMAKE_CXX_COMPILER_LAUNCHER=ccache"
if pythonflag: cmd += " -DPKG_PYTHON=yes"
print("Running: ",cmd)
txt = system(cmd)
if verbose: print(txt)
print("Compiling")
system("ninja")
print("Done")
print("Building PDF manual")
os.chdir(os.path.join(gitdir,"doc"))
txt = system("make pdf")
if verbose: print(txt)
shutil.move("Manual.pdf",os.path.join(builddir,"LAMMPS-Manual.pdf"))
print("Done")
# switch back to build folder and copy/process files for inclusion in installer
print("Collect and convert files for the Installer package")
os.chdir(builddir)
shutil.copytree(os.path.join(gitdir,"examples"),os.path.join(builddir,"examples"),symlinks=False)
shutil.copytree(os.path.join(gitdir,"bench"),os.path.join(builddir,"bench"),symlinks=False)
shutil.copytree(os.path.join(gitdir,"tools"),os.path.join(builddir,"tools"),symlinks=False)
shutil.copytree(os.path.join(gitdir,"python","lammps"),os.path.join(builddir,"python","lammps"),symlinks=False)
shutil.copytree(os.path.join(gitdir,"potentials"),os.path.join(builddir,"potentials"),symlinks=False)
shutil.copy(os.path.join(gitdir,"README"),os.path.join(builddir,"README.txt"))
shutil.copy(os.path.join(gitdir,"LICENSE"),os.path.join(builddir,"LICENSE.txt"))
# bundle the various package PDF manuals under descriptive names
shutil.copy(os.path.join(gitdir,"doc","src","PDF","colvars-refman-lammps.pdf"),os.path.join(builddir,"Colvars-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"tools","createatoms","Manual.pdf"),os.path.join(builddir,"CreateAtoms-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","kspace.pdf"),os.path.join(builddir,"Kspace-Extra-Info.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","pair_gayberne_extra.pdf"),os.path.join(builddir,"PairGayBerne-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","pair_resquared_extra.pdf"),os.path.join(builddir,"PairReSquared-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","PDLammps_overview.pdf"),os.path.join(builddir,"PDLAMMPS-Overview.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","PDLammps_EPS.pdf"),os.path.join(builddir,"PDLAMMPS-EPS.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","PDLammps_VES.pdf"),os.path.join(builddir,"PDLAMMPS-VES.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","SPH_LAMMPS_userguide.pdf"),os.path.join(builddir,"SPH-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","MACHDYN_LAMMPS_userguide.pdf"),os.path.join(builddir,"MACHDYN-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","CG-DNA.pdf"),os.path.join(builddir,"CG-DNA-Manual.pdf"))
# prune outdated inputs, too large files, or examples of packages we don't bundle
for d in ['accelerate','kim','mscg','PACKAGES/quip','PACKAGES/vtk']:
    shutil.rmtree(os.path.join("examples",d),True)
for d in ['FERMI','KEPLER']:
    shutil.rmtree(os.path.join("bench",d),True)
shutil.rmtree("tools/msi2lmp/test",True)
os.remove("potentials/C_10_10.mesocnt")
os.remove("potentials/TABTP_10_10.mesont")
os.remove("examples/PACKAGES/mesont/C_10_10.mesocnt")
os.remove("examples/PACKAGES/mesont/TABTP_10_10.mesont")
# convert text files to CR-LF conventions
txt = system("unix2dos LICENSE.txt README.txt tools/msi2lmp/README")
if verbose: print(txt)
txt = system("find bench examples potentials python tools/msi2lmp/frc_files -type f -print | xargs unix2dos")
if verbose: print(txt)
# mass rename README to README.txt
txt = system('for f in $(find tools bench examples potentials python -name README -print); do mv -v $f $f.txt; done')
if verbose: print(txt)
# mass rename in.<name> to in.<name>.lmp
txt = system('for f in $(find bench examples -name in.\* -print); do mv -v $f $f.lmp; done')
if verbose: print(txt)
print("Done")
print("Configuring and building installer")
os.chdir(builddir)
# pick the NSIS script matching the requested install mode
if pythonflag:
    nsisfile = os.path.join(homedir,"installer","lammps-python.nsis")
elif adminflag:
    nsisfile = os.path.join(homedir,"installer","lammps-admin.nsis")
else:
    if msixflag:
        nsisfile = os.path.join(homedir,"installer","lammps-msix.nsis")
    else:
        nsisfile = os.path.join(homedir,"installer","lammps-noadmin.nsis")
shutil.copy(nsisfile,os.path.join(builddir,"lammps.nsis"))
shutil.copy(os.path.join(homedir,"installer","FileAssociation.nsh"),os.path.join(builddir,"FileAssociation.nsh"))
shutil.copy(os.path.join(homedir,"installer","lammps.ico"),os.path.join(builddir,"lammps.ico"))
shutil.copy(os.path.join(homedir,"installer","lammps-text-logo-wide.bmp"),os.path.join(builddir,"lammps-text-logo-wide.bmp"))
shutil.copytree(os.path.join(homedir,"installer","envvar"),os.path.join(builddir,"envvar"),symlinks=False)
# define version flag of the installer:
# - use current timestamp, when pulling from develop (for daily builds)
# - parse version from src/version.h when pulling from stable, release, or specific tag
# - otherwise use revflag, i.e. the commit hash
version = revflag
if revflag == 'stable' or revflag == 'release' or rev2.match(revflag):
    with open(os.path.join(gitdir,"src","version.h"),'r') as v_file:
        verexp = re.compile(r'^.*"(\w+) (\w+) (\w+)".*$')
        vertxt = v_file.readline()
        verseq = verexp.match(vertxt).groups()
        version = "".join(verseq)
elif revflag == 'develop':
    version = time.strftime('%Y-%m-%d')
# point NSIS at the matching MinGW runtime DLLs
if bitflag == '32':
    mingwdir = '/usr/i686-w64-mingw32/sys-root/mingw/bin/'
elif bitflag == '64':
    mingwdir = '/usr/x86_64-w64-mingw32/sys-root/mingw/bin/'
if parflag == 'mpi':
    txt = system("makensis -DMINGW=%s -DVERSION=%s-MPI -DBIT=%s -DLMPREV=%s lammps.nsis" % (mingwdir,version,bitflag,revflag))
    if verbose: print(txt)
else:
    txt = system("makensis -DMINGW=%s -DVERSION=%s -DBIT=%s -DLMPREV=%s lammps.nsis" % (mingwdir,version,bitflag,revflag))
    if verbose: print(txt)
# clean up after successful build
os.chdir('..')
print("Cleaning up...")
shutil.rmtree(builddir,True)
print("Done.")
|
lammps/lammps-packages
|
mingw-cross/cmake-win-on-linux.py
|
Python
|
mit
| 15,175
|
[
"LAMMPS",
"VTK"
] |
4662e83cb7c8b6fa2c4189c0fe7c99f9e91f0810933e3610f175b7447de4a0fa
|
""" manage PyTables query interface via Expressions """
from __future__ import annotations
import ast
from functools import partial
from typing import Any
import numpy as np
from pandas._libs.tslibs import (
Timedelta,
Timestamp,
)
from pandas._typing import npt
from pandas.compat.chainmap import DeepChainMap
from pandas.core.dtypes.common import is_list_like
import pandas.core.common as com
from pandas.core.computation import (
expr,
ops,
scope as _scope,
)
from pandas.core.computation.common import ensure_decoded
from pandas.core.computation.expr import BaseExprVisitor
from pandas.core.computation.ops import (
UndefinedVariableError,
is_term,
)
from pandas.core.construction import extract_array
from pandas.core.indexes.base import Index
from pandas.io.formats.printing import (
pprint_thing,
pprint_thing_encoded,
)
class PyTablesScope(_scope.Scope):
    # Scope subclass that additionally carries the table's queryable columns.
    __slots__ = ("queryables",)
    queryables: dict[str, Any]
    def __init__(
        self,
        level: int,
        global_dict=None,
        local_dict=None,
        queryables: dict[str, Any] | None = None,
    ):
        # bump the frame level by one to account for this constructor frame
        super().__init__(level + 1, global_dict=global_dict, local_dict=local_dict)
        self.queryables = queryables or {}
class Term(ops.Term):
    """A term in a pytables expression; non-string names become Constants."""
    env: PyTablesScope
    def __new__(cls, name, env, side=None, encoding=None):
        # a non-string "name" is a literal value -> construct a Constant
        if isinstance(name, str):
            klass = cls
        else:
            klass = Constant
        return object.__new__(klass)
    def __init__(self, name, env: PyTablesScope, side=None, encoding=None):
        super().__init__(name, env, side=side, encoding=encoding)
    def _resolve_name(self):
        # must be a queryables
        if self.side == "left":
            # Note: The behavior of __new__ ensures that self.name is a str here
            if self.name not in self.env.queryables:
                raise NameError(f"name {repr(self.name)} is not defined")
            return self.name
        # resolve the rhs (and allow it to be None)
        try:
            return self.env.resolve(self.name, is_local=False)
        except UndefinedVariableError:
            return self.name
    # read-only property overwriting read/write property
    @property  # type: ignore[misc]
    def value(self):
        return self._value
class Constant(Term):
    """A literal value in a pytables expression (created via Term.__new__)."""
    def __init__(self, value, env: PyTablesScope, side=None, encoding=None):
        assert isinstance(env, PyTablesScope), type(env)
        super().__init__(value, env, side=side, encoding=encoding)
    def _resolve_name(self):
        # a constant resolves to itself; no scope lookup needed
        return self._name
class BinOp(ops.BinOp):
    """Binary operation specialized for pytables expression generation."""
    # pytables can OR together at most this many selectors in one condition
    _max_selectors = 31
    op: str
    queryables: dict[str, Any]
    condition: str | None
    def __init__(self, op: str, lhs, rhs, queryables: dict[str, Any], encoding):
        super().__init__(op, lhs, rhs)
        self.queryables = queryables
        self.encoding = encoding
        self.condition = None
    def _disallow_scalar_only_bool_ops(self):
        # scalar-only boolean ops are acceptable for pytables expressions
        pass
    def prune(self, klass):
        def pr(left, right):
            """create and return a new specialized BinOp from myself"""
            if left is None:
                return right
            elif right is None:
                return left
            k = klass
            # when both sides are already specialized, join them
            if isinstance(left, ConditionBinOp):
                if isinstance(right, ConditionBinOp):
                    k = JointConditionBinOp
                elif isinstance(left, k):
                    return left
                elif isinstance(right, k):
                    return right
            elif isinstance(left, FilterBinOp):
                if isinstance(right, FilterBinOp):
                    k = JointFilterBinOp
                elif isinstance(left, k):
                    return left
                elif isinstance(right, k):
                    return right
            return k(
                self.op, left, right, queryables=self.queryables, encoding=self.encoding
            ).evaluate()
        left, right = self.lhs, self.rhs
        # the four branches below are exhaustive over is_term(left/right)
        if is_term(left) and is_term(right):
            res = pr(left.value, right.value)
        elif not is_term(left) and is_term(right):
            res = pr(left.prune(klass), right.value)
        elif is_term(left) and not is_term(right):
            res = pr(left.value, right.prune(klass))
        elif not (is_term(left) or is_term(right)):
            res = pr(left.prune(klass), right.prune(klass))
        return res
    def conform(self, rhs):
        """inplace conform rhs"""
        if not is_list_like(rhs):
            rhs = [rhs]
        if isinstance(rhs, np.ndarray):
            rhs = rhs.ravel()
        return rhs
    @property
    def is_valid(self) -> bool:
        """return True if this is a valid field"""
        return self.lhs in self.queryables
    @property
    def is_in_table(self) -> bool:
        """
        return True if this is a valid column name for generation (e.g. an
        actual column in the table)
        """
        return self.queryables.get(self.lhs) is not None
    @property
    def kind(self):
        """the kind of my field"""
        return getattr(self.queryables.get(self.lhs), "kind", None)
    @property
    def meta(self):
        """the meta of my field"""
        return getattr(self.queryables.get(self.lhs), "meta", None)
    @property
    def metadata(self):
        """the metadata of my field"""
        return getattr(self.queryables.get(self.lhs), "metadata", None)
    def generate(self, v) -> str:
        """create and return the op string for this TermValue"""
        val = v.tostring(self.encoding)
        return f"({self.lhs} {self.op} {val})"
    def convert_value(self, v) -> TermValue:
        """
        convert the expression that is in the term to something that is
        accepted by pytables
        """
        def stringify(value):
            if self.encoding is not None:
                return pprint_thing_encoded(value, encoding=self.encoding)
            return pprint_thing(value)
        kind = ensure_decoded(self.kind)
        meta = ensure_decoded(self.meta)
        if kind == "datetime64" or kind == "datetime":
            if isinstance(v, (int, float)):
                v = stringify(v)
            v = ensure_decoded(v)
            v = Timestamp(v)
            if v.tz is not None:
                # compare against the stored naive-UTC representation
                v = v.tz_convert("UTC")
            return TermValue(v, v.value, kind)
        elif kind == "timedelta64" or kind == "timedelta":
            if isinstance(v, str):
                v = Timedelta(v).value
            else:
                # numeric timedeltas are interpreted as seconds
                v = Timedelta(v, unit="s").value
            return TermValue(int(v), v, kind)
        elif meta == "category":
            metadata = extract_array(self.metadata, extract_numpy=True)
            result: npt.NDArray[np.intp] | np.intp | int
            # -1 is the sentinel code for "value not among the categories"
            if v not in metadata:
                result = -1
            else:
                result = metadata.searchsorted(v, side="left")
            return TermValue(result, result, "integer")
        elif kind == "integer":
            v = int(float(v))
            return TermValue(v, v, kind)
        elif kind == "float":
            v = float(v)
            return TermValue(v, v, kind)
        elif kind == "bool":
            if isinstance(v, str):
                # any string not in the falsy list below counts as True
                v = not v.strip().lower() in [
                    "false",
                    "f",
                    "no",
                    "n",
                    "none",
                    "0",
                    "[]",
                    "{}",
                    "",
                ]
            else:
                v = bool(v)
            return TermValue(v, v, kind)
        elif isinstance(v, str):
            # string quoting
            return TermValue(v, stringify(v), "string")
        else:
            raise TypeError(f"Cannot compare {v} of type {type(v)} to {kind} column")
    def convert_values(self):
        pass
class FilterBinOp(BinOp):
    """BinOp realized as a post-read filter rather than a pytables condition."""
    # (column, filter-callable, Index of values) once evaluate() has run
    filter: tuple[Any, Any, Index] | None = None
    def __repr__(self) -> str:
        if self.filter is None:
            return "Filter: Not Initialized"
        return pprint_thing(f"[Filter : [{self.filter[0]}] -> [{self.filter[1]}]")
    def invert(self):
        """invert the filter"""
        if self.filter is not None:
            self.filter = (
                self.filter[0],
                self.generate_filter_op(invert=True),
                self.filter[2],
            )
        return self
    def format(self):
        """return the actual filter format"""
        return [self.filter]
    def evaluate(self):
        if not self.is_valid:
            raise ValueError(f"query term is not valid [{self}]")
        rhs = self.conform(self.rhs)
        values = list(rhs)
        if self.is_in_table:
            # if too many values to create the expression, use a filter instead
            if self.op in ["==", "!="] and len(values) > self._max_selectors:
                filter_op = self.generate_filter_op()
                self.filter = (self.lhs, filter_op, Index(values))
                return self
            # few enough values: defer to a ConditionBinOp instead
            return None
        # equality conditions
        if self.op in ["==", "!="]:
            filter_op = self.generate_filter_op()
            self.filter = (self.lhs, filter_op, Index(values))
        else:
            raise TypeError(
                f"passing a filterable condition to a non-table indexer [{self}]"
            )
        return self
    def generate_filter_op(self, invert: bool = False):
        # '!=' (or an inverted '==') excludes the matching rows
        if (self.op == "!=" and not invert) or (self.op == "==" and invert):
            return lambda axis, vals: ~axis.isin(vals)
        else:
            return lambda axis, vals: axis.isin(vals)
class JointFilterBinOp(FilterBinOp):
    # two filters combined by a boolean op cannot be collapsed into one
    def format(self):
        raise NotImplementedError("unable to collapse Joint Filters")
    def evaluate(self):
        return self
class ConditionBinOp(BinOp):
    """BinOp realized as a numexpr condition string for pytables."""
    def __repr__(self) -> str:
        return pprint_thing(f"[Condition : [{self.condition}]]")
    def invert(self):
        """invert the condition"""
        # if self.condition is not None:
        #     self.condition = "~(%s)" % self.condition
        # return self
        raise NotImplementedError(
            "cannot use an invert condition when passing to numexpr"
        )
    def format(self):
        """return the actual ne format"""
        return self.condition
    def evaluate(self):
        if not self.is_valid:
            raise ValueError(f"query term is not valid [{self}]")
        # convert values if we are in the table
        if not self.is_in_table:
            return None
        rhs = self.conform(self.rhs)
        values = [self.convert_value(v) for v in rhs]
        # equality conditions
        if self.op in ["==", "!="]:
            # too many values to create the expression?
            if len(values) <= self._max_selectors:
                # OR together one equality clause per value
                vs = [self.generate(v) for v in values]
                self.condition = f"({' | '.join(vs)})"
            # use a filter after reading
            else:
                return None
        else:
            self.condition = self.generate(values[0])
        return self
class JointConditionBinOp(ConditionBinOp):
    def evaluate(self):
        # lhs/rhs are already-evaluated ConditionBinOps; join their strings
        self.condition = f"({self.lhs.condition} {self.op} {self.rhs.condition})"
        return self
class UnaryOp(ops.UnaryOp):
    """Unary operation; only inversion ('~') is supported for pytables."""
    def prune(self, klass):
        if self.op != "~":
            raise NotImplementedError("UnaryOp only support invert type ops")
        operand = self.operand
        operand = operand.prune(klass)
        # invert only when pruning actually produced something usable:
        # a ConditionBinOp that built a condition, or (for non-condition
        # klasses) a FilterBinOp that built a filter.
        # NOTE: 'and' binds tighter than 'or' in the test below.
        if operand is not None and (
            issubclass(klass, ConditionBinOp)
            and operand.condition is not None
            or not issubclass(klass, ConditionBinOp)
            and issubclass(klass, FilterBinOp)
            and operand.filter is not None
        ):
            return operand.invert()
        return None
class PyTablesExprVisitor(BaseExprVisitor):
    """AST visitor that builds pytables-specific Term/BinOp nodes."""
    const_type = Constant
    term_type = Term
    def __init__(self, env, engine, parser, **kwargs):
        super().__init__(env, engine, parser)
        for bin_op in self.binary_ops:
            bin_node = self.binary_op_nodes_map[bin_op]
            # each visit_<node> returns a partial(BinOp, op, **kwargs) that
            # still expects lhs/rhs; bin_op is bound early via the default arg
            # to avoid the late-binding closure pitfall
            setattr(
                self,
                f"visit_{bin_node}",
                lambda node, bin_op=bin_op: partial(BinOp, bin_op, **kwargs),
            )
    def visit_UnaryOp(self, node, **kwargs):
        if isinstance(node.op, (ast.Not, ast.Invert)):
            return UnaryOp("~", self.visit(node.operand))
        elif isinstance(node.op, ast.USub):
            return self.const_type(-self.visit(node.operand).value, self.env)
        elif isinstance(node.op, ast.UAdd):
            raise NotImplementedError("Unary addition not supported")
    def visit_Index(self, node, **kwargs):
        return self.visit(node.value).value
    def visit_Assign(self, node, **kwargs):
        # rewrite 'a = b' as the comparison 'a == b'
        cmpr = ast.Compare(
            ops=[ast.Eq()], left=node.targets[0], comparators=[node.value]
        )
        return self.visit(cmpr)
    def visit_Subscript(self, node, **kwargs):
        # only allow simple subscripts
        value = self.visit(node.value)
        slobj = self.visit(node.slice)
        try:
            value = value.value
        except AttributeError:
            pass
        if isinstance(slobj, Term):
            # In py39 np.ndarray lookups with Term containing int raise
            slobj = slobj.value
        try:
            return self.const_type(value[slobj], self.env)
        except TypeError as err:
            raise ValueError(
                f"cannot subscript {repr(value)} with {repr(slobj)}"
            ) from err
    def visit_Attribute(self, node, **kwargs):
        attr = node.attr
        value = node.value
        ctx = type(node.ctx)
        if ctx == ast.Load:
            # resolve the value
            resolved = self.visit(value)
            # try to get the value to see if we are another expression
            try:
                resolved = resolved.value
            except (AttributeError):
                pass
            try:
                return self.term_type(getattr(resolved, attr), self.env)
            except AttributeError:
                # something like datetime.datetime where scope is overridden
                if isinstance(value, ast.Name) and value.id == attr:
                    return resolved
        raise ValueError(f"Invalid Attribute context {ctx.__name__}")
    def translate_In(self, op):
        # pytables expresses membership as equality against each element
        return ast.Eq() if isinstance(op, ast.In) else op
    def _rewrite_membership_op(self, node, left, right):
        return self.visit(node.op), node.op, left, right
def _validate_where(w):
    """
    Validate that the where statement is of the right type.
    The type may either be String, Expr, or list-like of Exprs.
    Parameters
    ----------
    w : String term expression, Expr, or list-like of Exprs.
    Returns
    -------
    where : The original where clause if the check was successful.
    Raises
    ------
    TypeError : An invalid data type was passed in for w (e.g. dict).
    """
    # accept strings, existing expressions, and list-likes; reject the rest
    if isinstance(w, (PyTablesExpr, str)) or is_list_like(w):
        return w
    raise TypeError(
        "where must be passed as a string, PyTablesExpr, "
        "or list-like of PyTablesExpr"
    )
class PyTablesExpr(expr.Expr):
    """
    Hold a pytables-like expression, comprised of possibly multiple 'terms'.
    Parameters
    ----------
    where : string term expression, PyTablesExpr, or list-like of PyTablesExprs
    queryables : a "kinds" map (dict of column name -> kind), or None if column
        is non-indexable
    encoding : an encoding that will encode the query terms
    Returns
    -------
    a PyTablesExpr object
    Examples
    --------
    'index>=date'
    "columns=['A', 'D']"
    'columns=A'
    'columns==A'
    "~(columns=['A','B'])"
    'index>df.index[3] & string="bar"'
    '(index>df.index[3] & index<=df.index[6]) | string="bar"'
    "ts>=Timestamp('2012-02-01')"
    "major_axis>=20130101"
    """

    # _visitor builds the term tree; stays None until queryables are known
    _visitor: PyTablesExprVisitor | None
    env: PyTablesScope
    expr: str

    def __init__(
        self,
        where,
        queryables: dict[str, Any] | None = None,
        encoding=None,
        scope_level: int = 0,
    ):
        where = _validate_where(where)
        self.encoding = encoding
        # condition/filter are populated lazily by evaluate()
        self.condition = None
        self.filter = None
        self.terms = None
        self._visitor = None
        # capture the environment if needed
        local_dict: DeepChainMap[Any, Any] = DeepChainMap()
        if isinstance(where, PyTablesExpr):
            # wrap an existing expression: reuse its scope and raw string
            local_dict = where.env.scope
            _where = where.expr
        elif is_list_like(where):
            where = list(where)
            for idx, w in enumerate(where):
                if isinstance(w, PyTablesExpr):
                    # NOTE: only the scope of the *last* PyTablesExpr in the
                    # list is kept; earlier ones are overwritten
                    local_dict = w.env.scope
                else:
                    w = _validate_where(w)
                    where[idx] = w
            # AND all terms of the list together
            _where = " & ".join([f"({w})" for w in com.flatten(where)])
        else:
            # _validate_where ensures we otherwise have a string
            _where = where
        self.expr = _where
        self.env = PyTablesScope(scope_level + 1, local_dict=local_dict)
        if queryables is not None and isinstance(self.expr, str):
            self.env.queryables.update(queryables)
            self._visitor = PyTablesExprVisitor(
                self.env,
                queryables=queryables,
                parser="pytables",
                engine="pytables",
                encoding=encoding,
            )
            self.terms = self.parse()

    def __repr__(self) -> str:
        # prefer the parsed term tree when available
        if self.terms is not None:
            return pprint_thing(self.terms)
        return pprint_thing(self.expr)

    def evaluate(self):
        """create and return the numexpr condition and filter"""
        try:
            self.condition = self.terms.prune(ConditionBinOp)
        except AttributeError as err:
            raise ValueError(
                f"cannot process expression [{self.expr}], [{self}] "
                "is not a valid condition"
            ) from err
        try:
            self.filter = self.terms.prune(FilterBinOp)
        except AttributeError as err:
            raise ValueError(
                f"cannot process expression [{self.expr}], [{self}] "
                "is not a valid filter"
            ) from err
        return self.condition, self.filter
class TermValue:
    """Hold a term value that we use to construct a condition/filter."""

    def __init__(self, value, converted, kind: str):
        assert isinstance(kind, str), kind
        self.value = value
        self.converted = converted
        self.kind = kind

    def tostring(self, encoding) -> str:
        """quote the string if not encoded else encode and return"""
        converted = self.converted
        if self.kind == "string":
            return str(converted) if encoding is not None else f'"{converted}"'
        if self.kind == "float":
            # python 2 str(float) is not always round-trippable, so use repr()
            return repr(converted)
        return str(converted)
def maybe_expression(s) -> bool:
    """loose checking if s is a pytables-acceptable expression"""
    if not isinstance(s, str):
        return False
    operators = (
        PyTablesExprVisitor.binary_ops + PyTablesExprVisitor.unary_ops + ("=",)
    )
    # require at least one operator for s to look like an expression
    return any(op in s for op in operators)
|
rs2/pandas
|
pandas/core/computation/pytables.py
|
Python
|
bsd-3-clause
| 19,641
|
[
"VisIt"
] |
ed5d9f8535b54043c49eedd48015d5bb74654984b8d68a5d0580aa400ce2b225
|
from rdkit import RDConfig
import sys, os
from time import sleep
from multiprocessing import Process, Value
import unittest
from rdkit import Chem
from rdkit.Chem import ChemicalForceFields
from rdkit.Chem import rdMolTransforms
class OptSafe:
    """Run force-field minimizations in a child process so that a
    minimization that never converges can be killed after a timeout."""

    def __init__(self):
        # Mol block whose UFF/MMFF minimization is known to loop indefinitely
        # (used to exercise the watchdog in opt()).
        self.minInfLoop = """minInfLoop
RDKit 3D
7 5 0 0 0 0 0 0 0 0999 V2000
1.7321 -0.5000 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
0.8660 -0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.0000 -0.5000 0.0000 N 0 0 0 0 0 0 0 0 0 0 0 0
0.5000 -1.3660 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.8660 -1.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.5000 0.3660 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
3.2321 -0.5000 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
2 3 1 0 0 0 0
3 4 1 0 0 0 0
3 5 1 0 0 0 0
3 6 1 0 0 0 0
M CHG 2 3 1 7 -1
M END"""

    def uffOptFunc(self, v, mol):
        # Child-process entry point: minimize with UFF, then flag completion.
        ff = ChemicalForceFields.UFFGetMoleculeForceField(mol)
        try:
            ff.Minimize()
        except RuntimeError:
            pass
        v.value = True
        return

    def mmffOptFunc(self, v, mol):
        # Child-process entry point: minimize with MMFF, then flag completion.
        mp = ChemicalForceFields.MMFFGetMoleculeProperties(mol)
        ff = ChemicalForceFields.MMFFGetMoleculeForceField(mol, mp)
        try:
            ff.Minimize()
        except RuntimeError:
            pass
        v.value = True
        return

    def opt(self, mol, optFunc):
        """Run optFunc(v, mol) in a subprocess, polling the shared flag v;
        terminate the child if it exceeds MAX_OPT_SLEEP_SEC.
        Returns True iff the optimization finished in time."""
        OPT_SLEEP_SEC = 0.2
        MAX_OPT_SLEEP_SEC = 3
        # shared boolean the child sets once Minimize() returns
        v = Value('b', False)
        optProcess = Process(target=optFunc, args=(v, mol))
        optProcess.start()
        s = 0.0
        while ((s < MAX_OPT_SLEEP_SEC) and (not v.value)):
            s += OPT_SLEEP_SEC
            sleep(OPT_SLEEP_SEC)
        if (not v.value):
            sys.stderr.write('Killing Minimize() or it will loop indefinitely\n')
            optProcess.terminate()
        optProcess.join()
        return bool(v.value)
class TestCase(unittest.TestCase):
    """Exercise the UFF/MMFF constraint and fixed-atom wrappers, plus the
    infinite-minimization watchdog in OptSafe.

    Fixes relative to the original:
    - ``assertEquals`` (deprecated alias, removed in Python 3.12) replaced
      with ``assertEqual``.
    - the local name ``os`` (which shadowed the imported ``os`` module)
      renamed to ``opt_safe``.
    """

    def setUp(self):
        # n-butane mol block used as the substrate for all constraint tests.
        self.molB = """butane
RDKit 3D
butane
17 16 0 0 0 0 0 0 0 0999 V2000
0.0000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1.4280 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1.7913 -0.2660 0.9927 H 0 0 0 0 0 0 0 0 0 0 0 0
1.9040 1.3004 -0.3485 C 0 0 0 0 0 0 0 0 0 0 0 0
1.5407 2.0271 0.3782 H 0 0 0 0 0 0 0 0 0 0 0 0
1.5407 1.5664 -1.3411 H 0 0 0 0 0 0 0 0 0 0 0 0
3.3320 1.3004 -0.3485 C 0 0 0 0 0 0 0 0 0 0 0 0
3.6953 1.5162 -1.3532 H 0 0 0 0 0 0 0 0 0 0 0 0
3.8080 0.0192 0.0649 C 0 0 0 0 0 0 0 0 0 0 0 0
3.4447 -0.7431 -0.6243 H 0 0 0 0 0 0 0 0 0 0 0 0
3.4447 -0.1966 1.0697 H 0 0 0 0 0 0 0 0 0 0 0 0
4.8980 0.0192 0.0649 H 0 0 0 0 0 0 0 0 0 0 0 0
3.6954 2.0627 0.3408 H 0 0 0 0 0 0 0 0 0 0 0 0
1.7913 -0.7267 -0.7267 H 0 0 0 0 0 0 0 0 0 0 0 0
-0.3633 0.7267 0.7267 H 0 0 0 0 0 0 0 0 0 0 0 0
-0.3633 -0.9926 0.2660 H 0 0 0 0 0 0 0 0 0 0 0 0
-0.3633 0.2660 -0.9926 H 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
1 15 1 0 0 0 0
1 16 1 0 0 0 0
1 17 1 0 0 0 0
2 3 1 0 0 0 0
2 4 1 0 0 0 0
2 14 1 0 0 0 0
4 5 1 0 0 0 0
4 6 1 0 0 0 0
4 7 1 0 0 0 0
7 8 1 0 0 0 0
7 9 1 0 0 0 0
7 13 1 0 0 0 0
9 10 1 0 0 0 0
9 11 1 0 0 0 0
9 12 1 0 0 0 0
M END"""

    def testUFFMinInfLoop(self):
        """UFF minimization of the pathological molecule must be killed or raise."""
        opt_safe = OptSafe()
        m = Chem.MolFromMolBlock(opt_safe.minInfLoop)
        self.assertTrue(m)
        ok = False
        try:
            ok = opt_safe.opt(m, opt_safe.uffOptFunc)
        except RuntimeError:
            ok = True
        self.assertTrue(ok)

    def testUFFDistanceConstraints(self):
        """Absolute and relative UFF distance constraints are honored."""
        m = Chem.MolFromMolBlock(self.molB, True, False)
        ff = ChemicalForceFields.UFFGetMoleculeForceField(m)
        self.assertTrue(ff)
        ff.UFFAddDistanceConstraint(1, 3, False, 2.0, 2.0, 1.0e5)
        r = ff.Minimize()
        self.assertTrue(r == 0)
        conf = m.GetConformer()
        dist = rdMolTransforms.GetBondLength(conf, 1, 3)
        self.assertTrue(dist > 1.99)
        ff = ChemicalForceFields.UFFGetMoleculeForceField(m)
        self.assertTrue(ff)
        ff.UFFAddDistanceConstraint(1, 3, True, -0.2, 0.2, 1.0e5)
        r = ff.Minimize()
        self.assertTrue(r == 0)
        conf = m.GetConformer()
        dist = rdMolTransforms.GetBondLength(conf, 1, 3)
        self.assertTrue(dist > 1.79)

    def testUFFAngleConstraints(self):
        """Absolute and relative UFF angle constraints are honored."""
        m = Chem.MolFromMolBlock(self.molB, True, False)
        ff = ChemicalForceFields.UFFGetMoleculeForceField(m)
        self.assertTrue(ff)
        ff.UFFAddAngleConstraint(1, 3, 6, False, 90.0, 90.0, 100.0)
        r = ff.Minimize()
        self.assertTrue(r == 0)
        conf = m.GetConformer()
        angle = rdMolTransforms.GetAngleDeg(conf, 1, 3, 6)
        self.assertTrue(int(angle) == 90)
        ff = ChemicalForceFields.UFFGetMoleculeForceField(m)
        self.assertTrue(ff)
        ff.UFFAddAngleConstraint(1, 3, 6, True, -10.0, 10.0, 100.0)
        r = ff.Minimize()
        self.assertTrue(r == 0)
        conf = m.GetConformer()
        angle = rdMolTransforms.GetAngleDeg(conf, 1, 3, 6)
        self.assertTrue(int(angle) == 100)
        ff = ChemicalForceFields.UFFGetMoleculeForceField(m)
        self.assertTrue(ff)
        ff.UFFAddAngleConstraint(1, 3, 6, False, -10.0, 10.0, 100.0)
        r = ff.Minimize()
        self.assertTrue(r == 0)
        conf = m.GetConformer()
        angle = rdMolTransforms.GetAngleDeg(conf, 1, 3, 6)
        self.assertEqual(int(angle), 10)

    def testUFFTorsionConstraints(self):
        """Absolute and relative UFF torsion constraints are honored."""
        m = Chem.MolFromMolBlock(self.molB, True, False)
        conf = m.GetConformer()
        rdMolTransforms.SetDihedralDeg(conf, 1, 3, 6, 8, 15.0)
        ff = ChemicalForceFields.UFFGetMoleculeForceField(m)
        self.assertTrue(ff)
        ff.UFFAddTorsionConstraint(1, 3, 6, 8, False, 10.0, 20.0, 100.0)
        r = ff.Minimize()
        self.assertTrue(r == 0)
        dihedral = rdMolTransforms.GetDihedralDeg(conf, 1, 3, 6, 8)
        self.assertTrue(int(dihedral) == 20)
        rdMolTransforms.SetDihedralDeg(conf, 1, 3, 6, 8, -30.0)
        ff = ChemicalForceFields.UFFGetMoleculeForceField(m)
        self.assertTrue(ff)
        ff.UFFAddTorsionConstraint(1, 3, 6, 8, True, -10.0, 8.0, 100.0)
        r = ff.Minimize()
        self.assertTrue(r == 0)
        conf = m.GetConformer()
        dihedral = rdMolTransforms.GetDihedralDeg(conf, 1, 3, 6, 8)
        self.assertEqual(int(dihedral), -40)
        rdMolTransforms.SetDihedralDeg(conf, 1, 3, 6, 8, -10.0)
        ff = ChemicalForceFields.UFFGetMoleculeForceField(m)
        self.assertTrue(ff)
        ff.UFFAddTorsionConstraint(1, 3, 6, 8, False, -10.0, 8.0, 1.0e6)
        r = ff.Minimize(500)
        self.assertTrue(r == 0)
        conf = m.GetConformer()
        dihedral = rdMolTransforms.GetDihedralDeg(conf, 1, 3, 6, 8)
        self.assertTrue(int(dihedral) == -10)

    def testUFFPositionConstraints(self):
        """A UFF position constraint keeps the atom within the given radius."""
        m = Chem.MolFromMolBlock(self.molB, True, False)
        ff = ChemicalForceFields.UFFGetMoleculeForceField(m)
        self.assertTrue(ff)
        conf = m.GetConformer()
        p = conf.GetAtomPosition(1)
        ff.UFFAddPositionConstraint(1, 0.3, 1.0e5)
        r = ff.Minimize()
        self.assertTrue(r == 0)
        q = conf.GetAtomPosition(1)
        self.assertTrue((p - q).Length() < 0.3)

    def testUFFFixedAtoms(self):
        """A UFF fixed point does not move during minimization."""
        m = Chem.MolFromMolBlock(self.molB, True, False)
        ff = ChemicalForceFields.UFFGetMoleculeForceField(m)
        self.assertTrue(ff)
        conf = m.GetConformer()
        fp = conf.GetAtomPosition(1)
        ff.AddFixedPoint(1)
        r = ff.Minimize()
        self.assertTrue(r == 0)
        fq = conf.GetAtomPosition(1)
        self.assertTrue((fp - fq).Length() < 0.01)

    def testMMFFMinInfLoop(self):
        """MMFF minimization of the pathological molecule must be killed or raise."""
        opt_safe = OptSafe()
        m = Chem.MolFromMolBlock(opt_safe.minInfLoop)
        self.assertTrue(m)
        ok = False
        try:
            ok = opt_safe.opt(m, opt_safe.mmffOptFunc)
        except RuntimeError:
            ok = True
        self.assertTrue(ok)

    def testMMFFDistanceConstraints(self):
        """Absolute and relative MMFF distance constraints are honored."""
        m = Chem.MolFromMolBlock(self.molB, True, False)
        mp = ChemicalForceFields.MMFFGetMoleculeProperties(m)
        ff = ChemicalForceFields.MMFFGetMoleculeForceField(m, mp)
        self.assertTrue(ff)
        ff.MMFFAddDistanceConstraint(1, 3, False, 2.0, 2.0, 1.0e5)
        r = ff.Minimize()
        self.assertTrue(r == 0)
        conf = m.GetConformer()
        dist = rdMolTransforms.GetBondLength(conf, 1, 3)
        self.assertTrue(dist > 1.99)
        ff = ChemicalForceFields.MMFFGetMoleculeForceField(m, mp)
        self.assertTrue(ff)
        ff.MMFFAddDistanceConstraint(1, 3, True, -0.2, 0.2, 1.0e5)
        r = ff.Minimize()
        self.assertTrue(r == 0)
        conf = m.GetConformer()
        dist = rdMolTransforms.GetBondLength(conf, 1, 3)
        self.assertTrue(dist > 1.79)

    def testMMFFAngleConstraints(self):
        """Absolute and relative MMFF angle constraints are honored."""
        m = Chem.MolFromMolBlock(self.molB, True, False)
        mp = ChemicalForceFields.MMFFGetMoleculeProperties(m)
        ff = ChemicalForceFields.MMFFGetMoleculeForceField(m, mp)
        self.assertTrue(ff)
        ff.MMFFAddAngleConstraint(1, 3, 6, False, 90.0, 90.0, 100.0)
        r = ff.Minimize()
        self.assertTrue(r == 0)
        conf = m.GetConformer()
        angle = rdMolTransforms.GetAngleDeg(conf, 1, 3, 6)
        self.assertTrue(int(angle) == 90)
        ff = ChemicalForceFields.MMFFGetMoleculeForceField(m, mp)
        self.assertTrue(ff)
        ff.MMFFAddAngleConstraint(1, 3, 6, True, -10.0, 10.0, 100.0)
        r = ff.Minimize()
        self.assertTrue(r == 0)
        conf = m.GetConformer()
        angle = rdMolTransforms.GetAngleDeg(conf, 1, 3, 6)
        self.assertTrue(int(angle) == 100)
        ff = ChemicalForceFields.MMFFGetMoleculeForceField(m, mp)
        self.assertTrue(ff)
        ff.MMFFAddAngleConstraint(1, 3, 6, False, -10.0, 10.0, 100.0)
        r = ff.Minimize()
        self.assertTrue(r == 0)
        conf = m.GetConformer()
        angle = rdMolTransforms.GetAngleDeg(conf, 1, 3, 6)
        self.assertEqual(int(angle), 10)

    def testMMFFTorsionConstraints(self):
        """Absolute and relative MMFF torsion constraints are honored."""
        m = Chem.MolFromMolBlock(self.molB, True, False)
        conf = m.GetConformer()
        rdMolTransforms.SetDihedralDeg(conf, 1, 3, 6, 8, 15.0)
        mp = ChemicalForceFields.MMFFGetMoleculeProperties(m)
        ff = ChemicalForceFields.MMFFGetMoleculeForceField(m, mp)
        self.assertTrue(ff)
        ff.MMFFAddTorsionConstraint(1, 3, 6, 8, False, 10.0, 20.0, 100.0)
        r = ff.Minimize()
        self.assertTrue(r == 0)
        dihedral = rdMolTransforms.GetDihedralDeg(conf, 1, 3, 6, 8)
        self.assertEqual(int(dihedral), 20)
        rdMolTransforms.SetDihedralDeg(conf, 1, 3, 6, 8, -30.0)
        ff = ChemicalForceFields.MMFFGetMoleculeForceField(m, mp)
        self.assertTrue(ff)
        ff.MMFFAddTorsionConstraint(1, 3, 6, 8, True, -10.0, 8.0, 100.0)
        r = ff.Minimize()
        self.assertTrue(r == 0)
        conf = m.GetConformer()
        dihedral = rdMolTransforms.GetDihedralDeg(conf, 1, 3, 6, 8)
        self.assertTrue(int(dihedral) == -40)
        rdMolTransforms.SetDihedralDeg(conf, 1, 3, 6, 8, -10.0)
        ff = ChemicalForceFields.MMFFGetMoleculeForceField(m, mp)
        self.assertTrue(ff)
        ff.MMFFAddTorsionConstraint(1, 3, 6, 8, False, -10.0, 8.0, 100.0)
        r = ff.Minimize(1000)
        self.assertTrue(r == 0)
        conf = m.GetConformer()
        dihedral = rdMolTransforms.GetDihedralDeg(conf, 1, 3, 6, 8)
        self.assertTrue(int(dihedral) == -10)

    def testMMFFPositionConstraints(self):
        """An MMFF position constraint keeps the atom within the given radius."""
        m = Chem.MolFromMolBlock(self.molB, True, False)
        mp = ChemicalForceFields.MMFFGetMoleculeProperties(m)
        ff = ChemicalForceFields.MMFFGetMoleculeForceField(m, mp)
        self.assertTrue(ff)
        conf = m.GetConformer()
        p = conf.GetAtomPosition(1)
        ff.MMFFAddPositionConstraint(1, 0.3, 1.0e5)
        r = ff.Minimize()
        self.assertTrue(r == 0)
        q = conf.GetAtomPosition(1)
        self.assertTrue((p - q).Length() < 0.3)

    def testMMFFFixedAtoms(self):
        """An MMFF fixed point does not move during minimization."""
        m = Chem.MolFromMolBlock(self.molB, True, False)
        mp = ChemicalForceFields.MMFFGetMoleculeProperties(m)
        ff = ChemicalForceFields.MMFFGetMoleculeForceField(m, mp)
        self.assertTrue(ff)
        conf = m.GetConformer()
        fp = conf.GetAtomPosition(1)
        ff.AddFixedPoint(1)
        r = ff.Minimize()
        self.assertTrue(r == 0)
        fq = conf.GetAtomPosition(1)
        self.assertTrue((fp - fq).Length() < 0.01)
if __name__ == '__main__':
unittest.main()
|
ptosco/rdkit
|
Code/ForceField/Wrap/testConstraints.py
|
Python
|
bsd-3-clause
| 12,188
|
[
"RDKit"
] |
a21c30278794e90a126550426a717714ebebdd52a979c084befd3ce11727a2dc
|
# coding=utf-8
# Copyright 2020 The SimCLR Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific simclr governing permissions and
# limitations under the License.
# ==============================================================================
"""Data preprocessing and augmentation."""
import functools
from absl import flags
import tensorflow.compat.v2 as tf
FLAGS = flags.FLAGS
CROP_PROPORTION = 0.875 # Standard for ImageNet.
def random_apply(func, p, x):
    """Randomly apply function func to x with probability p."""
    coin = tf.random.uniform([], minval=0, maxval=1, dtype=tf.float32)
    return tf.cond(tf.less(coin, tf.cast(p, tf.float32)),
                   lambda: func(x),
                   lambda: x)
def random_brightness(image, max_delta, impl='simclrv2'):
    """A multiplicative vs additive change of brightness."""
    if impl == 'simclrv1':
        # additive jitter, as in the original SimCLR v1 code
        return tf.image.random_brightness(image, max_delta=max_delta)
    if impl == 'simclrv2':
        # multiplicative jitter; lower bound is clamped at 0
        lower = tf.maximum(1.0 - max_delta, 0)
        factor = tf.random.uniform([], lower, 1.0 + max_delta)
        return image * factor
    raise ValueError('Unknown impl {} for random brightness.'.format(impl))
def to_grayscale(image, keep_channels=True):
    """Convert to grayscale, optionally tiling back to 3 channels."""
    gray = tf.image.rgb_to_grayscale(image)
    if not keep_channels:
        return gray
    return tf.tile(gray, [1, 1, 3])
def color_jitter(image, strength, random_order=True, impl='simclrv2'):
    """Distorts the color of the image.
    Args:
      image: The input image tensor.
      strength: the floating number for the strength of the color augmentation.
      random_order: A bool, specifying whether to randomize the jittering order.
      impl: 'simclrv1' or 'simclrv2'.  Whether to use simclrv1 or simclrv2's
          version of random brightness.
    Returns:
      The distorted image tensor.
    """
    # hue jitter is deliberately weaker than the other three
    brightness = 0.8 * strength
    contrast = 0.8 * strength
    saturation = 0.8 * strength
    hue = 0.2 * strength
    jitter_fn = color_jitter_rand if random_order else color_jitter_nonrand
    return jitter_fn(image, brightness, contrast, saturation, hue, impl=impl)
def color_jitter_nonrand(image,
                         brightness=0,
                         contrast=0,
                         saturation=0,
                         hue=0,
                         impl='simclrv2'):
  """Distorts the color of the image (jittering order is fixed).
  Args:
    image: The input image tensor.
    brightness: A float, specifying the brightness for color jitter.
    contrast: A float, specifying the contrast for color jitter.
    saturation: A float, specifying the saturation for color jitter.
    hue: A float, specifying the hue for color jitter.
    impl: 'simclrv1' or 'simclrv2'.  Whether to use simclrv1 or simclrv2's
        version of random brightness.
  Returns:
    The distorted image tensor.
  """
  with tf.name_scope('distort_color'):
    def apply_transform(i, x, brightness, contrast, saturation, hue):
      """Apply the i-th transformation."""
      if brightness != 0 and i == 0:
        x = random_brightness(x, max_delta=brightness, impl=impl)
      elif contrast != 0 and i == 1:
        x = tf.image.random_contrast(
            x, lower=1-contrast, upper=1+contrast)
      elif saturation != 0 and i == 2:
        x = tf.image.random_saturation(
            x, lower=1-saturation, upper=1+saturation)
      # BUGFIX: the original branch was `elif hue != 0:` with no index guard,
      # so when an earlier strength was 0 the hue jitter also fired on that
      # index and could be applied up to four times.
      elif hue != 0 and i == 3:
        x = tf.image.random_hue(x, max_delta=hue)
      return x
    for i in range(4):
      image = apply_transform(i, image, brightness, contrast, saturation, hue)
      image = tf.clip_by_value(image, 0., 1.)
    return image
def color_jitter_rand(image,
                      brightness=0,
                      contrast=0,
                      saturation=0,
                      hue=0,
                      impl='simclrv2'):
  """Distorts the color of the image (jittering order is random).
  Args:
    image: The input image tensor.
    brightness: A float, specifying the brightness for color jitter.
    contrast: A float, specifying the contrast for color jitter.
    saturation: A float, specifying the saturation for color jitter.
    hue: A float, specifying the hue for color jitter.
    impl: 'simclrv1' or 'simclrv2'.  Whether to use simclrv1 or simclrv2's
        version of random brightness.
  Returns:
    The distorted image tensor.
  """
  with tf.name_scope('distort_color'):
    def apply_transform(i, x):
      """Apply the i-th transformation."""
      # Each *_foo closure is a no-op when its strength is 0.
      def brightness_foo():
        if brightness == 0:
          return x
        else:
          return random_brightness(x, max_delta=brightness, impl=impl)
      def contrast_foo():
        if contrast == 0:
          return x
        else:
          return tf.image.random_contrast(x, lower=1-contrast, upper=1+contrast)
      def saturation_foo():
        if saturation == 0:
          return x
        else:
          return tf.image.random_saturation(
              x, lower=1-saturation, upper=1+saturation)
      def hue_foo():
        if hue == 0:
          return x
        else:
          return tf.image.random_hue(x, max_delta=hue)
      # Binary-tree dispatch on the (graph-mode) tensor index i:
      # 0 -> brightness, 1 -> contrast, 2 -> saturation, 3 -> hue.
      x = tf.cond(tf.less(i, 2),
                  lambda: tf.cond(tf.less(i, 1), brightness_foo, contrast_foo),
                  lambda: tf.cond(tf.less(i, 3), saturation_foo, hue_foo))
      return x
    # Apply all four jitters in a randomly shuffled order, clipping after each.
    perm = tf.random.shuffle(tf.range(4))
    for i in range(4):
      image = apply_transform(perm[i], image)
      image = tf.clip_by_value(image, 0., 1.)
    return image
def _compute_crop_shape(
    image_height, image_width, aspect_ratio, crop_proportion):
  """Compute aspect ratio-preserving shape for central crop.
  The resulting shape retains `crop_proportion` along one side and a proportion
  less than or equal to `crop_proportion` along the other side.
  Args:
    image_height: Height of image to be cropped.
    image_width: Width of image to be cropped.
    aspect_ratio: Desired aspect ratio (width / height) of output.
    crop_proportion: Proportion of image to retain along the less-cropped side.
  Returns:
    crop_height: Height of image after cropping.
    crop_width: Width of image after cropping.
  """
  image_width_float = tf.cast(image_width, tf.float32)
  image_height_float = tf.cast(image_height, tf.float32)
  def _requested_aspect_ratio_wider_than_image():
    # Width is the limiting side: keep crop_proportion of the width and
    # derive the height from the requested aspect ratio.
    crop_height = tf.cast(
        tf.math.rint(crop_proportion / aspect_ratio * image_width_float),
        tf.int32)
    crop_width = tf.cast(
        tf.math.rint(crop_proportion * image_width_float), tf.int32)
    return crop_height, crop_width
  def _image_wider_than_requested_aspect_ratio():
    # Height is the limiting side: keep crop_proportion of the height and
    # derive the width from the requested aspect ratio.
    crop_height = tf.cast(
        tf.math.rint(crop_proportion * image_height_float), tf.int32)
    crop_width = tf.cast(
        tf.math.rint(crop_proportion * aspect_ratio * image_height_float),
        tf.int32)
    return crop_height, crop_width
  return tf.cond(
      aspect_ratio > image_width_float / image_height_float,
      _requested_aspect_ratio_wider_than_image,
      _image_wider_than_requested_aspect_ratio)
def center_crop(image, height, width, crop_proportion):
    """Crops to center of image and rescales to desired size.
    Args:
      image: Image Tensor to crop.
      height: Height of image to be cropped.
      width: Width of image to be cropped.
      crop_proportion: Proportion of image to retain along the less-cropped side.
    Returns:
      A `height` x `width` x channels Tensor holding a central crop of `image`.
    """
    shape = tf.shape(image)
    image_height, image_width = shape[0], shape[1]
    crop_height, crop_width = _compute_crop_shape(
        image_height, image_width, width / height, crop_proportion)
    # center the crop (rounding up when the margin is odd)
    offset_height = ((image_height - crop_height) + 1) // 2
    offset_width = ((image_width - crop_width) + 1) // 2
    cropped = tf.image.crop_to_bounding_box(
        image, offset_height, offset_width, crop_height, crop_width)
    return tf.image.resize([cropped], [height, width],
                           method=tf.image.ResizeMethod.BICUBIC)[0]
def distorted_bounding_box_crop(image,
                                bbox,
                                min_object_covered=0.1,
                                aspect_ratio_range=(0.75, 1.33),
                                area_range=(0.05, 1.0),
                                max_attempts=100,
                                scope=None):
    """Generates cropped_image using one of the bboxes randomly distorted.
    See `tf.image.sample_distorted_bounding_box` for more documentation.
    Args:
      image: `Tensor` of image data.
      bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]`
          where each coordinate is [0, 1) and the coordinates are arranged
          as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole
          image.
      min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
          area of the image must contain at least this fraction of any bounding
          box supplied.
      aspect_ratio_range: An optional list of `float`s. The cropped area of the
          image must have an aspect ratio = width / height within this range.
      area_range: An optional list of `float`s. The cropped area of the image
          must contain a fraction of the supplied image within in this range.
      max_attempts: An optional `int`. Number of attempts at generating a cropped
          region of the image of the specified constraints. After `max_attempts`
          failures, return the entire image.
      scope: Optional `str` for name scope.
    Returns:
      (cropped image `Tensor`, distorted bbox `Tensor`).
    """
    with tf.name_scope(scope or 'distorted_bounding_box_crop'):
        img_shape = tf.shape(image)
        bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box(
            img_shape,
            bounding_boxes=bbox,
            min_object_covered=min_object_covered,
            aspect_ratio_range=aspect_ratio_range,
            area_range=area_range,
            max_attempts=max_attempts,
            use_image_if_no_bounding_boxes=True)
        # Crop the image to the sampled bounding box.
        offset_y, offset_x, _ = tf.unstack(bbox_begin)
        target_height, target_width, _ = tf.unstack(bbox_size)
        return tf.image.crop_to_bounding_box(
            image, offset_y, offset_x, target_height, target_width)
def crop_and_resize(image, height, width):
    """Make a random crop and resize it to height `height` and width `width`.
    Args:
      image: Tensor representing the image.
      height: Desired image height.
      width: Desired image width.
    Returns:
      A `height` x `width` x channels Tensor holding a random crop of `image`.
    """
    # a single bbox covering the whole image
    whole_image_bbox = tf.constant([0.0, 0.0, 1.0, 1.0],
                                   dtype=tf.float32, shape=[1, 1, 4])
    aspect = width / height
    cropped = distorted_bounding_box_crop(
        image,
        whole_image_bbox,
        min_object_covered=0.1,
        aspect_ratio_range=(3. / 4 * aspect, 4. / 3. * aspect),
        area_range=(0.08, 1.0),
        max_attempts=100,
        scope=None)
    return tf.image.resize([cropped], [height, width],
                           method=tf.image.ResizeMethod.BICUBIC)[0]
def gaussian_blur(image, kernel_size, sigma, padding='SAME'):
    """Blurs the given image with separable convolution.
    Args:
      image: Tensor of shape [height, width, channels] and dtype float to blur.
      kernel_size: Integer Tensor for the size of the blur kernel. This is should
        be an odd number. If it is an even number, the actual kernel size will be
        size + 1.
      sigma: Sigma value for gaussian operator.
      padding: Padding to use for the convolution. Typically 'SAME' or 'VALID'.
    Returns:
      A Tensor representing the blurred image.
    """
    radius = tf.cast(kernel_size / 2, dtype=tf.int32)
    kernel_size = radius * 2 + 1  # force an odd kernel size
    offsets = tf.cast(tf.range(-radius, radius + 1), dtype=tf.float32)
    weights = tf.exp(
        -tf.pow(offsets, 2.0) /
        (2.0 * tf.pow(tf.cast(sigma, dtype=tf.float32), 2.0)))
    weights /= tf.reduce_sum(weights)
    # One vertical and one horizontal filter.
    kernel_v = tf.reshape(weights, [kernel_size, 1, 1, 1])
    kernel_h = tf.reshape(weights, [1, kernel_size, 1, 1])
    num_channels = tf.shape(image)[-1]
    kernel_h = tf.tile(kernel_h, [1, 1, num_channels, 1])
    kernel_v = tf.tile(kernel_v, [1, 1, num_channels, 1])
    needs_batch_dim = image.shape.ndims == 3
    if needs_batch_dim:
        # Tensorflow requires batched input to convolutions, which we can fake
        # with an extra dimension.
        image = tf.expand_dims(image, axis=0)
    blurred = tf.nn.depthwise_conv2d(
        image, kernel_h, strides=[1, 1, 1, 1], padding=padding)
    blurred = tf.nn.depthwise_conv2d(
        blurred, kernel_v, strides=[1, 1, 1, 1], padding=padding)
    if needs_batch_dim:
        blurred = tf.squeeze(blurred, axis=0)
    return blurred
def random_crop_with_resize(image, height, width, p=1.0):
    """Randomly crop and resize an image.
    Args:
      image: `Tensor` representing an image of arbitrary size.
      height: Height of output image.
      width: Width of output image.
      p: Probability of applying this transformation.
    Returns:
      A preprocessed image `Tensor`.
    """
    return random_apply(
        lambda im: crop_and_resize(im, height, width), p=p, x=image)
def random_color_jitter(image, p=1.0, strength=1.0,
                        impl='simclrv2'):
  """With probability `p`, apply color jitter (itself with probability 0.8)
  followed by random grayscale conversion (probability 0.2); otherwise
  return `image` unchanged."""
  def _transform(image):
    color_jitter_t = functools.partial(
        color_jitter, strength=strength, impl=impl)
    image = random_apply(color_jitter_t, p=0.8, x=image)
    return random_apply(to_grayscale, p=0.2, x=image)
  return random_apply(_transform, p=p, x=image)
def random_blur(image, height, width, p=1.0):
    """Randomly blur an image.
    Args:
      image: `Tensor` representing an image of arbitrary size.
      height: Height of output image.
      width: Width of output image.
      p: probability of applying this transformation.
    Returns:
      A preprocessed image `Tensor`.
    """
    del width
    def _blur(image):
        sigma = tf.random.uniform([], 0.1, 2.0, dtype=tf.float32)
        # kernel spans one tenth of the image height
        return gaussian_blur(
            image, kernel_size=height//10, sigma=sigma, padding='SAME')
    return random_apply(_blur, p=p, x=image)
def batch_random_blur(images_list, height, width, blur_probability=0.5):
    """Apply efficient batch data transformations.
    Args:
      images_list: a list of image tensors.
      height: the height of image.
      width: the width of image.
      blur_probability: the probaility to apply the blur operator.
    Returns:
      Preprocessed feature list.
    """
    def _selector(p, bsz):
        # per-example 0/1 mask choosing blurred vs original
        coin = tf.random.uniform([bsz, 1, 1, 1], 0, 1, dtype=tf.float32)
        return tf.cast(tf.less(coin, p), tf.float32)

    out = []
    for images in images_list:
        blurred = random_blur(images, height, width, p=1.)
        sel = _selector(blur_probability, tf.shape(images)[0])
        mixed = blurred * sel + images * (1 - sel)
        out.append(tf.clip_by_value(mixed, 0., 1.))
    return out
def preprocess_for_train(image,
                         height,
                         width,
                         color_distort=True,
                         crop=True,
                         flip=True,
                         impl='simclrv2'):
    """Preprocesses the given image for training.
    Args:
      image: `Tensor` representing an image of arbitrary size.
      height: Height of output image.
      width: Width of output image.
      color_distort: Whether to apply the color distortion.
      crop: Whether to crop the image.
      flip: Whether or not to flip left and right of an image.
      impl: 'simclrv1' or 'simclrv2'.  Whether to use simclrv1 or simclrv2's
          version of random brightness.
    Returns:
      A preprocessed image `Tensor`.
    """
    if crop:
        image = random_crop_with_resize(image, height, width)
    if flip:
        image = tf.image.random_flip_left_right(image)
    if color_distort:
        image = random_color_jitter(image, strength=FLAGS.color_jitter_strength,
                                    impl=impl)
    # fix the static shape and keep pixel values in [0, 1]
    reshaped = tf.reshape(image, [height, width, 3])
    return tf.clip_by_value(reshaped, 0., 1.)
def preprocess_for_eval(image, height, width, crop=True):
    """Preprocesses the given image for evaluation.
    Args:
      image: `Tensor` representing an image of arbitrary size.
      height: Height of output image.
      width: Width of output image.
      crop: Whether or not to (center) crop the test images.
    Returns:
      A preprocessed image `Tensor`.
    """
    if crop:
        image = center_crop(image, height, width, crop_proportion=CROP_PROPORTION)
    # fix the static shape and keep pixel values in [0, 1]
    reshaped = tf.reshape(image, [height, width, 3])
    return tf.clip_by_value(reshaped, 0., 1.)
def preprocess_image(image, height, width, is_training=False,
                     color_distort=True, test_crop=True):
    """Preprocesses the given image.
    Args:
      image: `Tensor` representing an image of arbitrary size.
      height: Height of output image.
      width: Width of output image.
      is_training: `bool` for whether the preprocessing is for training.
      color_distort: whether to apply the color distortion.
      test_crop: whether or not to extract a central crop of the images
          (as for standard ImageNet evaluation) during the evaluation.
    Returns:
      A preprocessed image `Tensor` of range [0, 1].
    """
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    if is_training:
        return preprocess_for_train(image, height, width, color_distort)
    return preprocess_for_eval(image, height, width, test_crop)
|
google-research/simclr
|
tf2/data_util.py
|
Python
|
apache-2.0
| 18,220
|
[
"Gaussian"
] |
8c73c77f018b6f13267ee97c90fa24b978597bb11f97f7b3910b2490d44e59fc
|
"""
This file is part of The Cannon analysis project.
Copyright 2014 Melissa Ness.
# urls
- http://iopscience.iop.org/1538-3881/146/5/133/suppdata/aj485195t4_mrt.txt for calibration stars
- http://data.sdss3.org/irSpectrumDetail?locid=4330&commiss=0&apogeeid=2M17411636-2903150&show_aspcap=True object explorer
- http://data.sdss3.org/basicIRSpectra/searchStarA
- http://data.sdss3.org/sas/dr10/apogee/spectro/redux/r3/s3/a3/ for the data files
# to-do
- need to add a test that the wavelength range is the same - and if it isn't interpolate to the same range
- format PEP8-ish (four-space tabs, for example)
- take logg_cut as an input
- extend to perform quadratic fitting
"""
#from astropy.io import fits as pyfits
import pyfits
import scipy
import glob
import pickle
import pylab
from scipy import interpolate
from scipy import ndimage
from scipy import optimize as opt
import numpy as np
normed_training_data = 'normed_data.pickle'
def weighted_median(values, weights, quantile):
    """weighted_median
    keywords
    --------
    values: ndarray
        input values
    weights: ndarray
        weights to apply to each value in values
    quantile: float
        quantile selection
    returns
    -------
    val: float
        median value
    """
    order = np.argsort(values)
    # normalized cumulative weight along the sorted values
    cum = 1. * np.cumsum(weights[order])
    cum = cum / cum[-1]
    above = order[cum > quantile]
    if len(above) == 0:
        # no cumulative weight exceeds the quantile; fall back to first value
        return values[0]
    return values[above[0]]
def continuum_normalize_tsch(dataall, mask, pixlist, delta_lambda=150):
    """Continuum-normalize spectra with a quadratic Chebyshev fit per chip.

    keywords
    --------
    dataall: ndarray, shape=(Nlambda, Nstar, 3)
        wavelengths (AA), flux densities, errors; flux/error columns are
        sanitized IN PLACE (bad pixels zeroed / errors set to inf)
    mask: ndarray, shape=(Nlambda, Nstar, 1)
        APOGEE bad-pixel bitmask; currently decoded but not applied
        (application is commented out below)
    pixlist: sequence of int
        indices of designated continuum pixels; these receive essentially
        all of the weight in the Chebyshev fits
    delta_lambda: int
        unused here; kept for interface compatibility with continuum_normalize
    returns
    -------
    dataall_flat: ndarray, shape=(Nlambda, Nstar, 3)
        continuum-normalized spectra (flux and error divided by the fit)
    continuum: ndarray, shape=(Nlambda, Nstar)
        fitted continuum evaluated on each star's wavelength grid
    """
    pixlist = list(pixlist)
    Nlambda, Nstar, foo = dataall.shape
    continuum = np.zeros((Nlambda, Nstar))
    dataall_flat = np.ones((Nlambda, Nstar, 3))
    for jj in range(Nstar):
        # flag non-finite fluxes and non-positive / non-finite errors
        bad_a = np.logical_or(np.isnan(dataall[:, jj, 1]), np.isinf(dataall[:, jj, 1]))
        bad_b = np.logical_or(dataall[:, jj, 2] <= 0., np.isnan(dataall[:, jj, 2]))
        bad = np.logical_or(np.logical_or(bad_a, bad_b), np.isinf(dataall[:, jj, 2]))
        dataall[bad, jj, 1] = 0.
        dataall[bad, jj, 2] = np.inf  # bug fix: np.Inf alias was removed in numpy 2.0
        # bug fix: `continuum` was re-zeroed here on every iteration, which wiped
        # the fits of all previously processed stars from the returned array
        # down-weight everything except the hand-picked continuum pixels
        var_array = 200**2 * np.ones(Nlambda)
        var_array[pixlist] = 0.000
        ivar = 1. / ((dataall[:, jj, 2] ** 2) + var_array)
        ivar[np.isnan(ivar)] = 0
        ivar[np.isinf(ivar)] = 0
        # the three APOGEE detector chips, fit separately
        take1 = np.logical_and(dataall[:, jj, 0] > 15150, dataall[:, jj, 0] < 15800)
        take2 = np.logical_and(dataall[:, jj, 0] > 15890, dataall[:, jj, 0] < 16430)
        take3 = np.logical_and(dataall[:, jj, 0] > 16490, dataall[:, jj, 0] < 16950)
        # bug fix: these previously used the bare names `numpy` and `logical_and`,
        # neither of which is imported in this module (NameError at runtime)
        fit1 = np.polynomial.chebyshev.Chebyshev.fit(x=dataall[take1, jj, 0], y=dataall[take1, jj, 1], w=ivar[take1], deg=2)
        fit2 = np.polynomial.chebyshev.Chebyshev.fit(x=dataall[take2, jj, 0], y=dataall[take2, jj, 1], w=ivar[take2], deg=2)
        fit3 = np.polynomial.chebyshev.Chebyshev.fit(x=dataall[take3, jj, 0], y=dataall[take3, jj, 1], w=ivar[take3], deg=2)
        continuum[take1, jj] = fit1(dataall[take1, jj, 0])
        continuum[take2, jj] = fit2(dataall[take2, jj, 0])
        continuum[take3, jj] = fit3(dataall[take3, jj, 0])
        dataall_flat[:, jj, 0] = dataall[:, jj, 0]
        # NOTE: evaluated on star 0's wavelength grid, as in the original code;
        # all stars are assumed to share a single wavelength solution
        dataall_flat[take1, jj, 1] = dataall[take1, jj, 1] / fit1(dataall[take1, 0, 0])
        dataall_flat[take2, jj, 1] = dataall[take2, jj, 1] / fit2(dataall[take2, 0, 0])
        dataall_flat[take3, jj, 1] = dataall[take3, jj, 1] / fit3(dataall[take3, 0, 0])
        dataall_flat[take1, jj, 2] = dataall[take1, jj, 2] / fit1(dataall[take1, 0, 0])
        dataall_flat[take2, jj, 2] = dataall[take2, jj, 2] / fit2(dataall[take2, 0, 0])
        dataall_flat[take3, jj, 2] = dataall[take3, jj, 2] / fit3(dataall[take3, 0, 0])
    for star in range(Nstar):
        print("get_continuum(): working on star %s" % star)
    for jj in range(Nstar):
        # clamp any pixel that is still non-finite or has a huge error
        bad_a = np.logical_or(np.isnan(dataall_flat[:, jj, 1]), np.isinf(dataall_flat[:, jj, 1]))
        bad_b = np.logical_or(dataall_flat[:, jj, 2] <= 0., np.isnan(dataall_flat[:, jj, 2]))
        bad = np.logical_or(bad_a, bad_b)
        LARGE = 200.
        dataall_flat[bad, jj, 1] = 1.
        dataall_flat[bad, jj, 2] = LARGE
        bad = np.where(dataall[:, jj, 2] > LARGE)
        dataall_flat[bad, jj, 1] = 1.
        dataall_flat[bad, jj, 2] = LARGE
        bad = np.isnan(dataall[:, jj, 1])
        dataall_flat[bad, jj, 1] = 1.
        dataall_flat[bad, jj, 2] = LARGE
        bad = np.isinf(dataall_flat[:, jj, 2])
        dataall_flat[bad, jj, 1] = 1.
        dataall_flat[bad, jj, 2] = LARGE
        # decode the APOGEE bitmask bits; bug fix: np.int was removed in numpy>=1.24
        maskbin1 = [int(a) & 2**0 for a in mask[:, jj, 0]]
        maskbin2 = [int(a) & 2**12 for a in mask[:, jj, 0]]
        maskbin3 = [int(a) & 2**13 for a in mask[:, jj, 0]]
        # below; includes bad pixel mask but performance is better without - many bad pixels with combined frames. fewer with individual
        #bad = logical_or(logical_or(maskbin2 != 0, maskbin1 != 0), maskbin3 != 0)
        #dataall_flat[bad,jj, 2] = LARGE
    return dataall_flat, continuum
def continuum_normalize(dataall, SNRall, delta_lambda=50):
    """continuum_normalize

    Normalize each star's spectrum in place by a running weighted-median
    continuum estimate, with the median quantile tuned by signal-to-noise.

    keywords
    --------
    dataall: ndarray, shape=(Nlambda, Nstar, 3)
        wavelengths, flux densities, errors; MODIFIED IN PLACE
    SNRall: ndarray, shape=(Nstar,)
        per-star signal-to-noise, used to pick the median quantile
    delta_lambda:
        half-width of median region in angstroms
    returns
    -------
    dataall: ndarray
        the (in-place normalized) input array
    .. note::
        * does a lot of stuff *other* than continuum normalization
        * requires 'coeffs_2nd_order_test18.pickle' in the working directory
    .. todo::
        * bugs: for loops!
    """
    Nlambda, Nstar, foo = dataall.shape
    continuum = np.zeros((Nlambda, Nstar))
    # NOTE(review): loads previously trained 2nd-order coefficients from a
    # hard-coded pickle; only `coeffs` is actually used below
    file_in = open('coeffs_2nd_order_test18.pickle', 'r')
    dataall2, metaall, labels, offsets, coeffs, covs, scatters,chis,chisqs = pickle.load(file_in)
    file_in.close()
    # sanitize inputs
    for jj in range(Nstar):
        # #BROKEN
        bad_a = np.logical_or(np.isnan(dataall[:, jj, 1]) ,np.isinf(dataall[:,jj, 1]))
        bad_b = np.logical_or(dataall[:, jj, 2] <= 0. , np.isnan(dataall[:, jj, 2]))
        bad = np.logical_or( np.logical_or(bad_a, bad_b) , np.isinf(dataall[:, jj, 2]))
        dataall[bad, jj, 1] = 0.
        dataall[bad, jj, 2] = np.Inf #LARGE#np.Inf #100. #np.Inf
        # NOTE(review): re-zeroing `continuum` on every iteration looks like a
        # bug (it wipes previous stars), but it is overwritten per-star below
        continuum = np.zeros((Nlambda, Nstar))
    assert foo == 3
    for star in range(Nstar):
        #x = [0.02, 0.08]
        #y = [90.0, 50.0]
        #z = np.polyfit(log(x), log(y), 1)
        #p = np.poly1d(z)
        #good = dataall[:,star,2] < 0.1
        # NOTE(review): `logical_and`, `median`, `e`, `log` below are bare
        # pylab names; only `import pylab` exists, so these raise NameError
        # unless a wildcard import is added — confirm intended environment
        good1 = logical_and(coeffs[:,0] > 0.998, coeffs[:,0] < 1.002 )
        good2 = logical_and(dataall[:,star,2] < 0.5, dataall[:,star,1] > 0.6)
        good3 = logical_and(logical_and(abs(coeffs[:,1]) <0.005/1000., abs(coeffs[:,2]) <0.005), abs(coeffs[:,3]) < 0.005)
        good = logical_and(good2,good3)
        medtest = median(dataall[:,star,1][good])
        #stdval = std(dataall[:,star,1][good])
        # pick the weighted-median quantile as a function of SNR
        snrval = SNRall[star]
        if snrval >= 100.0:
            q = 0.90
        if snrval <= 15.00:
            q = 0.50
        if logical_and(snrval > 15.0, snrval < 100.0):
            #q = e**(-0.06891241*log(snrval)**2 + 0.76047574*log(snrval) - 2.14601435)
            #q = e**(-0.094*log(snrval)**2 + 0.95*log(snrval) - 2.50)
            q = e**(0.26*log(snrval)**2 - 1.83*log(snrval) + 2.87)
        print "get_continuum(): working on star" ,star
        for ll, lam in enumerate(dataall[:, 0, 0]):
            # every star must share star 0's wavelength grid
            if dataall[ll, star, 0] != lam:
                print dataall[ll,star,0], lam , dataall[ll,0,0]
                print ll, star
                print ll+1, star+1, dataall[ll+1, star+1, 0], dataall[ll+1,0,0]
                print ll+2, star+2, dataall[ll+2, star+2, 0], dataall[ll+2,0,0]
                assert False
            # window of pixels within delta_lambda of this wavelength
            indx = (np.where(abs(dataall[:, star, 0] - lam) < delta_lambda))[0]
            coeffs_indx = coeffs[indx][:,0]
            test1 = logical_and(coeffs_indx > 0.995, coeffs_indx < 1.005)
            test2 = logical_or(coeffs_indx <= 0.995, coeffs_indx >= 1.005)
            #test1 = logical_and( b[indx star, 1] > 0.6, logical_and(logical_and(abs(coeffs[indx,1]) <0.005/1000., abs(coeffs[indx][:,2]) <0.005), abs(coeffs[indx,3]) < 0.005))
            #test2 = logical_or(logical_or(abs(coeffs[indx,1]) >= 0.005/1000., abs(coeffs[indx,2]) >= 0.005), logical_or( b[indx,star,1] <= 0.6, abs(coeffs[indx,3]) >= 0.005))
            #test2 = logical_or(coeffs_indx <= 0.998, coeffs_indx >= 1.002)
            coeffs_indx[test2] = 100**2.
            coeffs_indx[test1] = 0
            # NOTE(review): the first ivar (with the coefficient penalty) is
            # immediately overwritten by the plain-variance version below
            ivar = 1. / ((dataall[indx, star, 2] ** 2) + coeffs_indx)
            ivar = 1. / (dataall[indx, star, 2] ** 2)
            ivar = np.array(ivar)
            #q = 0.85
            # NOTE(review): this overrides the SNR-dependent q computed above
            q = 0.90
            continuum[ll, star] = weighted_median(dataall[indx, star, 1], ivar, q)
    for jj in range(Nstar):
        bad = np.where(continuum[:,jj] <= 0)
        continuum[bad,jj] = 1.
        dataall[:, jj, 1] /= continuum[:,jj]
        dataall[:, jj, 2] /= continuum[:,jj]
        #dataall[:, jj, 1] /= medtest
        #dataall[:, jj, 2] /= medtest
        # NOTE(review): `LARGE` is never defined in this function — the two
        # statements using it raise NameError if reached; confirm intent
        dataall[bad,jj, 1] = 1.
        dataall[bad,jj, 2] = LARGE
        bad = np.where(dataall[:, jj, 2] > LARGE)
        dataall[bad,jj, 1] = 1.
        dataall[bad,jj, 2] = LARGE
    return dataall
def get_bad_pixel_mask(testfile, nlam):
    """Read the APOGEE bad-pixel bitmask (HDU 3) for every spectrum listed
    in `testfile` and return it as an (nlam, nstars, 1) array."""
    listing = open(testfile, 'r')
    raw_lines = listing.readlines()
    names = [line.strip() for line in raw_lines]
    # map each aspcapStar filename onto the matching apStar file location
    rel_paths = [(line.split('/'))[-2] + '/' + ("apStar-s3-") + line.split('aspcapStar-v304-')[-1].strip()
                 for line in raw_lines]
    dirin = ['/home/ness/new_laptop/Apogee_apStar/data.sdss3.org/sas/dr10/apogee/spectro/redux/r3/s3/' + rel
             for rel in rel_paths]
    mask = np.zeros((nlam, len(names), 1))
    for jj, path in enumerate(dirin):
        hdulist = pyfits.open(path)
        mask[:, jj, 0] = (np.atleast_2d(hdulist[3].data))[0]
    return mask
def get_normalized_test_data_tsch(testfile, pixlist):
    """Load (and cache) continuum-normalized test spectra for the files
    listed in `testfile`, using the Chebyshev (tsch) normalization.

    Returns (testdata, ids): testdata has shape (nlam, nstars, 3) with
    wavelength / flux / error, ids are the 2MASS-style identifiers.
    Results are cached in '<name>test.pickle'; SNRs in '<name>_SNR.pickle'.
    """
    name = testfile.split('.txt')[0]
    a = open(testfile, 'r')
    al2 = a.readlines()
    bl2 = []
    for each in al2:
        bl2.append(each.strip())
    ids = []
    for each in bl2:
        ids.append(each.split('-2M')[-1].split('.fits')[0])
    # cache hit: reload the normalized data, but re-read SNRs from the FITS headers
    if glob.glob(name+'test.pickle'):
        file_in2 = open(name+'test.pickle', 'r')
        testdata = pickle.load(file_in2)
        file_in2.close()
        a = open(testfile, 'r')
        al2 = a.readlines()
        bl2 = []
        for each in al2:
            bl2.append(each.strip())
        SNR = np.zeros((len(bl2)))
        for jj,each in enumerate(bl2):
            a = pyfits.open(each)
            #SNR[jj] = a[0].header['SNRVIS4']
            SNR[jj] = a[0].header['SNR']
            # NOTE(review): the SNR pickle is rewritten on every loop iteration
            file_in2 = open(name+'_SNR.pickle', 'w')
            pickle.dump(SNR, file_in2)
            file_in2.close()
        return testdata, ids
    # cache miss: read every FITS file and build the (nlam, nstars, 3) cube
    SNRall = np.zeros(len(bl2))
    for jj,each in enumerate(bl2):
        a = pyfits.open(each)
        # NOTE(review): `shape` is a bare pylab name; only `import pylab` is
        # present, so this relies on a missing wildcard import — confirm
        if shape(a[1].data) != (8575,):
            # multi-visit file: row 0 holds the combined (median) spectrum
            ydata = a[1].data[0]
            ysigma = a[2].data[0]
            len_data = a[2].data[0]
            #ydata = a[1].data[3] # SNR test - NOTE THIS IS FOR TEST TO READ IN A SINGLE VISIT - TESTING ONLY - OTHERWISE SHOULD BE 0 TO READ IN THE MEDIAN SPECTRA
            #ysigma = a[2].data[3]
            #len_data = a[2].data[3]
            if jj == 0:
                nlam = len(a[1].data[0])
                testdata = np.zeros((nlam, len(bl2), 3))
        if shape(a[1].data) == (8575,):
            # single-spectrum file
            ydata = a[1].data
            ysigma = a[2].data
            len_data = a[2].data
            if jj == 0:
                nlam = len(a[1].data)
                testdata = np.zeros((nlam, len(bl2), 3))
        # reconstruct the (log-spaced) wavelength grid from the header WCS
        start_wl = a[1].header['CRVAL1']
        diff_wl = a[1].header['CDELT1']
        SNR = a[0].header['SNR']
        #SNR = a[0].header['SNRVIS4']
        SNRall[jj] = SNR
        val = diff_wl*(nlam) + start_wl
        wl_full_log = np.arange(start_wl,val, diff_wl)
        wl_full = [10**aval for aval in wl_full_log]
        xdata = wl_full
        testdata[:, jj, 0] = xdata
        testdata[:, jj, 1] = ydata
        testdata[:, jj, 2] = ysigma
    mask = get_bad_pixel_mask(testfile,nlam)
    #for jj,each in enumerate(bl2):
    #    bad = mask[:,jj] != 0
    #    testdata[bad, jj, 2] = 200.
    testdata, contall = continuum_normalize_tsch(testdata,mask,pixlist, delta_lambda=50)
    # cache normalized data and SNRs for the next call
    file_in = open(name+'test.pickle', 'w')
    file_in2 = open(name+'test_SNR.pickle', 'w')
    pickle.dump(testdata, file_in)
    pickle.dump(SNRall, file_in2)
    file_in.close()
    file_in2.close()
    return testdata , ids # not yet implemented but at some point should probably save ids into the normed pickle file
def get_normalized_test_data(testfile,noise=0):
    """
    inputs
    ------
    testfile: str
        the file in with the list of fits files want to test - if normed, move on,
        if not normed, norm it
    noise: int
        if not noisify (noise == 0) carry on as normal, otherwise (noise == 1)
        do the noise tests (add synthetic noise via add_noise)
    returns
    -------
    testdata: ndarray, shape=(nlam, nstars, 3)
        wavelength / normalized flux / error, plus the list of star ids
    """
    name = testfile.split('.txt')[0]
    a = open(testfile, 'r')
    al2 = a.readlines()
    bl2 = []
    for each in al2:
        bl2.append(each.strip())
    ids = []
    for each in bl2:
        ids.append(each.split('-2M')[-1].split('.fits')[0])
    # cache hit, no noise: reload normalized data, refresh the SNR pickle
    if noise == 0:
        if glob.glob(name+'.pickle'):
            file_in2 = open(name+'.pickle', 'r')
            testdata = pickle.load(file_in2)
            file_in2.close()
            a = open(testfile, 'r')
            al2 = a.readlines()
            bl2 = []
            for each in al2:
                bl2.append(each.strip())
            SNR = np.zeros((len(bl2)))
            for jj,each in enumerate(bl2):
                a = pyfits.open(each)
                #SNR[jj] = a[0].header['SNRVIS4']
                SNR[jj] = a[0].header['SNR']
                # NOTE(review): pickle rewritten on every loop iteration
                file_in2 = open(name+'_SNR.pickle', 'w')
                pickle.dump(SNR, file_in2)
                file_in2.close()
            return testdata, ids
    # noise test: build the SNR pickle if missing, then noisify cached data
    if noise == 1:
        # NOTE(review): checks '<name>._SNR.pickle' but writes '<name>_SNR.pickle'
        # (extra dot) — the existence test can never match the written file
        if not glob.glob(name+'._SNR.pickle'):
            a = open(testfile, 'r')
            al2 = a.readlines()
            bl2 = []
            for each in al2:
               # bl2.append(testdir+each.strip())
                bl2.append(each.strip())
            SNR = np.zeros((len(bl2)))
            for jj,each in enumerate(bl2):
                a = pyfits.open(each)
                SNR[jj] = a[0].header['SNR']
                #SNR[jj] = a[0].header['SNRVIS4']
                file_in2 = open(name+'_SNR.pickle', 'w')
                pickle.dump(SNR, file_in2)
                file_in2.close()
        if glob.glob(name+'.pickle'):
            if glob.glob(name+'_SNR.pickle'):
                file_in2 = open(name+'.pickle', 'r')
                testdata = pickle.load(file_in2)
                file_in2.close()
                file_in3 = open(name+'_SNR.pickle', 'r')
                SNR = pickle.load(file_in3)
                file_in3.close()
                ydata = testdata[:,:,1]
                ysigma = testdata[:,:,2]
                testdata[:,:,1], testdata[:,:,2] = add_noise(ydata, ysigma, SNR)
                return testdata, ids
    # cache miss: read all FITS files and build the data cube from scratch
    a = open(testfile, 'r')
    al2 = a.readlines()
    bl2 = []
    for each in al2:
        bl2.append(each.strip())
    ids = []
    for each in bl2:
        ids.append(each.split('-2M')[-1].split('.fits')[0])
    SNRall = np.zeros(len(bl2))
    for jj,each in enumerate(bl2):
        a = pyfits.open(each)
        # NOTE(review): `shape` is a bare pylab name (missing wildcard import)
        if shape(a[1].data) != (8575,):
            # multi-visit file: row 0 holds the combined (median) spectrum
            ydata = a[1].data[0]
            ysigma = a[2].data[0]
            len_data = a[2].data[0]
            #ydata = a[1].data[3] # SNR test - NOTE THIS IS FOR TEST TO READ IN A SINGLE VISIT - TESTING ONLY - OTHERWISE SHOULD BE 0 TO READ IN THE MEDIAN SPECTRA
            #ysigma = a[2].data[3]
            #len_data = a[2].data[3]
            if jj == 0:
                nlam = len(a[1].data[0])
                testdata = np.zeros((nlam, len(bl2), 3))
        if shape(a[1].data) == (8575,):
            # single-spectrum file
            ydata = a[1].data
            ysigma = a[2].data
            len_data = a[2].data
            if jj == 0:
                nlam = len(a[1].data)
                testdata = np.zeros((nlam, len(bl2), 3))
        # reconstruct the (log-spaced) wavelength grid from the header WCS
        start_wl = a[1].header['CRVAL1']
        diff_wl = a[1].header['CDELT1']
        SNR = a[0].header['SNR']
        #SNR = a[0].header['SNRVIS4']
        SNRall[jj] = SNR
        val = diff_wl*(nlam) + start_wl
        wl_full_log = np.arange(start_wl,val, diff_wl)
        wl_full = [10**aval for aval in wl_full_log]
        xdata = wl_full
        testdata[:, jj, 0] = xdata
        testdata[:, jj, 1] = ydata
        testdata[:, jj, 2] = ysigma
    testdata = continuum_normalize(testdata,SNRall) # testdata
    # cache normalized data and SNRs for the next call
    file_in = open(name+'.pickle', 'w')
    file_in2 = open(name+'_SNR.pickle', 'w')
    pickle.dump(testdata, file_in)
    pickle.dump(SNRall, file_in2)
    file_in.close()
    file_in2.close()
    return testdata , ids # not yet implemented but at some point should probably save ids into the normed pickle file
def get_normalized_training_data_tsch(pixlist):
    """Load (and cache) the continuum-normalized training set with labels,
    using the Chebyshev (tsch) normalization.

    Returns (dataall, metaall, labels, Ametaall, cluster_name, ids) where
    metaall holds the adopted Teff/logg/[Fe/H] and Ametaall the ASPCAP values.
    Cached in the module-level `normed_training_data` pickle.
    """
    if glob.glob(normed_training_data):
        file_in2 = open(normed_training_data, 'r')
        dataall, metaall, labels, Ametaall, cluster_name, ids = pickle.load(file_in2)
        file_in2.close()
        return dataall, metaall, labels, Ametaall, cluster_name, ids
    # successive reassignments: only the LAST fn is actually used
    fn = 'starsin_SFD_Pleiades.txt'
    fn = 'mkn_labels_Atempfeh_edit.txt' # this is for using all stars ejmk < 0.3 but with offest to aspcap values done in a consistent way to rest of labels
    fn = 'mkn_TCA_edit.txt' # this is for using all stars ejmk < 0.3 but with offest to aspcap values done in a consistent way to rest of labels
    fn = 'test18_badremoved.txt' # this is for using all stars ejmk < 0.3 but with offest to aspcap values done in a consistent way to rest of labels
    fn = 'test18.txt' # this is for using all stars ejmk < 0.3 but with offest to aspcap values done in a consistent way to rest of labels
    #T_est,g_est,feh_est,T_A, g_A, feh_A = np.loadtxt(fn, usecols = (4,6,8,3,5,7), unpack =1)
    #if logical_or(fn == 'test18_badremoved.txt', fn == 'starsin_SFD_Pleiades.txt'):
    # NOTE(review): `logical_or` is a bare pylab name (missing wildcard import)
    if logical_or(fn == 'test18.txt', fn == 'starsin_SFD_Pleiades.txt'):
        T_est,g_est,feh_est,T_A, g_A, feh_A = np.loadtxt(fn, usecols = (4,6,8,3,5,7), unpack =1)
    if fn == 'mkn_labels_Atempfeh_edit.txt':
        T_est,g_est,feh_est,T_A, g_A, feh_A = np.loadtxt(fn, usecols = (3,5,7,2,4,6), unpack =1)
    if fn == 'mkn_TCA_edit.txt':
        T_est,g_est,feh_est,T_A, g_A, feh_A = np.loadtxt(fn, usecols = (2,3,4,2,3,4), unpack =1)
    labels = ["teff", "logg", "feh"]
    a = open(fn, 'r')
    al = a.readlines()
    bl = []
    cluster_name = []
    ids = []
    for each in al:
        bl.append(each.split()[0])
        cluster_name.append(each.split()[1])
        ids.append(each.split()[0].split('-2M')[-1].split('.fits')[0])
    for jj,each in enumerate(bl):
        each = each.strip('\n')
        a = pyfits.open(each)
        b = pyfits.getheader(each)
        # reconstruct the (log-spaced) wavelength grid from the header WCS
        start_wl = a[1].header['CRVAL1']
        diff_wl = a[1].header['CDELT1']
        print np.atleast_2d(a[1].data).shape
        if jj == 0:
            nmeta = len(labels)
            nlam = len(a[1].data)
            #nlam = len(a[1].data[0])
        val = diff_wl*(nlam) + start_wl
        wl_full_log = np.arange(start_wl,val, diff_wl)
        ydata = (np.atleast_2d(a[1].data))[0]
        ydata_err = (np.atleast_2d(a[2].data))[0]
        ydata_flag = (np.atleast_2d(a[3].data))[0]
        assert len(ydata) == nlam
        wl_full = [10**aval for aval in wl_full_log]
        xdata= np.array(wl_full)
        ydata = np.array(ydata)
        ydata_err = np.array(ydata_err)
        starname2 = each.split('.fits')[0]+'.txt'
        sigma = (np.atleast_2d(a[2].data))[0]# /y1
        if jj == 0:
            npix = len(xdata)
            dataall = np.zeros((npix, len(bl), 3))
            metaall = np.ones((len(bl), nmeta))
            Ametaall = np.ones((len(bl), nmeta))
        if jj > 0:
            # all stars must share the same wavelength solution
            assert xdata[0] == dataall[0, 0, 0]
        dataall[:, jj, 0] = xdata
        dataall[:, jj, 1] = ydata
        dataall[:, jj, 2] = sigma
        for k in range(0,len(bl)):
            # must be synchronised with labels
            metaall[k,0] = T_est[k]
            metaall[k,1] = g_est[k]
            metaall[k,2] = feh_est[k]
            Ametaall[k,0] = T_A[k]
            Ametaall[k,1] = g_A[k]
            Ametaall[k,2] = feh_A[k]
    pixlist = list(pixlist)
    # mask is all-zero here; the real bad-pixel mask load is commented out
    mask = np.zeros((nlam, len(bl),1))
    #mask = get_bad_pixel_mask('test18_names.txt',nlam)
    dataall, contall = continuum_normalize_tsch(dataall,mask, pixlist, delta_lambda=50)
    # cache for subsequent calls
    file_in = open(normed_training_data, 'w')
    pickle.dump((dataall, metaall, labels, Ametaall, cluster_name, ids), file_in)
    file_in.close()
    return dataall, metaall, labels , Ametaall, cluster_name, ids
def get_normalized_training_data():
    """Load (and cache) the continuum-normalized training set with labels,
    using the weighted-median normalization (continuum_normalize).

    Returns (dataall, metaall, labels, Ametaall, cluster_name, ids);
    cached in the module-level `normed_training_data` pickle.
    """
    if glob.glob(normed_training_data):
        file_in2 = open(normed_training_data, 'r')
        dataall, metaall, labels, Ametaall, cluster_name, ids = pickle.load(file_in2)
        file_in2.close()
        return dataall, metaall, labels, Ametaall, cluster_name, ids
    # successive reassignments: only the LAST fn is actually used
    fn = "starsin_test2.txt"
    fn = "starsin_test.txt"
    fn = "starsin_new_all_ordered.txt"
    fn = "test4_selfg.txt"
    fn = 'test14.txt' # this is for teff < 600 cut which worked quite nicely
    fn = 'mkn_labels_edit.txt' # this is for using all stars ejmk < 0.3 but with offest to aspcap values done in a consistent way to rest of labels
    fn = 'mkn_labels_Atempfeh_edit.txt' # this is for using all stars ejmk < 0.3 but with offest to aspcap values done in a consistent way to rest of labels
    fn = 'test18.txt' # this is for using all stars ejmk < 0.3 but with offest to aspcap values done in a consistent way to rest of labels
    #fn = 'test18_apstar.txt' # this is for using all stars ejmk < 0.3 but with offest to aspcap values done in a consistent way to rest of labels
    T_est,g_est,feh_est,T_A, g_A, feh_A = np.loadtxt(fn, usecols = (4,6,8,3,5,7), unpack =1)
    #T_est,g_est,feh_est,T_A, g_A, feh_A = np.loadtxt(fn, usecols = (3,5,7,2,4,6), unpack =1)
    labels = ["teff", "logg", "feh"]
    a = open(fn, 'r')
    al = a.readlines()
    bl = []
    cluster_name = []
    ids = []
    for each in al:
        bl.append(each.split()[0])
        cluster_name.append(each.split()[1])
        ids.append(each.split()[0].split('-2M')[-1].split('.fits')[0])
    SNRall = np.zeros((len(bl)))
    for jj,each in enumerate(bl):
        each = each.strip('\n')
        a = pyfits.open(each)
        b = pyfits.getheader(each)
        SNRall[jj] = a[0].header['SNR']
        # reconstruct the (log-spaced) wavelength grid from the header WCS
        start_wl = a[1].header['CRVAL1']
        diff_wl = a[1].header['CDELT1']
        print np.atleast_2d(a[1].data).shape
        if jj == 0:
            nmeta = len(labels)
            nlam = len(a[1].data)
            #nlam = len(a[1].data[0])
        val = diff_wl*(nlam) + start_wl
        wl_full_log = np.arange(start_wl,val, diff_wl)
        ydata = (np.atleast_2d(a[1].data))[0]
        ydata_err = (np.atleast_2d(a[2].data))[0]
        ydata_flag = (np.atleast_2d(a[3].data))[0]
        assert len(ydata) == nlam
        wl_full = [10**aval for aval in wl_full_log]
        xdata= np.array(wl_full)
        ydata = np.array(ydata)
        ydata_err = np.array(ydata_err)
        starname2 = each.split('.fits')[0]+'.txt'
        sigma = (np.atleast_2d(a[2].data))[0]# /y1
        if jj == 0:
            npix = len(xdata)
            dataall = np.zeros((npix, len(bl), 3))
            metaall = np.ones((len(bl), nmeta))
            Ametaall = np.ones((len(bl), nmeta))
        if jj > 0:
            # all stars must share the same wavelength solution
            assert xdata[0] == dataall[0, 0, 0]
        dataall[:, jj, 0] = xdata
        dataall[:, jj, 1] = ydata
        dataall[:, jj, 2] = sigma
        for k in range(0,len(bl)):
            # must be synchronised with labels
            metaall[k,0] = T_est[k]
            metaall[k,1] = g_est[k]
            metaall[k,2] = feh_est[k]
            Ametaall[k,0] = T_A[k]
            Ametaall[k,1] = g_A[k]
            Ametaall[k,2] = feh_A[k]
    dataall = continuum_normalize(dataall,SNRall) #dataall
    # cache for subsequent calls
    file_in = open(normed_training_data, 'w')
    pickle.dump((dataall, metaall, labels, Ametaall, cluster_name, ids), file_in)
    file_in.close()
    return dataall, metaall, labels , Ametaall, cluster_name, ids
def add_noise(ydata, ysigma, SNR):
    """Degrade a spectrum to simulate a lower signal-to-noise observation.

    Parameters
    ----------
    ydata: ndarray
        flux values
    ysigma: ndarray
        1-sigma flux uncertainties
    SNR: float or ndarray
        target signal-to-noise; currently UNUSED — the noise inflation
        factor is hard-coded to 10 (the SNR-based factor is commented out)

    Returns
    -------
    ydata_n, ysigma_n: ndarray
        noisified flux and correspondingly inflated uncertainties
    """
    factor = 10.000
    #factor = ((SNR*1./30)**2 - 1)**0.5
    #y_noise_level = ((factor)/np.array(SNR))
    # bug fix: `sqrt`, `normal` and `array` were bare pylab names that are
    # never imported in this module (NameError); use the numpy equivalents
    y_noise_level = np.sqrt(1 + (factor - 1)**2) * ysigma
    y_noise_level_all = np.random.normal(0, y_noise_level)
    #sigma_noise_level = abs(normal(0, (factor)*ysigma**2) )
    sigma_noise_level = abs(np.random.normal(0, np.sqrt(1 + (factor - 1)**2) * ysigma**2))
    ydata_n = ydata + np.array(y_noise_level_all)
    ysigma_n = (ysigma**2 + sigma_noise_level)**0.5
    return ydata_n, ysigma_n
#def add_noise(ydata, ysigma, SNR):
# factor = 10.000
# factor = ((SNR*1./30)**2 - 1)**0.5
# #y_noise_level = ((factor-1)/np.array(SNR))*3.1**0.5 #3.1 is the number of pixels in a resolution element
# y_noise_level = ((factor)/np.array(SNR)) #3.1 is the number of pixels in a resolution element
# y_noise_level_all = [ normal(0, a, len(ydata)) for a in y_noise_level]
# sigma_noise_level = abs(normal(0, (factor)*ysigma**2) )
# ydata_n = ydata + array(y_noise_level_all).T
# ysigma_n = (ysigma**2 + sigma_noise_level)**0.5
# return ydata_n, ysigma_n
def do_one_regression_at_fixed_scatter(data, features, scatter):
    """Weighted linear regression at one pixel with a fixed intrinsic scatter.

    Parameters
    ----------
    data: ndarray, [nobjs, 3]
        wavelengths, fluxes, flux uncertainties at this pixel for every star
    features: ndarray, [nobjs, npred]
        design matrix (constant, label terms, quadratic label terms, ...)
    scatter: float
        intrinsic scatter added in quadrature to the flux uncertainties

    Returns
    -------
    coeff: ndarray
        coefficients of the fit
    MTCinvM: ndarray
        inverse covariance matrix for fit coefficients
    chi: ndarray
        per-object chi values at best fit
    logdet_Cinv: float
        sum of the log inverse variances, sum(log(Cinv))

    Raises
    ------
    numpy.linalg.LinAlgError
        if the normal-equations matrix is singular
    """
    # least square fit
    Cinv = 1. / (data[:, 2] ** 2 + scatter ** 2)  # invvar slice of data
    M = features
    MTCinvM = np.dot(M.T, Cinv[:, None] * M)  # craziness b/c Cinv isnt a matrix
    x = data[:, 1]  # intensity slice of data
    MTCinvx = np.dot(M.T, Cinv * x)
    try:
        # bug fix: catch np.linalg.LinAlgError (np.linalg.linalg was removed
        # in numpy 2.0)
        coeff = np.linalg.solve(MTCinvM, MTCinvx)
    except np.linalg.LinAlgError:
        # bug fix: previously fell through with `coeff` undefined, turning the
        # real failure into a NameError; log the offending system and re-raise
        print(MTCinvM, MTCinvx, data[:, 0], data[:, 1], data[:, 2])
        print(features)
        raise
    assert np.all(np.isfinite(coeff))
    chi = np.sqrt(Cinv) * (x - np.dot(M, coeff))
    logdet_Cinv = np.sum(np.log(Cinv))
    return (coeff, MTCinvM, chi, logdet_Cinv)
def do_one_regression(data, metadata):
    """Regression at a single wavelength, optimizing the intrinsic scatter.

    Evaluates the chi-squared objective on a log-spaced grid of scatter
    values, then refines the grid minimum with a local parabola fit.

    Parameters
    ----------
    data: ndarray, [nobjs, 3]
        wavelength, flux, flux uncertainty at this pixel for every star
    metadata: ndarray, [nobjs, npred]
        the feature (design) matrix for every star

    Returns
    -------
    tuple
        output of do_one_regression_at_fixed_scatter at the best scatter,
        with the best-fit scatter appended: (coeff, MTCinvM, chi,
        logdet_Cinv, s_best)
    """
    ln_s_values = np.arange(np.log(0.0001), 0., 0.5)
    chis_eval = np.zeros_like(ln_s_values)
    for ii, ln_s in enumerate(ln_s_values):
        foo, bar, chi, logdet_Cinv = do_one_regression_at_fixed_scatter(data, metadata, scatter = np.exp(ln_s))
        chis_eval[ii] = np.sum(chi * chi) - logdet_Cinv
    # if the objective went NaN anywhere, fall back to the largest scatter
    if np.any(np.isnan(chis_eval)):
        s_best = np.exp(ln_s_values[-1])
        return do_one_regression_at_fixed_scatter(data, metadata, scatter = s_best) + (s_best, )
    lowest = np.argmin(chis_eval)
    # minimum at a grid edge: no bracketing triple, take the grid point itself
    if lowest == 0 or lowest == len(ln_s_values) - 1:
        s_best = np.exp(ln_s_values[lowest])
        return do_one_regression_at_fixed_scatter(data, metadata, scatter = s_best) + (s_best, )
    # refine: fit a parabola through the bracketing triple and take the root
    # of its derivative as the interpolated minimum
    # (removed unused locals `f = np.poly1d(z)` and a second polyder call)
    ln_s_values_short = ln_s_values[np.array([lowest - 1, lowest, lowest + 1])]
    chis_eval_short = chis_eval[np.array([lowest - 1, lowest, lowest + 1])]
    z = np.polyfit(ln_s_values_short, chis_eval_short, 2)
    fit_pder = np.polyder(z)
    s_best = np.exp(np.roots(fit_pder)[0])
    return do_one_regression_at_fixed_scatter(data, metadata, scatter = s_best) + (s_best, )
def do_regressions(dataall, features):
    """Run do_one_regression at every wavelength.

    Parameters
    ----------
    dataall: ndarray, [nlam, nobj, 3]
        per-pixel (wavelength, flux, uncertainty) for every star
    features: ndarray, [nobj, npred]
        design matrix, broadcast to every wavelength

    Returns
    -------
    list
        one do_one_regression result tuple per wavelength
    """
    nlam, nobj, ndata = dataall.shape
    nobj, npred = features.shape
    featuresall = np.zeros((nlam, nobj, npred))
    featuresall[:, :, :] = features[None, :, :]
    # bug fix: return a list rather than `map(...)` — under Python 3 map is a
    # one-shot iterator, and train() iterates over this result five times
    return [do_one_regression(d, f) for d, f in zip(dataall, featuresall)]
def train(dataall, metaall, order, fn, Ametaall,cluster_name, logg_cut=100., teff_cut=0., leave_out=None):
    """Fit the per-pixel polynomial model to the training set and pickle it.

    Builds the design matrix up to the requested `order` (1 = linear in the
    labels, 2 = adds quadratic/cross terms), runs do_regressions over every
    pixel, and dumps the resulting model to `fn`.

    - `leave out` must be in the correct form to be an input to `np.delete`
    - logg_cut / teff_cut are currently unused (their cut is commented out)
    """
    #good = np.logical_and((metaall[:, 1] < logg_cut), (metaall[:,0] > teff_cut) )
    #dataall = dataall[:, good]
    #metaall = metaall[good]
    #nstars, nmeta = metaall.shape
    if leave_out is not None: #
        dataall = np.delete(dataall, [leave_out], axis = 1)
        metaall = np.delete(metaall, [leave_out], axis = 0)
        Ametaall = np.delete(Ametaall, [leave_out], axis = 0)
    # NOTE(review): `array` is a bare pylab name — only `import pylab` exists,
    # so this raises NameError unless a wildcard import is added; confirm
    diff_t = np.abs(array(metaall[:,0] - Ametaall[:,0]) )
    #good = np.logical_and((metaall[:, 1] < logg_cut), (diff_t < 600. ) )
    # keep stars with logg > 0.2 whose adopted/ASPCAP Teff agree within 6000 K
    good = np.logical_and((metaall[:, 1] > 0.2), (diff_t < 6000. ) )
    #good = np.logical_and((metaall[:, 1] > -2.2), (diff_t < 600. ) )
    dataall = dataall[:, good]
    metaall = metaall[good]
    nstars, nmeta = metaall.shape
    offsets = np.mean(metaall, axis=0)
    features = np.ones((nstars, 1))
    if order >= 1:
        features = np.hstack((features, metaall - offsets))
    if order >= 2:
        # quadratic and cross terms of the offset-subtracted labels
        newfeatures = np.array([np.outer(m, m)[np.triu_indices(nmeta)] for m in (metaall - offsets)])
        features = np.hstack((features, newfeatures))
    blob = do_regressions(dataall, features)
    coeffs = np.array([b[0] for b in blob])
    #invcovs = np.array([b[1] for b in blob])
    covs = np.array([np.linalg.inv(b[1]) for b in blob])
    chis = np.array([b[2] for b in blob])
    chisqs = np.array([np.dot(b[2],b[2]) - b[3] for b in blob]) # holy crap be careful
    scatters = np.array([b[4] for b in blob])
    fd = open(fn, "w")
    # NOTE(review): `labels` is undefined in this function and in module scope
    # — this dump raises NameError when reached; presumably the label-name
    # list belongs in the signature or module globals. Confirm intent.
    pickle.dump((dataall, metaall, labels, offsets, coeffs, covs, scatters,chis,chisqs), fd)
    fd.close()
def get_goodness_fit(fn_pickle, filein, Params_all, MCM_rotate_all):
    """Compute per-star chi-squared of the model against the observed flux,
    and save a two-panel diagnostic plot (data/model, normalized residuals)
    for every star.

    Parameters: fn_pickle is the trained-model pickle; filein identifies the
    flux pickle; Params_all are the inferred labels; MCM_rotate_all is unused.
    Returns chi2_all, shape=(nstars,).
    """
    fd = open(fn_pickle,'r')
    dataall, metaall, labels, offsets, coeffs, covs, scatters, chis, chisq = pickle.load(fd)
    fd.close()
    file_with_star_data = str(filein)+".pickle"
    #f_flux = open('self_2nd_order.pickle', 'r')
    # NOTE(review): split() returns a LIST, so `filein != file_normed` compares
    # str vs list and is always True — the self_2nd_order branch below is
    # unreachable; presumably '.split(...)[0]' was intended (cf.
    # infer_labels_nonlinear). Confirm before changing.
    file_normed = normed_training_data.split('.pickle')
    if filein != file_normed:
        f_flux = open(file_with_star_data, 'r')
        flux = pickle.load(f_flux)
    if filein == file_normed:
        f_flux = open('self_2nd_order.pickle', 'r')
        flux, metaall, labels, Ametaall, cluster_name, ids = pickle.load(f_flux)
    f_flux.close()
    labels = Params_all
    # NOTE(review): `shape`, `plt` and `close` below are bare pylab/matplotlib
    # names; only `import pylab` exists, so these rely on a missing import
    nlabels = shape(labels)[1]
    nstars = shape(labels)[0]
    # rebuild the quadratic design matrix from the inferred labels
    features_data = np.ones((nstars, 1))
    offsets = np.mean(labels, axis = 0)
    features_data = np.hstack((features_data, labels - offsets))
    newfeatures_data = np.array([np.outer(m, m)[np.triu_indices(nlabels)] for m in (labels - offsets)])
    features_data = np.hstack((features_data, newfeatures_data))
    chi2_all = np.zeros(nstars)
    for jj in range(nstars):
        model_gen = np.dot(coeffs,features_data.T[:,jj])
        data_star = flux[:,jj,1]
        Cinv = 1. / (flux[:,jj, 2] ** 2 + scatters ** 2) # invvar slice of data
        #chi = np.sqrt(Cinv) * (data_star - np.dot(coeffs, features_data.T[:,jj]))
        chi2 = sum( (Cinv) * (data_star - np.dot(coeffs, features_data.T[:,jj]))**2)
        #chi2 = (Cinv)*(model_gen - data_star)**2
        chi2_all[jj] = chi2
        # diagnostic plot: data vs model (top), normalized residuals (bottom)
        fig = plt.figure()
        ax1 = fig.add_subplot(211)
        ax2 = fig.add_subplot(212)
        #ax2.plot(flux[:,jj,0],data_star- model_gen, 'r')
        noises = (flux[:,jj,2]**2 + scatters**2)**0.5
        ydiff_norm = 1./noises*(data_star - model_gen)
        # blank out pixels with large errors so they don't dominate the plot
        bad = flux[:,jj,2] > 0.1
        ydiff_norm[bad] = None
        data_star[bad] = None
        model_gen[bad] = None
        ax1.plot(flux[:,jj,0], data_star, 'k')
        ax1.plot(flux[:,jj,0], model_gen, 'r')
        ax2.plot(flux[:,jj,0],ydiff_norm , 'r')
        ax1.set_xlim(15200,16000)
        ax1.set_ylim(0.5,1.2)
        ax2.set_xlim(15200,16000)
        ax2.set_ylim(-10.2,10.2)
        prefix = str('check'+str(filein)+"_"+str(jj))
        savefig2(fig, prefix, transparent=False, bbox_inches='tight', pad_inches=0.5)
        close()
    return chi2_all
def savefig2(fig, prefix, **kwargs):
    """Save `fig` as <prefix>.png, announcing the filename on stdout.

    Extra keyword arguments are forwarded to fig.savefig.
    """
    suffix = ".png"
    # py2/py3-compatible print (this was a Python-2-only print statement)
    print("writing %s" % (prefix + suffix))
    fig.savefig(prefix + suffix, **kwargs)
## non linear stuff below ##
# returns the non linear function
def func(x1, x2, x3, x4, x5, x6, x7, x8, x9, a, b, c):
    """Quadratic model in the three labels (a, b, c), with per-pixel
    coefficients x1..x9 for the linear, pure-quadratic and cross terms."""
    linear_part = x1 * a + x2 * b + x3 * c
    square_part = x4 * a**2 + x7 * b**2 + x9 * c**2
    cross_part = x5 * a * b + x6 * a * c + x8 * b * c
    return linear_part + square_part + cross_part
# thankyou stack overflow for the example below on how to use the optimse function
def nonlinear_invert(f, x1, x2, x3, x4, x5, x6, x7, x8, x9, sigmavals):
    """Least-squares invert the quadratic model for the labels (a, b, c).

    Wraps `func` so that scipy's curve_fit can treat the stacked coefficient
    rows as the independent variable; returns (best-fit labels, covariance).
    """
    def model_at(observation_points, a, b, c):
        c1, c2, c3, c4, c5, c6, c7, c8, c9 = observation_points
        return func(c1, c2, c3, c4, c5, c6, c7, c8, c9, a, b, c)
    stacked = np.vstack([x1, x2, x3, x4, x5, x6, x7, x8, x9])
    # absolute_sigma=True is not available in the scipy version this was
    # originally written against, hence plain sigma weighting only
    params, cov = opt.curve_fit(model_at, stacked, f, sigma=sigmavals, maxfev=2000)
    return params, cov
def infer_labels_nonlinear(fn_pickle,testdata, ids, fout_pickle, weak_lower,weak_upper):
#def infer_labels(fn_pickle,testdata, fout_pickle, weak_lower=0.935,weak_upper=0.98):
    """Infer labels for every star by nonlinear (curve_fit) inversion of the
    trained quadratic model, then pickle the results with goodness-of-fit.

    best log g = weak_lower = 0.95, weak_upper = 0.98
    best teff = weak_lower = 0.95, weak_upper = 0.99
    best_feh = weak_lower = 0.935, weak_upper = 0.98
    (weak_lower/weak_upper are currently unused in this nonlinear version)
    this returns the parameters for a field of data - and normalises if it is not already normalised
    this is slow because it reads a pickle file
    """
    file_in = open(fn_pickle, 'r')
    dataall, metaall, labels, offsets, coeffs, covs, scatters,chis,chisq = pickle.load(file_in)
    file_in.close()
    nstars = (testdata.shape)[1]
    nlabels = len(labels)
    Params_all = np.zeros((nstars, nlabels))
    # NOTE(review): the third dimension is the float `np.shape(coeffs)[1]-1.`
    # — accepted by old numpy, a TypeError on modern numpy; confirm
    MCM_rotate_all = np.zeros((nstars, np.shape(coeffs)[1]-1, np.shape(coeffs)[1]-1.))
    covs_all = np.zeros((nstars,nlabels, nlabels))
    for jj in range(0,nstars):
        # every test star must share the training wavelength grid (to 1e-4 AA)
        #if np.any(testdata[:,jj,0] != dataall[:, 0, 0]):
        if np.any(abs(testdata[:,jj,0] - dataall[:, 0, 0]) > 0.0001):
            print testdata[range(5),jj,0], dataall[range(5),0,0]
            assert False
        xdata = testdata[:,jj,0]
        ydata = testdata[:,jj,1]
        ysigma = testdata[:,jj,2]
        ydata_norm = ydata - coeffs[:,0] # subtract the mean
        f = ydata_norm
        t,g,feh = metaall[:,0], metaall[:,1], metaall[:,2]
        x0,x1,x2,x3,x4,x5,x6,x7,x8,x9 = coeffs[:,0], coeffs[:,1], coeffs[:,2], coeffs[:,3], coeffs[:,4], coeffs[:,5], coeffs[:,6] ,coeffs[:,7], coeffs[:,8], coeffs[:,9]
        Cinv = 1. / (ysigma ** 2 + scatters ** 2)
        # fit the offset-subtracted labels, then add the offsets back
        Params,covs = nonlinear_invert(f, x1, x2, x3, x4, x5, x6, x7, x8, x9, 1/Cinv**0.5 )
        Params = Params+offsets
        coeffs_slice = coeffs[:,-9:]
        MCM_rotate = np.dot(coeffs_slice.T, Cinv[:,None] * coeffs_slice)
        Params_all[jj,:] = Params
        MCM_rotate_all[jj,:,:] = MCM_rotate
        covs_all[jj,:,:] = covs
    filein = fout_pickle.split('_tags') [0]
    # self-test run writes the goodness-of-fit against the training set itself
    if filein == 'self_2nd_order':
        file_in = open(fout_pickle, 'w')
        #pickle.dump((Params_all, covs_all), file_in)
        file_normed = normed_training_data.split('.pickle')[0]
        chi2 = get_goodness_fit(fn_pickle, file_normed, Params_all, MCM_rotate_all)
        chi2_def = chi2#/len(xdata)*1.
        pickle.dump((Params_all, covs_all,chi2_def,ids), file_in)
        file_in.close()
    else:
        chi2 = get_goodness_fit(fn_pickle, filein, Params_all, MCM_rotate_all)
        #chi2 = 1
        chi2_def = chi2#/len(xdata)*1.
        file_in = open(fout_pickle, 'w')
        pickle.dump((Params_all, covs_all, chi2_def, ids), file_in)
        file_in.close()
    return Params_all , MCM_rotate_all
def infer_labels(fn_pickle,testdata, fout_pickle, weak_lower,weak_upper):
#def infer_labels(fn_pickle,testdata, fout_pickle, weak_lower=0.935,weak_upper=0.98):
    """Infer labels for every star by LINEAR inversion of the trained model,
    restricted to pixels whose flux lies in (weak_lower, weak_upper).

    best log g = weak_lower = 0.95, weak_upper = 0.98
    best teff = weak_lower = 0.95, weak_upper = 0.99
    best_feh = weak_lower = 0.935, weak_upper = 0.98
    this returns the parameters for a field of data - and normalises if it is not already normalised
    this is slow because it reads a pickle file
    """
    file_in = open(fn_pickle, 'r')
    dataall, metaall, labels, offsets, coeffs, covs, scatters,chis,chisqs = pickle.load(file_in)
    file_in.close()
    nstars = (testdata.shape)[1]
    nlabels = len(labels)
    Params_all = np.zeros((nstars, nlabels))
    MCM_rotate_all = np.zeros((nstars, nlabels, nlabels))
    for jj in range(0,nstars):
        # every test star must share the training wavelength grid exactly
        if np.any(testdata[:,jj,0] != dataall[:, 0, 0]):
            print testdata[range(5),jj,0], dataall[range(5),0,0]
            assert False
        xdata = testdata[:,jj,0]
        ydata = testdata[:,jj,1]
        ysigma = testdata[:,jj,2]
        ydata_norm = ydata - coeffs[:,0] # subtract the mean
        # only the linear label coefficients are used in this version
        coeffs_slice = coeffs[:,-3:]
        #ind1 = np.logical_and(logical_and(dataall[:,jj,0] > 16200., dataall[:,jj,0] < 16500.), np.logical_and(ydata > weak_lower , ydata < weak_upper))
        ind1 = np.logical_and(ydata > weak_lower , ydata < weak_upper)
        Cinv = 1. / (ysigma ** 2 + scatters ** 2)
        # weighted normal equations restricted to the selected (weak-line) pixels
        MCM_rotate = np.dot(coeffs_slice[ind1].T, Cinv[:,None][ind1] * coeffs_slice[ind1])
        MCy_vals = np.dot(coeffs_slice[ind1].T, Cinv[ind1] * ydata_norm[ind1])
        Params = np.linalg.solve(MCM_rotate, MCy_vals)
        Params = Params + offsets
        print Params
        Params_all[jj,:] = Params
        MCM_rotate_all[jj,:,:] = MCM_rotate
    file_in = open(fout_pickle, 'w')
    pickle.dump((Params_all, MCM_rotate_all), file_in)
    file_in.close()
    return Params_all , MCM_rotate_all
def lookatfits(fn_pickle, pixelvalues,testdataall):
    """
    Plot the individual pixel fits on a 3x2 panel, one figure per pixel in
    *pixelvalues*, in both absolute ('abs') and mean-subtracted ('norm') form.

    fn_pickle   : training pickle; NOTE its contents immediately overwrite the
                  *testdataall* argument (the parameter value is never used).
    pixelvalues : iterable of pixel indices to plot.
    """
    file_in = open(fn_pickle, 'r')
    testdataall, metaall, labels, offsets, coeffs, covs, scatters,chis,chisqs = pickle.load(file_in)
    file_in.close()
    axis_t, axis_g, axis_feh = metaall[:,0], metaall[:,1], metaall[:,2]
    nstars = (testdataall.shape)[1]
    # Recompute offsets as the mean of the labels (overrides pickled offsets).
    offsets = np.mean(metaall, axis=0)
    # Design matrix: [1, labels - offsets]; features2 appends the raw labels.
    features = np.ones((nstars, 1))
    features = np.hstack((features, metaall - offsets))
    features2 = np.hstack((features, metaall ))
    for each in pixelvalues:
        flux_val_abs = testdataall[each,:,1]
        flux_val_norm = testdataall[each,:,1] - np.dot(coeffs, features.T)[each,:]
        coeff = coeffs[each,:]
        # One-label model predictions (mean + single label term), absolute and
        # mean-subtracted versions for [Fe/H], log g, and Teff respectively.
        y_feh_abs = coeff[3]*features[:,3] + coeff[0]*features[:,0]
        y_feh_norm = coeff[3]*features[:,3] + coeff[0]*features[:,0] -(coeff[3]*features2[:,3] + coeff[0]*features2[:,0])
        y_g_abs = coeff[2]*features[:,2] + coeff[0]*features[:,0]
        y_g_norm = coeff[2]*features[:,2] + coeff[0]*features[:,0] - (coeff[2]*features2[:,2] + coeff[0]*features2[:,0])
        y_t_abs = coeff[1]*features[:,1] + coeff[0]*features[:,0]
        y_t_norm = coeff[1]*features[:,1] + coeff[0]*features[:,0] - (coeff[1]*features2[:,1] + coeff[0]*features2[:,0])
        # Produce the figure twice: once absolute, once mean-subtracted.
        for flux_val, y_feh, y_g, y_t, namesave,lab,ylims in zip([flux_val_abs, flux_val_norm], [y_feh_abs,y_feh_norm],[y_g_abs, y_g_norm], [y_t_abs,y_t_norm],['abs','norm'], ['flux','flux - mean'],
                                                                 [[-0.2,1.2], [-1,1]] ):
            y_meandiff = coeff[0] - flux_val
            fig = plt.figure(figsize = [12.0, 12.0])
            # Panel 1: flux vs [Fe/H].
            ax = plt.subplot(3,2,1)
            # Flag high-uncertainty stars (sigma > 0.1) with black crosses.
            pick = testdataall[each,:,2] > 0.1
            ax.plot(metaall[:,2], flux_val, 'o',alpha =0.5,mfc = 'None', mec = 'r')
            ax.plot(metaall[:,2][pick], flux_val[pick], 'kx',markersize = 10)
            ax.plot(metaall[:,2], y_feh, 'k')
            ind1 = argsort(metaall[:,2])
            ax.fill_between(sort(metaall[:,2]), array(y_feh + std(flux_val))[ind1], array(y_feh - std(flux_val))[ind1] , color = 'y', alpha = 0.2)
            ax.set_xlabel("[Fe/H]", fontsize = 14 )
            ax.set_ylabel(lab, fontsize = 14 )
            ax.set_title(str(np.int((testdataall[each,0,0])))+" $\AA$")
            ax.set_ylim(ylims[0], ylims[1])
            # Panel 2: flux vs log g.
            ax = plt.subplot(3,2,2)
            ax.plot(metaall[:,1], flux_val, 'o', alpha =0.5, mfc = 'None', mec = 'b')
            ax.plot(metaall[:,1][pick], flux_val[pick], 'kx',markersize = 10)
            ax.plot(metaall[:,1], y_g, 'k')
            ind1 = argsort(metaall[:,1])
            ax.fill_between(sort(metaall[:,1]), array(y_g + std(flux_val))[ind1], array(y_g - std(flux_val))[ind1] , color = 'y', alpha = 0.2)
            ax.set_xlabel("log g", fontsize = 14 )
            ax.set_ylabel(lab, fontsize = 14 )
            ax.set_title(str(np.int((testdataall[each,0,0])))+" $\AA$")
            ax.set_ylim(ylims[0], ylims[1])
            # Panel 3: flux vs Teff.
            ax = plt.subplot(3,2,3)
            ax.plot(metaall[:,0], flux_val, 'o',alpha =0.5, mfc = 'None', mec = 'green')
            ax.plot(metaall[:,0][pick], flux_val[pick], 'kx', markersize = 10)
            ax.plot(metaall[:,0], y_t, 'k')
            ind1 = argsort(metaall[:,0])
            ax.fill_between(sort(metaall[:,0]), array(y_t + std(flux_val))[ind1], array(y_t - std(flux_val))[ind1] , color = 'y', alpha = 0.2)
            ax.set_xlabel("Teff", fontsize = 14 )
            ax.set_ylabel(lab, fontsize = 14 )
            ax.set_ylim(ylims[0], ylims[1])
            # Panel 4: residual (mean - flux) per star, ordered by [Fe/H].
            ax = plt.subplot(3,2,4)
            diff_flux = coeffs[each,0] - testdataall[each,:,1]
            xrange1 = arange(0,shape(testdataall)[1],1)
            ind1 = argsort(metaall[:,2])
            ind1_pick = argsort(metaall[:,2][pick])
            ax.plot(xrange1, (coeffs[each,0] - testdataall[each,:,1])[ind1], 'o',alpha = 0.5, mfc = 'None', mec = 'grey')
            ax.plot(xrange1[pick], (coeffs[each,0] - testdataall[each,:,1][pick])[ind1_pick], 'kx',markersize = 10)
            ax.fill_between(xrange1, array(mean(diff_flux) + std(diff_flux)), array(mean(diff_flux) - std(diff_flux)) , color = 'y', alpha = 0.2)
            ax.set_xlabel("Star Number (increasing [Fe/H])", fontsize = 14 )
            ax.set_ylabel("flux star - mean flux", fontsize = 14 )
            ax.set_ylim(-1.0, 1.0)
            # Panel 5: label coefficients +/- 1 sigma around this pixel.
            ax = plt.subplot(3,2,5)
            for indx, color, label in [
                    ( 1, "g", "Teff"),
                    ( 2, "b", "logg"),
                    ( 3, "r", "FeH")]:
                _plot_something(ax, testdataall[:, 0, 0][each-10:each+10], coeffs[:, indx][each-10:each+10], covs[:, indx, indx][each-10:each+10], color, label=label)
            ax.axvline(testdataall[:,0,0][each],color = 'grey')
            ax.axhline(0,color = 'grey',linestyle = 'dashed')
            ax.set_xlim(testdataall[:,0,0][each-9], testdataall[:,0,0][each+9])
            ax.legend(loc = 4,fontsize = 10)
            ax.set_xlabel("Wavelength $\AA$", fontsize = 14 )
            ax.set_ylabel("coeffs T,g,FeH", fontsize = 14 )
            # Panel 6: the mean-flux coefficient around this pixel.
            ax = plt.subplot(3,2,6)
            _plot_something(ax, testdataall[:, 0, 0][each-10:each+10], coeffs[:, 0][each-10:each+10], covs[:, 0, 0][each-10:each+10], 'k', label='mean')
            ax.set_ylim(0.6,1.1)
            ax.set_xlim(testdataall[:,0,0][each-9], testdataall[:,0,0][each+9])
            ax.legend(loc = 4,fontsize = 10)
            ax.axvline(testdataall[:,0,0][each],color = 'grey')
            ax.axhline(0,color = 'grey',linestyle = 'dashed')
            ax.set_xlabel("Wavelength $\AA$", fontsize = 14 )
            ax.set_ylabel("Mean flux", fontsize = 14 )
            savefig(fig, str(each)+"_"+str(namesave) , transparent=False, bbox_inches='tight', pad_inches=0.5)
            fig.clf()
    # return
def _plot_something(ax, wl, val, var, color, lw=2, label=""):
factor = 1.
if label == "Teff": factor = 1000. # yes, I feel dirty; MAGIC
sig = np.sqrt(var)
ax.plot(wl, factor*(val+sig), color=color, lw=lw, label=label)
ax.plot(wl, factor*(val-sig), color=color, lw=lw)
ax.fill_between(wl, factor*(val+sig), factor*(val-sig), color = color, alpha = 0.2)
return None
def savefig(fig, prefix, **kwargs):
    """
    Save *fig* as '<prefix>.png' and close all figures.

    NOTE(review): **kwargs is accepted but deliberately not forwarded to
    fig.savefig (the pass-through is commented out below) -- confirm intent.
    `close()` here is the pylab/pyplot close pulled in by a star import.
    """
    # for suffix in (".png"):
    suffix = ".png"
    print "writing %s" % (prefix + suffix)
    fig.savefig(prefix + suffix)#, **kwargs)
    close()
def leave_one_cluster_out():
    # this is the test routine to leave one cluster out
    """
    Leave-one-cluster-out cross-validation.

    For each unique cluster in the training set: retrain the 2nd-order model
    with that cluster held out, re-infer its labels, and plot the comparison.
    Relies on module-level globals: fpickle2, normed_training_data, train,
    infer_labels_nonlinear, plot_leave_one_out.
    """
    dataall, metaall, labels, Ametaall, cluster_name, ids= get_normalized_training_data()
    nameu = unique(cluster_name)
    cluster_name = array(cluster_name)
    for each in nameu:
        clust_pick = each
        # Boolean mask / integer indices of the stars in the held-out cluster.
        take = array(cluster_name) == clust_pick
        inds = arange(0,len(cluster_name),1)
        inds1 = inds[take]
        #return inds1, cluster_name
        # Retrain with this cluster excluded; coefficients land in fpickle2.
        train(dataall, metaall, 2, fpickle2, Ametaall, cluster_name, logg_cut= 40.,teff_cut = 0., leave_out=inds1)
        field = "self_2nd_order_"
        file_in = open(normed_training_data, 'r')
        testdataall, metaall, labels, Ametaall, cluster_name, ids = pickle.load(file_in)
        file_in.close()
        testmetaall, inv_covars = infer_labels_nonlinear("coeffs_2nd_order.pickle", testdataall, field+"tags.pickle",-10.950,10.99)
        plot_leave_one_out(field, clust_pick)
    return
def plot_leave_one_out(filein,cluster_out):
    """
    Plot Cannon labels against ASPCAP labels (Teff, log g, [Fe/H]) with the
    held-out cluster *cluster_out* highlighted as yellow stars.

    filein      : prefix of the '<filein>tags.pickle' file written by the
                  inference step.
    cluster_out : name of the cluster that was left out of training.
    """
    file_in2 = open(filein+"tags.pickle", 'r')
    params, covs_params = pickle.load(file_in2)
    sp = shape(params)
    params = array(params)
    covs_params = array(covs_params)
    file_in2.close()
    # this is the test to
    # NOTE(review): the second assignment overrides the first; only
    # 'test18.txt' is actually read.
    filein2 = 'test14.txt' # originally had for test4g_self and for ages_test4g_self that goes with this
    filein2 = 'test18.txt' # originally had for test4g_self and for ages_test4g_self that goes with this
    filein3 = 'ages.txt' # note ages goes with test14
    plot_markers = ['ko', 'yo', 'ro', 'bo', 'co','k*', 'y*', 'r*', 'b*', 'c*', 'ks', 'rs', 'bs', 'cs', 'rd', 'kd', 'bd', 'rd', 'mo', 'ms' ]
    # M92, M15, M53, N5466, N4147, M13, M2, M3, M5, M107, M71, N2158, N2420, Pleaides, N7789, M67, N6819 , N188, N6791
    # ASPCAP labels and their uncertainties; column indices are file-specific.
    t,g,feh,t_err,feh_err = loadtxt(filein2, usecols = (4,6,8,16,17), unpack =1)
    tA,gA,fehA = loadtxt(filein2, usecols = (3,5,7), unpack =1)
    age = loadtxt(filein3, usecols = (0,), unpack =1)
    # No log g / age uncertainties available: use zeros.
    g_err, age_err = [0]*len(g) , [0]*len(g)
    g_err, age_err = array(g_err), array(age_err)
    diffT = abs(array(t) - array(tA) )
    # Second column of the test file holds the cluster name of each star.
    a = open(filein2)
    al = a.readlines()
    names = []
    for each in al:
        names.append(each.split()[1])
    diffT = array(diffT)
    #pick =logical_and(names != cluster_name, diffT < 600. )
    names = array(names)
    # pick keeps (effectively) all stars; pick2 selects the held-out cluster.
    pick = diffT < 6000. # I need to implement this < 6000 K
    pick2 =logical_and(names == cluster_out, diffT < 6000. )
    t_sel,g_sel,feh_sel,t_err_sel,g_err_sel,feh_err_sel = t[pick2], g[pick2], feh[pick2], t_err[pick2], g_err[pick2], feh_err[pick2]
    t,g,feh,t_err,g_err,feh_err = t[pick], g[pick], feh[pick], t_err[pick], g_err[pick], feh_err[pick]
    #
    # Build cluster boundary indices assuming stars are grouped by cluster.
    names = array(names)
    names = names[pick]
    unames = unique(names)
    starind = arange(0,len(names), 1)
    name_ind = []
    names = array(names)
    for each in unames:
        takeit = each == names
        name_ind.append(np.int(starind[takeit][-1]+1. ) )
    cluster_ind = [0] + list(sort(name_ind))# + [len(al)]
    #
    params_sel = array(params)[pick2]
    covs_params_sel = array(covs_params)[pick2]
    params = array(params)[pick]
    covs_params = array(covs_params)[pick]
    sp2 = shape(params)
    sp3 = len(t)
    rcParams['figure.figsize'] = 12.0, 10.0
    # NOTE(review): the subplots() figure is immediately replaced by figure().
    fig, temp = pyplot.subplots(3,1, sharex=False, sharey=False)
    fig = plt.figure()
    # Invisible frame axis used only to carry the shared y-axis label.
    ax = fig.add_subplot(111, frameon = 0 )
    ax.set_ylabel("The Cannon", labelpad = 40, fontsize = 20 )
    ax.tick_params(labelcolor= 'w', top = 'off', bottom = 'off', left = 'off', right = 'off' )
    ax1 = fig.add_subplot(311)
    ax2 = fig.add_subplot(312)
    ax3 = fig.add_subplot(313)
    # Cannon labels and their 1-sigma uncertainties from the covariances.
    params_labels = [params[:,0], params[:,1], params[:,2] , covs_params[:,0,0]**0.5, covs_params[:,1,1]**0.5, covs_params[:,2,2]**0.5 ]
    cval = ['k', 'b', 'r', ]
    input_ASPCAP = [t, g, feh, t_err, g_err, feh_err ]
    listit_1 = [0,1,2]
    listit_2 = [1,0,0]
    axs = [ax1,ax2,ax3]
    labels = ['teff', 'logg', 'Fe/H']
    # One marker style per cluster; one panel per label.
    for i in range(0,len(cluster_ind)-1):
        indc1 = cluster_ind[i]
        indc2 = cluster_ind[i+1]
        for ax, num,num2,label1,x1,y1 in zip(axs, listit_1,listit_2,labels, [4800,3.0,0.3], [3400,1,-1.5]):
            # Quality cut: sensible log g, Teff error, and [Fe/H].
            pick = logical_and(g[indc1:indc2] > 0, logical_and(t_err[indc1:indc2] < 300, feh[indc1:indc2] > -4.0) )
            # NOTE(review): the first cind assignment is immediately
            # overwritten and cind itself is never used below.
            cind = array(input_ASPCAP[1][indc1:indc2][pick])
            cind = array(input_ASPCAP[num2][indc1:indc2][pick]).flatten()
            ax.plot(input_ASPCAP[num][indc1:indc2][pick], params_labels[num][indc1:indc2][pick], plot_markers[i])
    # Highlight the held-out cluster.  NOTE(review): argument order here is
    # (Cannon, ASPCAP), the reverse of the loop above -- confirm intent.
    ax1.plot(params_sel[:,0], t_sel, 'y*', label = cluster_out, markersize = 14)
    ax2.plot(params_sel[:,1], g_sel, 'y*', label = cluster_out, markersize = 14)
    ax3.plot(params_sel[:,2], feh_sel, 'y*', label = cluster_out, markersize = 14)
    ax1.legend(loc=2,numpoints=1)
    ax2.legend(loc=2,numpoints=1)
    ax3.legend(loc=2,numpoints=1)
    # Annotate each panel with the mean formal uncertainty.
    ax1.text(5400,3700,"y-axis, $<\sigma>$ = "+str(round(mean(params_labels[0+3]),2)),fontsize = 14)
    ax2.text(3.9,1,"y-axis, $<\sigma>$ = "+str(round(mean(params_labels[1+3]),2)),fontsize = 14)
    ax3.text(-0.3,-2.5,"y-axis, $<\sigma>$ = "+str(round(mean(params_labels[2+3]),2)),fontsize = 14)
    # One-to-one reference lines.
    ax1.plot([0,6000], [0,6000], linewidth = 1.5, color = 'k' )
    ax2.plot([0,5], [0,5], linewidth = 1.5, color = 'k' )
    ax3.plot([-3,2], [-3,2], linewidth = 1.5, color = 'k' )
    ax1.set_xlim(3500, 6000)
    ax1.set_ylim(1000,6000)
    ax1.set_ylim(3500,6000)
    ax2.set_xlim(0, 5)
    ax3.set_xlim(-3, 1)
    ax1.set_xlabel("ASPCAP Teff, [K]", fontsize = 14,labelpad = 5)
    ax1.set_ylabel("Teff, [K]", fontsize = 14,labelpad = 5)
    ax2.set_xlabel("ASPCAP logg, [dex]", fontsize = 14,labelpad = 5)
    ax2.set_ylabel("logg, [dex]", fontsize = 14,labelpad = 5)
    ax3.set_xlabel("ASPCAP [Fe/H], [dex]", fontsize = 14,labelpad = 5)
    ax3.set_ylabel("[Fe/H], [dex]", fontsize = 14,labelpad = 5)
    ax2.set_ylim(0,5)
    ax3.set_ylim(-3,1)
    fig.subplots_adjust(hspace=0.22)
    prefix = "/Users/ness/Downloads/Apogee_Raw/calibration_apogeecontinuum/documents/plots/"+str(cluster_out)+"_out"
    savefig2(fig, prefix, transparent=False, bbox_inches='tight', pad_inches=0.5)
    close("all")
    print sp, sp2, sp3
    return
def leave_one_star_out():
    # this is the test routine to leave one star out
    """
    Leave-one-star-out cross-validation.

    For every star in the training set: retrain the 2nd-order model with
    that single star held out, then infer its labels from the held-out
    spectrum.  Relies on module-level globals: pixlist, fpickle2,
    normed_training_data, train, infer_labels_nonlinear.
    """
    dataall, metaall, labels, Ametaall, cluster_name, ids= get_normalized_training_data_tsch(pixlist)
    #nameu = unique(cluster_name)
    #nameu = array(nameu)
    cluster_name = array(cluster_name)
    ids = array(ids)
    idsnew = []
    for each in ids:
        # NOTE(review): `len(ids)` is the length of the whole id list and is
        # therefore constant across this loop; it was presumably meant to be
        # `len(each)` (the individual id) -- confirm against the data format.
        if len(ids) > 20:
            # Strip everything up to the '2m' prefix of 2MASS-style ids.
            idsnew.append(each.split('2m')[-1])
        else:
            # BUG FIX: the original appended the bound method `each.split`
            # (no call), which put function objects into idsnew and broke the
            # string join below.  Keep the id unchanged instead.
            idsnew.append(each)
    idsnew = array(idsnew)
    # One unique label per star: "<cluster>_<id>".
    nameu = [a+"_"+b for a,b in zip(cluster_name, idsnew)]
    nameu = array(nameu)
    for each in nameu:
        name_pick = each
        take = array(nameu) == name_pick
        inds = arange(0,len(cluster_name),1)
        inds1 = inds[take]
        star_take = each #cluster_name[take][0]
        #return inds1, cluster_name
        # Retrain with this star excluded; coefficients land in fpickle2.
        train(dataall, metaall, 2, fpickle2, Ametaall, cluster_name, logg_cut= 40.,teff_cut = 0., leave_out=inds1)
        # up to here
        field = "self_2nd_order_"
        file_in = open(normed_training_data, 'r')
        testdataall, metaall, labels, Ametaall, cluster_name, ids = pickle.load(file_in)
        file_in.close()
        # Infer labels only for the held-out star.
        testmetaall, inv_covars = infer_labels_nonlinear("coeffs_2nd_order.pickle", testdataall[:,take], idsnew[take], field+str(star_take)+"_itags_mknA.pickle",-10.950,10.99)
        #testmetaall, inv_covars = infer_labels_nonlinear("coeffs_2nd_order.pickle", testdataall[:,take], idsnew[take], field+str(star_take)+"_itags.pickle",-10.950,10.99)
        #plot_leave_one_out(field, clust_pick)
    return
if __name__ == "__main__":
    # Driver script: train (if needed) and run inference on either an
    # external field list or the training set itself, selected by self_flag.
    # NOTE(review): each successive loadtxt below overwrites pixlist; only
    # the last uncommented file ("pixtest4.txt") is actually used.
    pixlist = loadtxt("pixtest3.txt", usecols = (0,), unpack =1)
    pixlist = loadtxt("pixtest.txt", usecols = (0,), unpack =1) #v20
    pixlist = loadtxt("pixlist_fromcoeffs.txt", usecols = (0,), unpack =1) #v21
    pixlist = loadtxt("pixtest.txt", usecols = (0,), unpack =1)
    #pixlist = loadtxt("pixtest2.txt", usecols = (0,), unpack =1)
    pixlist = loadtxt("pixtest3.txt", usecols = (0,), unpack =1)
    pixlist = loadtxt("pixtest4.txt", usecols = (0,), unpack =1)
    #pixlist = loadtxt("pixtest5.txt", usecols = (0,), unpack =1)
    #pixlist = loadtxt("pixtest6.txt", usecols = (0,), unpack =1)
    #pixlist = loadtxt("pixtest7.txt", usecols = (0,), unpack =1)
    #dataall, metaall, labels, Ametaall, cluster_name, ids = get_normalized_training_data()
    dataall, metaall, labels, Ametaall, cluster_name, ids = get_normalized_training_data_tsch(pixlist)
    # Train only if the coefficient pickles are not already on disk.
    fpickle = "coeffs.pickle"
    if not glob.glob(fpickle):
        train(dataall, metaall, 1, fpickle, Ametaall,cluster_name, logg_cut= 40.,teff_cut = 0.)
    fpickle2 = "coeffs_2nd_order.pickle"
    if not glob.glob(fpickle2):
        train(dataall, metaall, 2, fpickle2, Ametaall, cluster_name, logg_cut= 40.,teff_cut = 0.)
    # Mode switch (last assignment wins): <1 external fields, 1 self linear,
    # 2 self 2nd-order, 3 leave-one-star-out.
    self_flag = 0
    self_flag = 2
    self_flag = 0
    if self_flag < 1:
        # NOTE(review): only the last open() is used; earlier ones leak
        # file handles.
        a = open('all_test2.txt', 'r')
        a = open('all_test5.txt', 'r')
        a = open('all_test3.txt', 'r')
        a = open('all.txt', 'r')
        a = open('all_test.txt', 'r')
        #a = open('all_test.txt', 'r')
        al = a.readlines()
        bl = []
        for each in al:
            bl.append(each.strip())
        # Run inference on every field file listed.
        for each in bl:
            testfile = each
            field = testfile.split('.txt')[0]+'_' #"4332_"
            #testdataall, ids = get_normalized_test_data(testfile) # if flag is one, do on self
            testdataall, ids = get_normalized_test_data_tsch(testfile,pixlist)
            #testdataall, ids = get_normalized_test_data_tsch(testfile,pixlist) # if flag is one, do on self
            #testmetaall, inv_covars = infer_tags("coeffs.pickle", testdataall, field+"tags.pickle",-10.94,10.99)
            #testmetaall, inv_covars = infer_labels_nonlinear("coeffs_2nd_order.pickle", testdataall, ids, field+"tags_chi2_df_v14_noise.pickle",-10.90,10.99)
            #testmetaall, inv_covars = infer_labels_nonlinear("coeffs_2nd_order.pickle", testdataall, ids, field+"tags_chi2_df_v18.pickle",-10.90,10.99)
            #testmetaall, inv_covars = infer_labels_nonlinear("coeffs_2nd_order.pickle", testdataall, ids, field+"tags_chi2_df_mknown.pickle",-10.90,10.99)
            #testmetaall, inv_covars = infer_labels_nonlinear("coeffs_2nd_order.pickle", testdataall, ids, field+"tags_chi2_df_mknA.pickle",-10.90,10.99)
            #testmetaall, inv_covars = infer_labels_nonlinear("coeffs_2nd_order.pickle", testdataall, ids, field+"tags_chi2_df_mknA_v20.pickle",0.00,1.40)
            #testmetaall, inv_covars = infer_labels_nonlinear("coeffs_2nd_order.pickle", testdataall, ids, field+"tags_chi2_df_v19.pickle",0.00,1.40)
            #testmetaall, inv_covars = infer_labels_nonlinear("coeffs_2nd_order.pickle", testdataall, ids, field+"tags_chi2_df_v20.pickle",0.00,1.40)
            #testmetaall, inv_covars = infer_labels_nonlinear("coeffs_2nd_order.pickle", testdataall, ids, field+"tags_chi2_df_mkn_v21.pickle",0.00,1.40)
            #testmetaall, inv_covars = infer_labels_nonlinear("coeffs_2nd_order.pickle", testdataall, ids, field+"tags_test.pickle",0.00,1.40)
            #testmetaall, inv_covars = infer_labels_nonlinear("coeffs_2nd_order.pickle", testdataall, ids, field+"tags_chi2_df_mknA_TCA_v1.pickle",0.00,1.40)
            testmetaall, inv_covars = infer_labels_nonlinear("coeffs_2nd_order.pickle", testdataall, ids, field+"tags_test.pickle",0.00,1.40)
    if self_flag == 1:
        # Self-test with the linear (1st-order) model + pixel-fit diagnostics.
        #testmetaall, inv_covars = infer_labels_nonlinear("coeffs_2nd_order.pickle", testdataall, ids, field+"tags_chi2_df_mknA.pickle",-10.90,10.99)
        field = "self_"
        file_in = open(normed_training_data, 'r')
        testdataall, metaall, labels, Ametaall, cluster_name = pickle.load(file_in)
        lookatfits('coeffs.pickle',[1002,1193,1383,1496,2803,4000,4500, 5125],testdataall)
        file_in.close()
        testmetaall, inv_covars = infer_labels("coeffs.pickle", testdataall, field+"tags.pickle",-10.960,11.03)
    if self_flag == 2:
        # Self-test with the quadratic (2nd-order) model.
        field = "self_2nd_order_"
        file_in = open(normed_training_data, 'r')
        testdataall, metaall, labels, Ametaall, cluster_name,ids = pickle.load(file_in)
        file_in.close()
        testmetaall, inv_covars = infer_labels_nonlinear("coeffs_2nd_order.pickle", testdataall, ids, field+"tags.pickle",-10.950,10.99)
    if self_flag == 3:
        leave_one_star_out()
|
mkness/TheCannon
|
code/fitspectra.py
|
Python
|
mit
| 56,681
|
[
"VisIt"
] |
bd01448399dd67887e6ba01e55f3ba986e2bd9828df81b33578f33a1451263ad
|
'''
Interpolation actors. Inserts additional events between each pair of events, using
a selected interpolation scheme. The interpolation schemes are:
* zero interpolation - insert zero values
* step interpolation - holds the last value
* linear interpolation - places values on a straight line between successive events
@author: Brian Thorne
@author: Allan McInnes
Created on 1/12/2009
'''
#TODO: probably ought to consider refactoring DT interpolation into separate
# classes, since it has different behavior. What's in there right now seems like
# a bit of a hack.
from scipysim.actors import Siso, Channel, Event, SisoTestHelper
import logging
import unittest
class Interpolator(Siso):
    '''
    Abstract base class for interpolation actors.

    Subclasses implement interpolate() to decide the value of each inserted
    event; this class handles the bookkeeping of tags, including the special
    integer-tag handling for the discrete-time ('DT') domain.
    '''
    def __init__(self, input_channel, output_channel, interpolation_factor=2):
        '''
        Constructor for an interpolation actor.

        @param input_channel: source channel; its domain ('DT' or continuous)
                              determines how output tags are generated.
        @param output_channel: destination channel for interpolated events.
        @param interpolation_factor: the number of events in the signal will be increased
            by this factor. For interpolation_factor N, the interpolator
            will add N-1 events between each pair of input events.
        '''
        super(Interpolator, self).__init__(input_channel=input_channel,
                                           output_channel=output_channel,
                                           child_handles_output=True)
        self.interpolation_factor = int(interpolation_factor)
        # Most recent input event; None until the first event arrives, so no
        # interpolated events are produced before the second input.
        self.last_event = None
        # Tag of the most recently emitted output event (needed for DT).
        self.last_out_tag = None
        self.domain = input_channel.domain

    def interpolate(self, event, tag):
        '''This method must be overridden. It implements the interpolation algorithm
        based on the current and previous events.

        @param event: the current (later) input event.
        @param tag: the tag at which the interpolated event should be placed.
        @return an event
        '''
        raise NotImplementedError

    def siso_process(self, event):
        # Emit N-1 interpolated events between the previous and current input.
        if self.last_event:
            for i in range(1, self.interpolation_factor):
                # Tag placed at the i/N fraction of the inter-event interval.
                tag = (self.last_event['tag']
                       + ((event['tag'] - self.last_event['tag'])
                          * (i / float(self.interpolation_factor))))
                new_event = self.interpolate(event, tag)
                if self.domain == 'DT':
                    # In the DT domain we can't have fractional tag values.
                    # So the output signal is output[n] = input[n/N], where
                    # N is the interpolation factor.
                    # E.g. for N = 2
                    # output[0] = input[0]
                    # output[1] = interpolated value
                    # output[2] = input[1]
                    # ...
                    # Note how the tags outgoing events differ from
                    # the tags on the corresponding incoming events.
                    # This is not an issue for models with continuous time.
                    assert self.last_out_tag is not None
                    new_event = Event(self.last_out_tag + i, new_event.value)
                # print new_event['tag']
                self.output_channel.put(new_event)
        self.last_event = event.copy()
        if self.domain == 'DT' and self.last_out_tag is not None:
            # Produce DT output tagging as described above
            out_event = Event(self.last_out_tag + self.interpolation_factor,
                              event.value)
        else:
            # CT domain (or very first DT event): pass the input tag through.
            out_event = event
        self.last_out_tag = out_event.tag
        self.output_channel.put(out_event)
class InterpolatorZero(Interpolator):
    '''Zero interpolation: every inserted event carries the value 0.0.'''
    def interpolate(self, event, tag):
        # The inserted sample is always zero, regardless of its neighbours.
        zero_value = 0.0
        return Event(tag=tag, value=zero_value)
class InterpolatorStep(Interpolator):
    '''Step (zero-order hold) interpolation: repeats the previous value.'''
    def interpolate(self, event, tag):
        # Hold the value of the most recent real input event.
        held_value = self.last_event.value
        return Event(tag=tag, value=held_value)
class InterpolatorLinear(Interpolator):
    '''Linear interpolation: places inserted values on the straight line
    joining the previous event to the current one.
    '''
    def interpolate(self, event, tag):
        # Slope of the line between the two bracketing input events.
        slope = ((event.value - self.last_event.value)
                 / (event.tag - self.last_event.tag))
        # Time elapsed since the previous event, evaluated on the line.
        elapsed = (tag - self.last_event.tag)
        interpolated = slope * elapsed + self.last_event.value
        return Event(tag=tag, value=interpolated)
class InterpolateTests(unittest.TestCase):
    '''Test the interpolation actors (CT and DT, factor 2 throughout).'''

    def setUp(self):
        '''
        Unit test setup code: default channels are continuous-time.
        '''
        self.q_in = Channel()
        self.q_out = Channel()

    def test_zero_interpolation_ct(self):
        '''Test zero interpolation of a simple CT signal.
        '''
        # Input at even tags; inserted odd-tag events must be 0.0.
        inp = [Event(value = i, tag = i) for i in xrange(-10, 11, 2)]
        expected_outputs = [Event(tag = t, value = t if not (t % 2) else 0.0 )
                            for t in xrange(-10, 11, 1)]
        block = InterpolatorZero(self.q_in, self.q_out)
        SisoTestHelper(self, block, inp, expected_outputs)

    def test_zero_interpolation_dt(self):
        '''Test zero interpolation of a simple DT signal.
        '''
        # 21 inputs -> 41 outputs; DT output tags are contiguous integers
        # starting from the first input tag, hence the range to 31.
        inp = [Event(value = i, tag = i) for i in xrange(-10, 11, 1)]
        expected_outputs = [Event(tag = t, value = (t / 2.0 - 5.0) if not (t % 2) else 0.0 )
                            for t in xrange(-10, 31, 1)]
        self.q_in = Channel('DT')
        self.q_out = Channel('DT')
        block = InterpolatorZero(self.q_in, self.q_out)
        SisoTestHelper(self, block, inp, expected_outputs)

    def test_step_interpolation_ct(self):
        '''Test step interpolation of a simple CT signal.
        '''
        # Inserted odd-tag events hold the preceding even value (t - 1).
        inp = [Event(value = i, tag = i) for i in xrange(-10, 11, 2)]
        expected_outputs = [Event(tag = t, value = t if not (t % 2) else t - 1 )
                            for t in xrange(-10, 11, 1)]
        block = InterpolatorStep(self.q_in, self.q_out)
        SisoTestHelper(self, block, inp, expected_outputs)

    def test_step_interpolation_dt(self):
        '''Test step interpolation of a simple DT signal.
        '''
        inp = [Event(value = i, tag = i) for i in xrange(-10, 11, 1)]
        expected_outputs = [Event(tag = t, value = (t / 2.0 - 5.0)
                            if not (t % 2) else ((t - 1) / 2.0 - 5.0) )
                            for t in xrange(-10, 31, 1)]
        self.q_in = Channel('DT')
        self.q_out = Channel('DT')
        block = InterpolatorStep(self.q_in, self.q_out)
        SisoTestHelper(self, block, inp, expected_outputs)

    def test_linear_interpolation_ct(self):
        '''Test linear interpolation of a simple CT signal.
        '''
        # The input is the identity ramp, so interpolated values equal t.
        inp = [Event(value = i, tag = i) for i in xrange(-10, 11, 2)]
        expected_outputs = [Event(tag = t, value = t ) for t in xrange(-10, 11, 1)]
        block = InterpolatorLinear(self.q_in, self.q_out)
        SisoTestHelper(self, block, inp, expected_outputs)

    def test_linear_interpolation_dt(self):
        '''Test linear interpolation of a simple DT signal.
        '''
        inp = [Event(value = i, tag = i) for i in xrange(-10, 11, 1)]
        expected_outputs = [Event(tag = t, value = (t / 2.0 - 5.0) )
                            for t in xrange(-10, 31, 1)]
        self.q_in = Channel('DT')
        self.q_out = Channel('DT')
        block = InterpolatorLinear(self.q_in, self.q_out)
        SisoTestHelper(self, block, inp, expected_outputs)
if __name__ == "__main__":
    # Run the actor unit tests when executed as a script.
    unittest.main()
|
mm318/scipysim-nogui
|
scipysim/actors/signal/interpolator.py
|
Python
|
gpl-3.0
| 7,526
|
[
"Brian"
] |
947d426b87f60c48d59a803ccd0ca4747934dbe16128c5c82a8038504ff72722
|
"""
Acceptance tests for studio related to the outline page.
"""
import json
from datetime import datetime, timedelta
import itertools
from pytz import UTC
from bok_choy.promise import EmptyPromise
from nose.plugins.attrib import attr
from ...pages.studio.settings_advanced import AdvancedSettingsPage
from ...pages.studio.overview import CourseOutlinePage, ContainerPage, ExpandCollapseLinkState
from ...pages.studio.utils import add_discussion, drag, verify_ordering
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.course_nav import CourseNavPage
from ...pages.lms.staff_view import StaffPage
from ...fixtures.config import ConfigModelFixture
from ...fixtures.course import XBlockFixtureDesc
from base_studio_test import StudioCourseTest
from ..helpers import load_data_str
from ...pages.lms.progress import ProgressPage
SECTION_NAME = 'Test Section'
SUBSECTION_NAME = 'Test Subsection'
UNIT_NAME = 'Test Unit'
class CourseOutlineTest(StudioCourseTest):
    """
    Base class for all course outline tests.

    Provides a default course fixture (one section/subsection/unit with
    three components) and helpers for performing an outline action and
    asserting the resulting ordering, including persistence across reload.
    """

    def setUp(self):
        """
        Install a course with no content using a fixture.
        """
        super(CourseOutlineTest, self).setUp()
        self.course_outline_page = CourseOutlinePage(
            self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
        )
        self.advanced_settings = AdvancedSettingsPage(
            self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
        )

    def populate_course_fixture(self, course_fixture):
        """ Install a course with sections/problems, tabs, updates, and handouts """
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', SECTION_NAME).add_children(
                XBlockFixtureDesc('sequential', SUBSECTION_NAME).add_children(
                    XBlockFixtureDesc('vertical', UNIT_NAME).add_children(
                        XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
                        XBlockFixtureDesc('html', 'Test HTML Component'),
                        XBlockFixtureDesc('discussion', 'Test Discussion Component')
                    )
                )
            )
        )

    def do_action_and_verify(self, outline_page, action, expected_ordering):
        """
        Perform the supplied action and then verify the resulting ordering.

        outline_page may be None, in which case the outline page is visited
        first.  The ordering is checked twice: immediately after the action
        and again after a full page reload, to confirm persistence.
        """
        if outline_page is None:
            outline_page = self.course_outline_page.visit()
        action(outline_page)
        verify_ordering(self, outline_page, expected_ordering)
        # Reload the page and expand all subsections to see that the change was persisted.
        outline_page = self.course_outline_page.visit()
        outline_page.q(css='.outline-item.outline-subsection.is-collapsed .ui-toggle-expansion').click()
        verify_ordering(self, outline_page, expected_ordering)
@attr('shard_3')
class CourseOutlineDragAndDropTest(CourseOutlineTest):
    """
    Tests of drag and drop within the outline page.
    """
    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """
        Create a course with one section, two subsections, and four units
        """
        # Drag-handle indices as rendered in the outline page:
        # with collapsed outline
        self.chap_1_handle = 0
        self.chap_1_seq_1_handle = 1
        # with first sequential expanded
        self.seq_1_vert_1_handle = 2
        self.seq_1_vert_2_handle = 3
        self.chap_1_seq_2_handle = 4
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', "1").add_children(
                XBlockFixtureDesc('sequential', '1.1').add_children(
                    XBlockFixtureDesc('vertical', '1.1.1'),
                    XBlockFixtureDesc('vertical', '1.1.2')
                ),
                XBlockFixtureDesc('sequential', '1.2').add_children(
                    XBlockFixtureDesc('vertical', '1.2.1'),
                    XBlockFixtureDesc('vertical', '1.2.2')
                )
            )
        )

    def drag_and_verify(self, source, target, expected_ordering, outline_page=None):
        """
        Drag the element at handle index *source* onto *target* and verify
        that the outline then matches *expected_ordering* (persistently).
        NOTE: `lambda (outline): ...` is Python 2 tuple-parameter syntax.
        """
        self.do_action_and_verify(
            outline_page,
            lambda (outline): drag(outline, source, target),
            expected_ordering
        )

    def test_drop_unit_in_collapsed_subsection(self):
        """
        Drag vertical "1.1.2" from subsection "1.1" into collapsed subsection "1.2" which already
        have its own verticals.
        """
        course_outline_page = self.course_outline_page.visit()
        # expand first subsection
        course_outline_page.q(css='.outline-item.outline-subsection.is-collapsed .ui-toggle-expansion').first.click()
        expected_ordering = [{"1": ["1.1", "1.2"]},
                             {"1.1": ["1.1.1"]},
                             {"1.2": ["1.1.2", "1.2.1", "1.2.2"]}]
        self.drag_and_verify(self.seq_1_vert_2_handle, self.chap_1_seq_2_handle, expected_ordering, course_outline_page)
@attr('shard_3')
class WarningMessagesTest(CourseOutlineTest):
"""
Feature: Warning messages on sections, subsections, and units
"""
__test__ = True
STAFF_ONLY_WARNING = 'Contains staff only content'
LIVE_UNPUBLISHED_WARNING = 'Unpublished changes to live content'
FUTURE_UNPUBLISHED_WARNING = 'Unpublished changes to content that will release in the future'
NEVER_PUBLISHED_WARNING = 'Unpublished units will not be released'
class PublishState(object):
"""
Default values for representing the published state of a unit
"""
NEVER_PUBLISHED = 1
UNPUBLISHED_CHANGES = 2
PUBLISHED = 3
VALUES = [NEVER_PUBLISHED, UNPUBLISHED_CHANGES, PUBLISHED]
class UnitState(object):
""" Represents the state of a unit """
def __init__(self, is_released, publish_state, is_locked):
""" Creates a new UnitState with the given properties """
self.is_released = is_released
self.publish_state = publish_state
self.is_locked = is_locked
@property
def name(self):
""" Returns an appropriate name based on the properties of the unit """
result = "Released " if self.is_released else "Unreleased "
if self.publish_state == WarningMessagesTest.PublishState.NEVER_PUBLISHED:
result += "Never Published "
elif self.publish_state == WarningMessagesTest.PublishState.UNPUBLISHED_CHANGES:
result += "Unpublished Changes "
else:
result += "Published "
result += "Locked" if self.is_locked else "Unlocked"
return result
def populate_course_fixture(self, course_fixture):
""" Install a course with various configurations that could produce warning messages """
# Define the dimensions that map to the UnitState constructor
features = [
[True, False], # Possible values for is_released
self.PublishState.VALUES, # Possible values for publish_state
[True, False] # Possible values for is_locked
]
# Add a fixture for every state in the product of features
course_fixture.add_children(*[
self._build_fixture(self.UnitState(*state)) for state in itertools.product(*features)
])
def _build_fixture(self, unit_state):
""" Returns an XBlockFixtureDesc with a section, subsection, and possibly unit that has the given state. """
name = unit_state.name
start = (datetime(1984, 3, 4) if unit_state.is_released else datetime.now(UTC) + timedelta(1)).isoformat()
subsection = XBlockFixtureDesc('sequential', name, metadata={'start': start})
# Children of never published subsections will be added on demand via _ensure_unit_present
return XBlockFixtureDesc('chapter', name).add_children(
subsection if unit_state.publish_state == self.PublishState.NEVER_PUBLISHED
else subsection.add_children(
XBlockFixtureDesc('vertical', name, metadata={
'visible_to_staff_only': True if unit_state.is_locked else None
})
)
)
def test_released_never_published_locked(self):
""" Tests that released never published locked units display staff only warnings """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=True),
self.STAFF_ONLY_WARNING
)
def test_released_never_published_unlocked(self):
""" Tests that released never published unlocked units display 'Unpublished units will not be released' """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=False),
self.NEVER_PUBLISHED_WARNING
)
def test_released_unpublished_changes_locked(self):
""" Tests that released unpublished changes locked units display staff only warnings """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=True),
self.STAFF_ONLY_WARNING
)
def test_released_unpublished_changes_unlocked(self):
""" Tests that released unpublished changes unlocked units display 'Unpublished changes to live content' """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=False),
self.LIVE_UNPUBLISHED_WARNING
)
def test_released_published_locked(self):
""" Tests that released published locked units display staff only warnings """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.PUBLISHED, is_locked=True),
self.STAFF_ONLY_WARNING
)
def test_released_published_unlocked(self):
""" Tests that released published unlocked units display no warnings """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.PUBLISHED, is_locked=False),
None
)
def test_unreleased_never_published_locked(self):
""" Tests that unreleased never published locked units display staff only warnings """
self._verify_unit_warning(
self.UnitState(is_released=False, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=True),
self.STAFF_ONLY_WARNING
)
def test_unreleased_never_published_unlocked(self):
""" Tests that unreleased never published unlocked units display 'Unpublished units will not be released' """
self._verify_unit_warning(
self.UnitState(is_released=False, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=False),
self.NEVER_PUBLISHED_WARNING
)
def test_unreleased_unpublished_changes_locked(self):
""" Tests that unreleased unpublished changes locked units display staff only warnings """
self._verify_unit_warning(
self.UnitState(is_released=False, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=True),
self.STAFF_ONLY_WARNING
)
def test_unreleased_unpublished_changes_unlocked(self):
"""
Tests that unreleased unpublished changes unlocked units display 'Unpublished changes to content that will
release in the future'
"""
self._verify_unit_warning(
self.UnitState(is_released=False, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=False),
self.FUTURE_UNPUBLISHED_WARNING
)
def test_unreleased_published_locked(self):
""" Tests that unreleased published locked units display staff only warnings """
self._verify_unit_warning(
self.UnitState(is_released=False, publish_state=self.PublishState.PUBLISHED, is_locked=True),
self.STAFF_ONLY_WARNING
)
def test_unreleased_published_unlocked(self):
""" Tests that unreleased published unlocked units display no warnings """
self._verify_unit_warning(
self.UnitState(is_released=False, publish_state=self.PublishState.PUBLISHED, is_locked=False),
None
)
def _verify_unit_warning(self, unit_state, expected_status_message):
"""
Verifies that the given unit's messages match the expected messages.
If expected_status_message is None, then the unit status message is expected to not be present.
"""
self._ensure_unit_present(unit_state)
self.course_outline_page.visit()
section = self.course_outline_page.section(unit_state.name)
subsection = section.subsection_at(0)
subsection.expand_subsection()
unit = subsection.unit_at(0)
if expected_status_message == self.STAFF_ONLY_WARNING:
self.assertEqual(section.status_message, self.STAFF_ONLY_WARNING)
self.assertEqual(subsection.status_message, self.STAFF_ONLY_WARNING)
self.assertEqual(unit.status_message, self.STAFF_ONLY_WARNING)
else:
self.assertFalse(section.has_status_message)
self.assertFalse(subsection.has_status_message)
if expected_status_message:
self.assertEqual(unit.status_message, expected_status_message)
else:
self.assertFalse(unit.has_status_message)
def _ensure_unit_present(self, unit_state):
""" Ensures that a unit with the given state is present on the course outline """
if unit_state.publish_state == self.PublishState.PUBLISHED:
return
name = unit_state.name
self.course_outline_page.visit()
subsection = self.course_outline_page.section(name).subsection(name)
subsection.expand_subsection()
if unit_state.publish_state == self.PublishState.UNPUBLISHED_CHANGES:
unit = subsection.unit(name).go_to()
add_discussion(unit)
elif unit_state.publish_state == self.PublishState.NEVER_PUBLISHED:
subsection.add_unit()
unit = ContainerPage(self.browser, None)
unit.wait_for_page()
if unit.is_staff_locked != unit_state.is_locked:
unit.toggle_staff_lock()
@attr('shard_3')
class EditingSectionsTest(CourseOutlineTest):
    """
    Feature: Editing Release date, Due date and grading type.
    """

    # NOTE(review): presumably the shared base class sets __test__ = False to
    # keep itself out of collection; this re-enables it here — confirm.
    __test__ = True

    def test_can_edit_subsection(self):
        """
        Scenario: I can edit settings of subsection.
            Given that I have created a subsection
            Then I see release date, due date and grading policy of subsection in course outline
            When I click on the configuration icon
            Then edit modal window is shown
            And release date, due date and grading policy fields present
            And they have correct initial values
            Then I set new values for these fields
            And I click save button on the modal
            Then I see release date, due date and grading policy of subsection in course outline
        """
        self.course_outline_page.visit()
        subsection = self.course_outline_page.section(SECTION_NAME).subsection(SUBSECTION_NAME)

        # Verify that Release date visible by default
        self.assertTrue(subsection.release_date)
        # Verify that Due date and Policy hidden by default
        self.assertFalse(subsection.due_date)
        self.assertFalse(subsection.policy)

        modal = subsection.edit()

        # Verify fields
        self.assertTrue(modal.has_release_date())
        self.assertTrue(modal.has_due_date())
        self.assertTrue(modal.has_policy())

        # Verify initial values
        self.assertEqual(modal.release_date, u'1/1/1970')
        self.assertEqual(modal.due_date, u'')
        self.assertEqual(modal.policy, u'Not Graded')

        # Set new values
        modal.release_date = '3/12/1972'
        modal.due_date = '7/21/2014'
        modal.policy = 'Lab'

        modal.save()
        self.assertIn(u'Released: Mar 12, 1972', subsection.release_date)
        self.assertIn(u'Due: Jul 21, 2014', subsection.due_date)
        self.assertIn(u'Lab', subsection.policy)

    def test_can_edit_section(self):
        """
        Scenario: I can edit settings of section.
            Given that I have created a section
            Then I see release date of section in course outline
            When I click on the configuration icon
            Then edit modal window is shown
            And release date field present
            And it has correct initial value
            Then I set new value for this field
            And I click save button on the modal
            Then I see release date of section in course outline
        """
        self.course_outline_page.visit()
        section = self.course_outline_page.section(SECTION_NAME)

        # Verify that Release date visible by default
        self.assertTrue(section.release_date)
        # Verify that Due date and Policy are not present
        self.assertFalse(section.due_date)
        self.assertFalse(section.policy)

        modal = section.edit()
        # Verify fields
        self.assertTrue(modal.has_release_date())
        self.assertFalse(modal.has_due_date())
        self.assertFalse(modal.has_policy())

        # Verify initial value
        self.assertEqual(modal.release_date, u'1/1/1970')

        # Set new value
        modal.release_date = '5/14/1969'

        modal.save()
        self.assertIn(u'Released: May 14, 1969', section.release_date)
        # Verify that Due date and Policy are not present
        self.assertFalse(section.due_date)
        self.assertFalse(section.policy)

    def test_subsection_is_graded_in_lms(self):
        """
        Scenario: I can grade subsection from course outline page.
            Given I visit progress page
            And I see that problem in subsection has grading type "Practice"
            Then I visit course outline page
            And I click on the configuration icon of subsection
            And I set grading policy to "Lab"
            And I click save button on the modal
            Then I visit progress page
            And I see that problem in subsection has grading type "Problem"
        """
        progress_page = ProgressPage(self.browser, self.course_id)
        progress_page.visit()
        progress_page.wait_for_page()
        self.assertEqual(u'Practice', progress_page.grading_formats[0])
        self.course_outline_page.visit()

        subsection = self.course_outline_page.section(SECTION_NAME).subsection(SUBSECTION_NAME)
        modal = subsection.edit()
        # Set new values
        modal.policy = 'Lab'
        modal.save()

        progress_page.visit()
        # "Lab" assignments display with the "Problem" grading format in the LMS.
        self.assertEqual(u'Problem', progress_page.grading_formats[0])

    def test_unchanged_release_date_is_not_saved(self):
        """
        Scenario: Saving a subsection without changing the release date will not override the release date
            Given that I have created a section with a subsection
            When I open the settings modal for the subsection
            And I press save
            And I open the settings modal for the section
            And I change the release date to 07/20/1969
            And I press save
            Then the subsection and the section have the release date 07/20/1969
        """
        self.course_outline_page.visit()
        # Save the subsection modal without touching anything; an unchanged
        # release date must not be written back (it should keep inheriting).
        modal = self.course_outline_page.section_at(0).subsection_at(0).edit()
        modal.save()
        modal = self.course_outline_page.section_at(0).edit()
        modal.release_date = '7/20/1969'
        modal.save()
        release_text = 'Released: Jul 20, 1969'
        self.assertIn(release_text, self.course_outline_page.section_at(0).release_date)
        self.assertIn(release_text, self.course_outline_page.section_at(0).subsection_at(0).release_date)
@attr('shard_3')
class StaffLockTest(CourseOutlineTest):
    """
    Feature: Sections, subsections, and units can be locked and unlocked from the course outline.
    """

    # NOTE(review): presumably the shared base class sets __test__ = False to
    # keep itself out of collection; this re-enables it here — confirm.
    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """ Create a course with one section, two subsections, and four units """
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', '1').add_children(
                XBlockFixtureDesc('sequential', '1.1').add_children(
                    XBlockFixtureDesc('vertical', '1.1.1'),
                    XBlockFixtureDesc('vertical', '1.1.2')
                ),
                XBlockFixtureDesc('sequential', '1.2').add_children(
                    XBlockFixtureDesc('vertical', '1.2.1'),
                    XBlockFixtureDesc('vertical', '1.2.2')
                )
            )
        )

    def _verify_descendants_are_staff_only(self, item):
        """Verifies that all the descendants of item are staff only"""
        self.assertTrue(item.is_staff_only)
        # Units have no children() accessor, so recurse only where it exists.
        if hasattr(item, 'children'):
            for child in item.children():
                self._verify_descendants_are_staff_only(child)

    def _remove_staff_lock_and_verify_warning(self, outline_item, expect_warning):
        """Removes staff lock from a course outline item and checks whether or not a warning appears."""
        modal = outline_item.edit()
        modal.is_explicitly_locked = False
        if expect_warning:
            self.assertTrue(modal.shows_staff_lock_warning())
        else:
            self.assertFalse(modal.shows_staff_lock_warning())
        modal.save()

    def _toggle_lock_on_unlocked_item(self, outline_item):
        """Toggles outline_item's staff lock on and then off, verifying the staff lock warning"""
        self.assertFalse(outline_item.has_staff_lock_warning)
        outline_item.set_staff_lock(True)
        self.assertTrue(outline_item.has_staff_lock_warning)
        # Locking a parent must implicitly lock everything beneath it.
        self._verify_descendants_are_staff_only(outline_item)
        outline_item.set_staff_lock(False)
        self.assertFalse(outline_item.has_staff_lock_warning)

    def _verify_explicit_staff_lock_remains_after_unlocking_parent(self, child_item, parent_item):
        """Verifies that child_item's explicit staff lock remains after removing parent_item's staff lock"""
        child_item.set_staff_lock(True)
        parent_item.set_staff_lock(True)
        self.assertTrue(parent_item.has_staff_lock_warning)
        self.assertTrue(child_item.has_staff_lock_warning)
        parent_item.set_staff_lock(False)
        self.assertFalse(parent_item.has_staff_lock_warning)
        # The child's own (explicit) lock is independent of the inherited one.
        self.assertTrue(child_item.has_staff_lock_warning)

    def test_units_can_be_locked(self):
        """
        Scenario: Units can be locked and unlocked from the course outline page
            Given I have a course with a unit
            When I click on the configuration icon
            And I enable explicit staff locking
            And I click save
            Then the unit shows a staff lock warning
            And when I click on the configuration icon
            And I disable explicit staff locking
            And I click save
            Then the unit does not show a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0)
        self._toggle_lock_on_unlocked_item(unit)

    def test_subsections_can_be_locked(self):
        """
        Scenario: Subsections can be locked and unlocked from the course outline page
            Given I have a course with a subsection
            When I click on the subsection's configuration icon
            And I enable explicit staff locking
            And I click save
            Then the subsection shows a staff lock warning
            And all its descendants are staff locked
            And when I click on the subsection's configuration icon
            And I disable explicit staff locking
            And I click save
            Then the subsection does not show a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        subsection = self.course_outline_page.section_at(0).subsection_at(0)
        self._toggle_lock_on_unlocked_item(subsection)

    def test_sections_can_be_locked(self):
        """
        Scenario: Sections can be locked and unlocked from the course outline page
            Given I have a course with a section
            When I click on the section's configuration icon
            And I enable explicit staff locking
            And I click save
            Then the section shows a staff lock warning
            And all its descendants are staff locked
            And when I click on the section's configuration icon
            And I disable explicit staff locking
            And I click save
            Then the section does not show a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        section = self.course_outline_page.section_at(0)
        self._toggle_lock_on_unlocked_item(section)

    def test_explicit_staff_lock_remains_after_unlocking_section(self):
        """
        Scenario: An explicitly locked unit is still locked after removing an inherited lock from a section
            Given I have a course with sections, subsections, and units
            And I have enabled explicit staff lock on a section and one of its units
            When I click on the section's configuration icon
            And I disable explicit staff locking
            And I click save
            Then the unit still shows a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        section = self.course_outline_page.section_at(0)
        unit = section.subsection_at(0).unit_at(0)
        self._verify_explicit_staff_lock_remains_after_unlocking_parent(unit, section)

    def test_explicit_staff_lock_remains_after_unlocking_subsection(self):
        """
        Scenario: An explicitly locked unit is still locked after removing an inherited lock from a subsection
            Given I have a course with sections, subsections, and units
            And I have enabled explicit staff lock on a subsection and one of its units
            When I click on the subsection's configuration icon
            And I disable explicit staff locking
            And I click save
            Then the unit still shows a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        subsection = self.course_outline_page.section_at(0).subsection_at(0)
        unit = subsection.unit_at(0)
        self._verify_explicit_staff_lock_remains_after_unlocking_parent(unit, subsection)

    def test_section_displays_lock_when_all_subsections_locked(self):
        """
        Scenario: All subsections in section are explicitly locked, section should display staff only warning
            Given I have a course one section and two subsections
            When I enable explicit staff lock on all the subsections
            Then the section shows a staff lock warning
        """
        self.course_outline_page.visit()
        section = self.course_outline_page.section_at(0)
        section.subsection_at(0).set_staff_lock(True)
        section.subsection_at(1).set_staff_lock(True)
        self.assertTrue(section.has_staff_lock_warning)

    def test_section_displays_lock_when_all_units_locked(self):
        """
        Scenario: All units in a section are explicitly locked, section should display staff only warning
            Given I have a course with one section, two subsections, and four units
            When I enable explicit staff lock on all the units
            Then the section shows a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        section = self.course_outline_page.section_at(0)
        section.subsection_at(0).unit_at(0).set_staff_lock(True)
        section.subsection_at(0).unit_at(1).set_staff_lock(True)
        section.subsection_at(1).unit_at(0).set_staff_lock(True)
        section.subsection_at(1).unit_at(1).set_staff_lock(True)
        self.assertTrue(section.has_staff_lock_warning)

    def test_subsection_displays_lock_when_all_units_locked(self):
        """
        Scenario: All units in subsection are explicitly locked, subsection should display staff only warning
            Given I have a course with one subsection and two units
            When I enable explicit staff lock on all the units
            Then the subsection shows a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        subsection = self.course_outline_page.section_at(0).subsection_at(0)
        subsection.unit_at(0).set_staff_lock(True)
        subsection.unit_at(1).set_staff_lock(True)
        self.assertTrue(subsection.has_staff_lock_warning)

    def test_section_does_not_display_lock_when_some_subsections_locked(self):
        """
        Scenario: Only some subsections in section are explicitly locked, section should NOT display staff only warning
            Given I have a course with one section and two subsections
            When I enable explicit staff lock on one subsection
            Then the section does not show a staff lock warning
        """
        self.course_outline_page.visit()
        section = self.course_outline_page.section_at(0)
        section.subsection_at(0).set_staff_lock(True)
        self.assertFalse(section.has_staff_lock_warning)

    def test_section_does_not_display_lock_when_some_units_locked(self):
        """
        Scenario: Only some units in section are explicitly locked, section should NOT display staff only warning
            Given I have a course with one section, two subsections, and four units
            When I enable explicit staff lock on three units
            Then the section does not show a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        section = self.course_outline_page.section_at(0)
        # Lock three of the four units; one unlocked unit keeps the section visible.
        section.subsection_at(0).unit_at(0).set_staff_lock(True)
        section.subsection_at(0).unit_at(1).set_staff_lock(True)
        section.subsection_at(1).unit_at(1).set_staff_lock(True)
        self.assertFalse(section.has_staff_lock_warning)

    def test_subsection_does_not_display_lock_when_some_units_locked(self):
        """
        Scenario: Only some units in subsection are explicitly locked, subsection should NOT display staff only warning
            Given I have a course with one subsection and two units
            When I enable explicit staff lock on one unit
            Then the subsection does not show a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        subsection = self.course_outline_page.section_at(0).subsection_at(0)
        subsection.unit_at(0).set_staff_lock(True)
        self.assertFalse(subsection.has_staff_lock_warning)

    def test_locked_sections_do_not_appear_in_lms(self):
        """
        Scenario: A locked section is not visible to students in the LMS
            Given I have a course with two sections
            When I enable explicit staff lock on one section
            And I click the View Live button to switch to staff view
            Then I see two sections in the sidebar
            And when I switch the view mode to student view
            Then I see one section in the sidebar
        """
        self.course_outline_page.visit()
        self.course_outline_page.add_section_from_top_button()
        self.course_outline_page.section_at(1).set_staff_lock(True)
        self.course_outline_page.view_live()
        courseware = CoursewarePage(self.browser, self.course_id)
        courseware.wait_for_page()
        # Staff view still sees the locked section...
        self.assertEqual(courseware.num_sections, 2)
        # ...but student view must not.
        StaffPage(self.browser, self.course_id).set_staff_view_mode('Student')
        self.assertEqual(courseware.num_sections, 1)

    def test_locked_subsections_do_not_appear_in_lms(self):
        """
        Scenario: A locked subsection is not visible to students in the LMS
            Given I have a course with two subsections
            When I enable explicit staff lock on one subsection
            And I click the View Live button to switch to staff view
            Then I see two subsections in the sidebar
            And when I switch the view mode to student view
            Then I see one subsection in the sidebar
        """
        self.course_outline_page.visit()
        self.course_outline_page.section_at(0).subsection_at(1).set_staff_lock(True)
        self.course_outline_page.view_live()
        courseware = CoursewarePage(self.browser, self.course_id)
        courseware.wait_for_page()
        # Staff view still sees the locked subsection...
        self.assertEqual(courseware.num_subsections, 2)
        # ...but student view must not.
        StaffPage(self.browser, self.course_id).set_staff_view_mode('Student')
        self.assertEqual(courseware.num_subsections, 1)

    def test_toggling_staff_lock_on_section_does_not_publish_draft_units(self):
        """
        Scenario: Locking and unlocking a section will not publish its draft units
            Given I have a course with a section and unit
            And the unit has a draft and published version
            When I enable explicit staff lock on the section
            And I disable explicit staff lock on the section
            And I click the View Live button to switch to staff view
            Then I see the published version of the unit
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).go_to()
        # Create a draft on top of the published unit.
        add_discussion(unit)
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        section = self.course_outline_page.section_at(0)
        section.set_staff_lock(True)
        section.set_staff_lock(False)
        unit = section.subsection_at(0).unit_at(0).go_to()
        unit.view_published_version()
        courseware = CoursewarePage(self.browser, self.course_id)
        courseware.wait_for_page()
        # The draft discussion must not have been published by the lock toggle.
        self.assertEqual(courseware.num_xblock_components, 0)

    def test_toggling_staff_lock_on_subsection_does_not_publish_draft_units(self):
        """
        Scenario: Locking and unlocking a subsection will not publish its draft units
            Given I have a course with a subsection and unit
            And the unit has a draft and published version
            When I enable explicit staff lock on the subsection
            And I disable explicit staff lock on the subsection
            And I click the View Live button to switch to staff view
            Then I see the published version of the unit
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).go_to()
        # Create a draft on top of the published unit.
        add_discussion(unit)
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        subsection = self.course_outline_page.section_at(0).subsection_at(0)
        subsection.set_staff_lock(True)
        subsection.set_staff_lock(False)
        unit = subsection.unit_at(0).go_to()
        unit.view_published_version()
        courseware = CoursewarePage(self.browser, self.course_id)
        courseware.wait_for_page()
        # The draft discussion must not have been published by the lock toggle.
        self.assertEqual(courseware.num_xblock_components, 0)

    def test_removing_staff_lock_from_unit_without_inherited_lock_shows_warning(self):
        """
        Scenario: Removing explicit staff lock from a unit which does not inherit staff lock displays a warning.
            Given I have a course with a subsection and unit
            When I enable explicit staff lock on the unit
            And I disable explicit staff lock on the unit
            Then I see a modal warning.
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0)
        unit.set_staff_lock(True)
        self._remove_staff_lock_and_verify_warning(unit, True)

    def test_removing_staff_lock_from_subsection_without_inherited_lock_shows_warning(self):
        """
        Scenario: Removing explicit staff lock from a subsection which does not inherit staff lock displays a warning.
            Given I have a course with a section and subsection
            When I enable explicit staff lock on the subsection
            And I disable explicit staff lock on the subsection
            Then I see a modal warning.
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        subsection = self.course_outline_page.section_at(0).subsection_at(0)
        subsection.set_staff_lock(True)
        self._remove_staff_lock_and_verify_warning(subsection, True)

    def test_removing_staff_lock_from_unit_with_inherited_lock_shows_no_warning(self):
        """
        Scenario: Removing explicit staff lock from a unit which also inherits staff lock displays no warning.
            Given I have a course with a subsection and unit
            When I enable explicit staff lock on the subsection
            And I enable explicit staff lock on the unit
            When I disable explicit staff lock on the unit
            Then I do not see a modal warning.
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        subsection = self.course_outline_page.section_at(0).subsection_at(0)
        unit = subsection.unit_at(0)
        subsection.set_staff_lock(True)
        unit.set_staff_lock(True)
        self._remove_staff_lock_and_verify_warning(unit, False)

    def test_removing_staff_lock_from_subsection_with_inherited_lock_shows_no_warning(self):
        """
        Scenario: Removing explicit staff lock from a subsection which also inherits staff lock displays no warning.
            Given I have a course with a section and subsection
            When I enable explicit staff lock on the section
            And I enable explicit staff lock on the subsection
            When I disable explicit staff lock on the subsection
            Then I do not see a modal warning.
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        section = self.course_outline_page.section_at(0)
        subsection = section.subsection_at(0)
        section.set_staff_lock(True)
        subsection.set_staff_lock(True)
        self._remove_staff_lock_and_verify_warning(subsection, False)
@attr('shard_3')
class EditNamesTest(CourseOutlineTest):
    """
    Feature: Click-to-edit section/subsection names
    """

    # NOTE(review): presumably the shared base class sets __test__ = False to
    # keep itself out of collection; this re-enables it here — confirm.
    __test__ = True

    def set_name_and_verify(self, item, old_name, new_name, expected_name):
        """
        Changes the display name of item from old_name to new_name, then verifies that its value is expected_name.
        """
        self.assertEqual(item.name, old_name)
        item.change_name(new_name)
        # Saving the name must also take the field out of its editable state.
        self.assertFalse(item.in_editable_form())
        self.assertEqual(item.name, expected_name)

    def test_edit_section_name(self):
        """
        Scenario: Click-to-edit section name
            Given that I have created a section
            When I click on the name of section
            Then the section name becomes editable
            And given that I have edited the section name
            When I click outside of the edited section name
            Then the section name saves
            And becomes non-editable
        """
        self.course_outline_page.visit()
        self.set_name_and_verify(
            self.course_outline_page.section_at(0),
            'Test Section',
            'Changed',
            'Changed'
        )

    def test_edit_subsection_name(self):
        """
        Scenario: Click-to-edit subsection name
            Given that I have created a subsection
            When I click on the name of subsection
            Then the subsection name becomes editable
            And given that I have edited the subsection name
            When I click outside of the edited subsection name
            Then the subsection name saves
            And becomes non-editable
        """
        self.course_outline_page.visit()
        self.set_name_and_verify(
            self.course_outline_page.section_at(0).subsection_at(0),
            'Test Subsection',
            'Changed',
            'Changed'
        )

    def test_edit_empty_section_name(self):
        """
        Scenario: Click-to-edit section name, enter empty name
            Given that I have created a section
            And I have clicked to edit the name of the section
            And I have entered an empty section name
            When I click outside of the edited section name
            Then the section name does not change
            And becomes non-editable
        """
        self.course_outline_page.visit()
        # An empty name is rejected: the original name must be kept.
        self.set_name_and_verify(
            self.course_outline_page.section_at(0),
            'Test Section',
            '',
            'Test Section'
        )

    def test_edit_empty_subsection_name(self):
        """
        Scenario: Click-to-edit subsection name, enter empty name
            Given that I have created a subsection
            And I have clicked to edit the name of the subsection
            And I have entered an empty subsection name
            When I click outside of the edited subsection name
            Then the subsection name does not change
            And becomes non-editable
        """
        self.course_outline_page.visit()
        # An empty name is rejected: the original name must be kept.
        self.set_name_and_verify(
            self.course_outline_page.section_at(0).subsection_at(0),
            'Test Subsection',
            '',
            'Test Subsection'
        )

    def test_editing_names_does_not_expand_collapse(self):
        """
        Scenario: A section stays in the same expand/collapse state while its name is edited
            Given that I have created a section
            And the section is collapsed
            When I click on the name of the section
            Then the section is collapsed
            And given that I have entered a new name
            Then the section is collapsed
            And given that I press ENTER to finalize the name
            Then the section is collapsed
        """
        self.course_outline_page.visit()
        self.course_outline_page.section_at(0).expand_subsection()
        self.assertFalse(self.course_outline_page.section_at(0).in_editable_form())
        self.assertTrue(self.course_outline_page.section_at(0).is_collapsed)
        self.course_outline_page.section_at(0).edit_name()
        self.assertTrue(self.course_outline_page.section_at(0).in_editable_form())
        self.assertTrue(self.course_outline_page.section_at(0).is_collapsed)
        self.course_outline_page.section_at(0).enter_name('Changed')
        self.assertTrue(self.course_outline_page.section_at(0).is_collapsed)
        self.course_outline_page.section_at(0).finalize_name()
        self.assertTrue(self.course_outline_page.section_at(0).is_collapsed)
@attr('shard_3')
class CreateSectionsTest(CourseOutlineTest):
    """
    Feature: Create new sections/subsections/units
    """

    # NOTE(review): presumably the shared base class sets __test__ = False to
    # keep itself out of collection; this re-enables it here — confirm.
    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """ Start with a completely empty course to easily test adding things to it """
        pass

    def test_create_new_section_from_top_button(self):
        """
        Scenario: Create new section from button at top of page
            Given that I am on the course outline
            When I click the "+ Add section" button at the top of the page
            Then I see a new section added to the bottom of the page
            And the display name is in its editable form.
        """
        self.course_outline_page.visit()
        self.course_outline_page.add_section_from_top_button()
        self.assertEqual(len(self.course_outline_page.sections()), 1)
        self.assertTrue(self.course_outline_page.section_at(0).in_editable_form())

    def test_create_new_section_from_bottom_button(self):
        """
        Scenario: Create new section from button at bottom of page
            Given that I am on the course outline
            When I click the "+ Add section" button at the bottom of the page
            Then I see a new section added to the bottom of the page
            And the display name is in its editable form.
        """
        self.course_outline_page.visit()
        self.course_outline_page.add_section_from_bottom_button()
        self.assertEqual(len(self.course_outline_page.sections()), 1)
        self.assertTrue(self.course_outline_page.section_at(0).in_editable_form())

    def test_create_new_section_from_bottom_button_plus_icon(self):
        """
        Scenario: Create new section from button plus icon at bottom of page
            Given that I am on the course outline
            When I click the plus icon in "+ Add section" button at the bottom of the page
            Then I see a new section added to the bottom of the page
            And the display name is in its editable form.
        """
        self.course_outline_page.visit()
        # Clicking the icon inside the button must behave the same as the button itself.
        self.course_outline_page.add_section_from_bottom_button(click_child_icon=True)
        self.assertEqual(len(self.course_outline_page.sections()), 1)
        self.assertTrue(self.course_outline_page.section_at(0).in_editable_form())

    def test_create_new_subsection(self):
        """
        Scenario: Create new subsection
            Given that I have created a section
            When I click the "+ Add subsection" button in that section
            Then I see a new subsection added to the bottom of the section
            And the display name is in its editable form.
        """
        self.course_outline_page.visit()
        self.course_outline_page.add_section_from_top_button()
        self.assertEqual(len(self.course_outline_page.sections()), 1)
        self.course_outline_page.section_at(0).add_subsection()
        subsections = self.course_outline_page.section_at(0).subsections()
        self.assertEqual(len(subsections), 1)
        self.assertTrue(subsections[0].in_editable_form())

    def test_create_new_unit(self):
        """
        Scenario: Create new unit
            Given that I have created a section
            And that I have created a subsection within that section
            When I click the "+ Add unit" button in that subsection
            Then I am redirected to a New Unit page
            And the display name is in its editable form.
        """
        self.course_outline_page.visit()
        self.course_outline_page.add_section_from_top_button()
        self.assertEqual(len(self.course_outline_page.sections()), 1)
        self.course_outline_page.section_at(0).add_subsection()
        self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)
        self.course_outline_page.section_at(0).subsection_at(0).add_unit()
        # Adding a unit navigates away from the outline to the new unit's page.
        unit_page = ContainerPage(self.browser, None)
        EmptyPromise(unit_page.is_browser_on_page, 'Browser is on the unit page').fulfill()
        self.assertTrue(unit_page.is_inline_editing_display_name())
@attr('shard_3')
class DeleteContentTest(CourseOutlineTest):
    """
    Feature: Deleting sections/subsections/units
    """

    # NOTE(review): presumably the shared base class sets __test__ = False to
    # keep itself out of collection; this re-enables it here — confirm.
    __test__ = True
def test_delete_section(self):
"""
Scenario: Delete section
Given that I am on the course outline
When I click the delete button for a section on the course outline
Then I should receive a confirmation message, asking me if I really want to delete the section
When I click "Yes, I want to delete this component"
Then the confirmation message should close
And the section should immediately be deleted from the course outline
"""
self.course_outline_page.visit()
self.assertEqual(len(self.course_outline_page.sections()), 1)
self.course_outline_page.section_at(0).delete()
self.assertEqual(len(self.course_outline_page.sections()), 0)
    def test_cancel_delete_section(self):
        """
        Scenario: Cancel delete of section
            Given that I clicked the delete button for a section on the course outline
            And I received a confirmation message, asking me if I really want to delete the component
            When I click "Cancel"
            Then the confirmation message should close
            And the section should remain in the course outline
        """
        self.course_outline_page.visit()
        self.assertEqual(len(self.course_outline_page.sections()), 1)
        # Dismiss the confirmation prompt instead of confirming the delete.
        self.course_outline_page.section_at(0).delete(cancel=True)
        self.assertEqual(len(self.course_outline_page.sections()), 1)
def test_delete_subsection(self):
"""
Scenario: Delete subsection
Given that I am on the course outline
When I click the delete button for a subsection on the course outline
Then I should receive a confirmation message, asking me if I really want to delete the subsection
When I click "Yes, I want to delete this component"
Then the confiramtion message should close
And the subsection should immediately be deleted from the course outline
"""
self.course_outline_page.visit()
self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)
self.course_outline_page.section_at(0).subsection_at(0).delete()
self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 0)
def test_cancel_delete_subsection(self):
"""
Scenario: Cancel delete of subsection
Given that I clicked the delete button for a subsection on the course outline
And I received a confirmation message, asking me if I really want to delete the subsection
When I click "cancel"
Then the confirmation message should close
And the subsection should remain in the course outline
"""
self.course_outline_page.visit()
self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)
self.course_outline_page.section_at(0).subsection_at(0).delete(cancel=True)
self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)
def test_delete_unit(self):
"""
Scenario: Delete unit
Given that I am on the course outline
When I click the delete button for a unit on the course outline
Then I should receive a confirmation message, asking me if I really want to delete the unit
When I click "Yes, I want to delete this unit"
Then the confirmation message should close
And the unit should immediately be deleted from the course outline
"""
self.course_outline_page.visit()
self.course_outline_page.section_at(0).subsection_at(0).expand_subsection()
self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 1)
self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).delete()
self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 0)
def test_cancel_delete_unit(self):
"""
Scenario: Cancel delete of unit
Given that I clicked the delete button for a unit on the course outline
And I received a confirmation message, asking me if I really want to delete the unit
When I click "Cancel"
Then the confirmation message should close
And the unit should remain in the course outline
"""
self.course_outline_page.visit()
self.course_outline_page.section_at(0).subsection_at(0).expand_subsection()
self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 1)
self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).delete(cancel=True)
self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 1)
def test_delete_all_no_content_message(self):
"""
Scenario: Delete all sections/subsections/units in a course, "no content" message should appear
Given that I delete all sections, subsections, and units in a course
When I visit the course outline
Then I will see a message that says, "You haven't added any content to this course yet"
Add see a + Add Section button
"""
self.course_outline_page.visit()
self.assertFalse(self.course_outline_page.has_no_content_message)
self.course_outline_page.section_at(0).delete()
self.assertEqual(len(self.course_outline_page.sections()), 0)
self.assertTrue(self.course_outline_page.has_no_content_message)
@attr('shard_3')
class ExpandCollapseMultipleSectionsTest(CourseOutlineTest):
    """
    Feature: Courses with multiple sections can expand and collapse all sections.
    """

    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """ Start with a course with two sections """
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit')
                )
            ),
            XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit 2')
                )
            )
        )

    def verify_all_sections(self, collapsed):
        """
        Verifies that all sections are collapsed if collapsed is True, otherwise all expanded.
        """
        for section in self.course_outline_page.sections():
            self.assertEqual(collapsed, section.is_collapsed)

    def toggle_all_sections(self):
        """
        Toggles the expand collapse state of all sections.
        """
        # NOTE(review): despite the name/docstring, this calls
        # expand_subsection() on every section rather than toggling, and no
        # test in this class uses it -- confirm whether it is dead code or
        # whether expand_subsection() actually toggles in the page object.
        for section in self.course_outline_page.sections():
            section.expand_subsection()

    def test_expanded_by_default(self):
        """
        Scenario: The default layout for the outline page is to show sections in expanded view
            Given I have a course with sections
            When I navigate to the course outline page
            Then I see the "Collapse All Sections" link
            And all sections are expanded
        """
        self.course_outline_page.visit()
        # assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
        self.verify_all_sections(collapsed=False)

    def test_no_expand_link_for_empty_course(self):
        """
        Scenario: Collapse link is removed after last section of a course is deleted
            Given I have a course with multiple sections
            And I navigate to the course outline page
            When I will confirm all alerts
            And I press the "section" delete icon
            Then I do not see the "Collapse All Sections" link
            And I will see a message that says "You haven't added any content to this course yet"
        """
        self.course_outline_page.visit()
        for section in self.course_outline_page.sections():
            section.delete()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
        self.assertTrue(self.course_outline_page.has_no_content_message)

    def test_collapse_all_when_all_expanded(self):
        """
        Scenario: Collapse all sections when all sections are expanded
            Given I navigate to the outline page of a course with sections
            And all sections are expanded
            When I click the "Collapse All Sections" link
            Then I see the "Expand All Sections" link
            And all sections are collapsed
        """
        self.course_outline_page.visit()
        self.verify_all_sections(collapsed=False)
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
        self.verify_all_sections(collapsed=True)

    def test_collapse_all_when_some_expanded(self):
        """
        Scenario: Collapsing all sections when 1 or more sections are already collapsed
            Given I navigate to the outline page of a course with sections
            And all sections are expanded
            When I collapse the first section
            And I click the "Collapse All Sections" link
            Then I see the "Expand All Sections" link
            And all sections are collapsed
        """
        self.course_outline_page.visit()
        self.verify_all_sections(collapsed=False)
        # expand_subsection() here corresponds to the docstring's "collapse
        # the first section" -- presumably the page-object method toggles;
        # TODO confirm against the page object.
        self.course_outline_page.section_at(0).expand_subsection()
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
        self.verify_all_sections(collapsed=True)

    def test_expand_all_when_all_collapsed(self):
        """
        Scenario: Expanding all sections when all sections are collapsed
            Given I navigate to the outline page of a course with multiple sections
            And I click the "Collapse All Sections" link
            When I click the "Expand All Sections" link
            Then I see the "Collapse All Sections" link
            And all sections are expanded
        """
        self.course_outline_page.visit()
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
        self.verify_all_sections(collapsed=False)

    def test_expand_all_when_some_collapsed(self):
        """
        Scenario: Expanding all sections when 1 or more sections are already expanded
            Given I navigate to the outline page of a course with multiple sections
            And I click the "Collapse All Sections" link
            When I expand the first section
            And I click the "Expand All Sections" link
            Then I see the "Collapse All Sections" link
            And all sections are expanded
        """
        self.course_outline_page.visit()
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
        self.course_outline_page.section_at(0).expand_subsection()
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
        self.verify_all_sections(collapsed=False)
@attr('shard_3')
class ExpandCollapseSingleSectionTest(CourseOutlineTest):
    """
    Feature: Courses with a single section can expand and collapse all sections.
    """

    __test__ = True

    def test_no_expand_link_for_empty_course(self):
        """
        Scenario: Collapse link is removed after last section of a course is deleted
            Given I have a course with one section
            And I navigate to the course outline page
            When I will confirm all alerts
            And I press the "section" delete icon
            Then I do not see the "Collapse All Sections" link
            And I will see a message that says "You haven't added any content to this course yet"
        """
        self.course_outline_page.visit()
        self.course_outline_page.section_at(0).delete()
        # assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
        self.assertTrue(self.course_outline_page.has_no_content_message)

    def test_old_subsection_stays_collapsed_after_creation(self):
        """
        Scenario: Collapsed subsection stays collapsed after creating a new subsection
            Given I have a course with one section and subsection
            And I navigate to the course outline page
            Then the subsection is collapsed
            And when I create a new subsection
            Then the first subsection is collapsed
            And the second subsection is expanded
        """
        self.course_outline_page.visit()
        self.assertTrue(self.course_outline_page.section_at(0).subsection_at(0).is_collapsed)
        self.course_outline_page.section_at(0).add_subsection()
        self.assertTrue(self.course_outline_page.section_at(0).subsection_at(0).is_collapsed)
        self.assertFalse(self.course_outline_page.section_at(0).subsection_at(1).is_collapsed)
@attr('shard_3')
class ExpandCollapseEmptyTest(CourseOutlineTest):
    """
    Feature: Courses with no sections initially can expand and collapse all sections after addition.
    """

    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """ Start with an empty course """
        pass

    def test_no_expand_link_for_empty_course(self):
        """
        Scenario: Expand/collapse for a course with no sections
            Given I have a course with no sections
            When I navigate to the course outline page
            Then I do not see the "Collapse All Sections" link
        """
        self.course_outline_page.visit()
        # assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)

    def test_link_appears_after_section_creation(self):
        """
        Scenario: Collapse link appears after creating first section of a course
            Given I have a course with no sections
            When I navigate to the course outline page
            And I add a section
            Then I see the "Collapse All Sections" link
            And all sections are expanded
        """
        self.course_outline_page.visit()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
        self.course_outline_page.add_section_from_top_button()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
        self.assertFalse(self.course_outline_page.section_at(0).is_collapsed)
@attr('shard_3')
class DefaultStatesEmptyTest(CourseOutlineTest):
    """
    Feature: Misc course outline default states/actions when starting with an empty course
    """

    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """ Start with an empty course """
        pass

    def test_empty_course_message(self):
        """
        Scenario: Empty course state
            Given that I am in a course with no sections, subsections, nor units
            When I visit the course outline
            Then I will see a message that says "You haven't added any content to this course yet"
            And see a + Add Section button
        """
        self.course_outline_page.visit()
        self.assertTrue(self.course_outline_page.has_no_content_message)
        self.assertTrue(self.course_outline_page.bottom_add_section_button.is_present())
@attr('shard_3')
class DefaultStatesContentTest(CourseOutlineTest):
    """
    Feature: Misc course outline default states/actions when starting with a course with content
    """

    __test__ = True

    def test_view_live(self):
        """
        Scenario: View Live version from course outline
            Given that I am on the course outline
            When I click the "View Live" button
            Then a new tab will open to the course on the LMS
        """
        self.course_outline_page.visit()
        self.course_outline_page.view_live()
        courseware = CoursewarePage(self.browser, self.course_id)
        courseware.wait_for_page()
        # The expected components come from the default course fixture,
        # which is defined outside this class -- presumably one problem,
        # one html and one discussion block in that order; TODO confirm.
        self.assertEqual(courseware.num_xblock_components, 3)
        self.assertEqual(courseware.xblock_component_type(0), 'problem')
        self.assertEqual(courseware.xblock_component_type(1), 'html')
        self.assertEqual(courseware.xblock_component_type(2), 'discussion')
@attr('shard_3')
class UnitNavigationTest(CourseOutlineTest):
    """
    Feature: Navigate to units
    """

    __test__ = True

    def test_navigate_to_unit(self):
        """
        Scenario: Click unit name to navigate to unit page
            Given that I have expanded a section/subsection so I can see unit names
            When I click on a unit name
            Then I will be taken to the appropriate unit page
        """
        self.course_outline_page.visit()
        self.course_outline_page.section_at(0).subsection_at(0).expand_subsection()
        # go_to() returns the unit page object for the clicked unit.
        unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).go_to()
        self.assertTrue(unit.is_browser_on_page)
@attr('shard_3')
class PublishSectionTest(CourseOutlineTest):
    """
    Feature: Publish sections.
    """

    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """
        Sets up a course structure with 2 subsections inside a single section.
        The first subsection has 2 units, and the second subsection has one unit.
        """
        # Page objects are created in this hook because it runs during test
        # setup, before any test body executes.
        self.courseware = CoursewarePage(self.browser, self.course_id)
        self.course_nav = CourseNavPage(self.browser)
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', SECTION_NAME).add_children(
                XBlockFixtureDesc('sequential', SUBSECTION_NAME).add_children(
                    XBlockFixtureDesc('vertical', UNIT_NAME),
                    XBlockFixtureDesc('vertical', 'Test Unit 2'),
                ),
                XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit 3'),
                ),
            ),
        )

    def test_unit_publishing(self):
        """
        Scenario: Can publish a unit and see published content in LMS
            Given I have a section with 2 subsections and 3 unpublished units
            When I go to the course outline
            Then I see publish button for the first unit, subsection, section
            When I publish the first unit
            Then I see that publish button for the first unit disappears
            And I see publish buttons for subsection, section
            And I see the changed content in LMS
        """
        self._add_unpublished_content()
        self.course_outline_page.visit()
        section, subsection, unit = self._get_items()
        # publish_action is truthy while there are unpublished changes.
        self.assertTrue(unit.publish_action)
        self.assertTrue(subsection.publish_action)
        self.assertTrue(section.publish_action)
        unit.publish()
        self.assertFalse(unit.publish_action)
        self.assertTrue(subsection.publish_action)
        self.assertTrue(section.publish_action)
        self.courseware.visit()
        self.assertEqual(1, self.courseware.num_xblock_components)

    def test_subsection_publishing(self):
        """
        Scenario: Can publish a subsection and see published content in LMS
            Given I have a section with 2 subsections and 3 unpublished units
            When I go to the course outline
            Then I see publish button for the unit, subsection, section
            When I publish the first subsection
            Then I see that publish button for the first subsection disappears
            And I see that publish buttons disappear for the child units of the subsection
            And I see publish button for section
            And I see the changed content in LMS
        """
        self._add_unpublished_content()
        self.course_outline_page.visit()
        section, subsection, unit = self._get_items()
        self.assertTrue(unit.publish_action)
        self.assertTrue(subsection.publish_action)
        self.assertTrue(section.publish_action)
        self.course_outline_page.section(SECTION_NAME).subsection(SUBSECTION_NAME).publish()
        self.assertFalse(unit.publish_action)
        self.assertFalse(subsection.publish_action)
        self.assertTrue(section.publish_action)
        self.courseware.visit()
        self.assertEqual(1, self.courseware.num_xblock_components)
        # The first subsection has two units; check the second one too.
        self.course_nav.go_to_sequential_position(2)
        self.assertEqual(1, self.courseware.num_xblock_components)

    def test_section_publishing(self):
        """
        Scenario: Can publish a section and see published content in LMS
            Given I have a section with 2 subsections and 3 unpublished units
            When I go to the course outline
            Then I see publish button for the unit, subsection, section
            When I publish the section
            Then I see that publish buttons disappears
            And I see the changed content in LMS
        """
        self._add_unpublished_content()
        self.course_outline_page.visit()
        section, subsection, unit = self._get_items()
        self.assertTrue(subsection.publish_action)
        self.assertTrue(section.publish_action)
        self.assertTrue(unit.publish_action)
        self.course_outline_page.section(SECTION_NAME).publish()
        self.assertFalse(subsection.publish_action)
        self.assertFalse(section.publish_action)
        self.assertFalse(unit.publish_action)
        self.courseware.visit()
        self.assertEqual(1, self.courseware.num_xblock_components)
        self.course_nav.go_to_sequential_position(2)
        self.assertEqual(1, self.courseware.num_xblock_components)
        self.course_nav.go_to_section(SECTION_NAME, 'Test Subsection 2')
        self.assertEqual(1, self.courseware.num_xblock_components)

    def _add_unpublished_content(self):
        """
        Adds unpublished HTML content to first three units in the course.
        """
        # xrange: this module targets Python 2 (assertItemsEqual is also
        # used elsewhere in the file).
        for index in xrange(3):
            self.course_fixture.create_xblock(
                self.course_fixture.get_nested_xblocks(category="vertical")[index].locator,
                XBlockFixtureDesc('html', 'Unpublished HTML Component ' + str(index)),
            )

    def _get_items(self):
        """
        Returns first section, subsection, and unit on the page.
        """
        section = self.course_outline_page.section(SECTION_NAME)
        subsection = section.subsection(SUBSECTION_NAME)
        # The subsection must be expanded before its units can be located.
        unit = subsection.expand_subsection().unit(UNIT_NAME)
        return (section, subsection, unit)
@attr('shard_3')
class DeprecationWarningMessageTest(CourseOutlineTest):
    """
    Feature: Verify deprecation warning message.
    """
    # NOTE(review): unlike the sibling test classes, this one does not set
    # ``__test__ = True`` -- confirm whether the runner still collects it.

    # Expected texts of the deprecation warning banner on the outline page.
    HEADING_TEXT = 'This course uses features that are no longer supported.'
    COMPONENT_LIST_HEADING = 'You must delete or replace the following components.'
    ADVANCE_MODULES_REMOVE_TEXT = ('To avoid errors, edX strongly recommends that you remove unsupported features '
                                   'from the course advanced settings. To do this, go to the Advanced Settings '
                                   'page, locate the "Advanced Module List" setting, and then delete the following '
                                   'modules from the list.')
    # Display name shown for a deprecated component whose own name is empty.
    DEFAULT_DISPLAYNAME = "Deprecated Component"

    def _add_deprecated_advance_modules(self, block_types):
        """
        Add `block_types` into `Advanced Module List`

        Arguments:
            block_types (list): list of block types
        """
        self.advanced_settings.visit()
        self.advanced_settings.set_values({"Advanced Module List": json.dumps(block_types)})

    def _create_deprecated_components(self):
        """
        Create deprecated components.
        """
        parent_vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
        self.course_fixture.create_xblock(
            parent_vertical.locator,
            XBlockFixtureDesc('combinedopenended', "Open", data=load_data_str('ora_peer_problem.xml'))
        )
        self.course_fixture.create_xblock(parent_vertical.locator, XBlockFixtureDesc('peergrading', 'Peer'))

    def _verify_deprecation_warning_info(
            self,
            deprecated_blocks_present,
            components_present,
            components_display_name_list=None,
            deprecated_modules_list=None
    ):
        """
        Verify deprecation warning

        Arguments:
            deprecated_blocks_present (bool): deprecated blocks remove text
                and modules list are visible if True else False
            components_present (bool): components list shown if True else False
            components_display_name_list (list): list of components display name
            deprecated_modules_list (list): list of deprecated advance modules
        """
        self.assertTrue(self.course_outline_page.deprecated_warning_visible)
        self.assertEqual(self.course_outline_page.warning_heading_text, self.HEADING_TEXT)
        self.assertEqual(self.course_outline_page.modules_remove_text_shown, deprecated_blocks_present)
        if deprecated_blocks_present:
            self.assertEqual(self.course_outline_page.modules_remove_text, self.ADVANCE_MODULES_REMOVE_TEXT)
            self.assertEqual(self.course_outline_page.deprecated_advance_modules, deprecated_modules_list)
        self.assertEqual(self.course_outline_page.components_visible, components_present)
        if components_present:
            self.assertEqual(self.course_outline_page.components_list_heading, self.COMPONENT_LIST_HEADING)
            # assertItemsEqual: order-insensitive comparison (Python 2 only).
            self.assertItemsEqual(self.course_outline_page.components_display_names, components_display_name_list)

    def test_no_deprecation_warning_message_present(self):
        """
        Scenario: Verify that deprecation warning message is not shown if ORA1
        advance modules are not present and also no ORA1 component exist in
        course outline.

        When I goto course outline
        Then I don't see ORA1 deprecated warning
        """
        self.course_outline_page.visit()
        self.assertFalse(self.course_outline_page.deprecated_warning_visible)

    def test_deprecation_warning_message_present(self):
        """
        Scenario: Verify deprecation warning message if ORA1 advance modules
        and ORA1 components are present.

        Given I have ORA1 advance modules present in `Advanced Module List`
        And I have created 2 ORA1 components
        When I go to course outline
        Then I see ORA1 deprecated warning
        And I see correct ORA1 deprecated warning heading text
        And I see correct ORA1 deprecated warning advance modules remove text
        And I see list of ORA1 components with correct display names
        """
        self._add_deprecated_advance_modules(block_types=['peergrading', 'combinedopenended'])
        self._create_deprecated_components()
        self.course_outline_page.visit()
        self._verify_deprecation_warning_info(
            deprecated_blocks_present=True,
            components_present=True,
            components_display_name_list=['Open', 'Peer'],
            deprecated_modules_list=['peergrading', 'combinedopenended']
        )

    def test_deprecation_warning_with_no_displayname(self):
        """
        Scenario: Verify deprecation warning message if ORA1 components are present.

        Given I have created 1 ORA1 deprecated component
        When I go to course outline
        Then I see ORA1 deprecated warning
        And I see correct ORA1 deprecated warning heading text
        And I see list of ORA1 components with correct message
        """
        parent_vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
        # Create a deprecated ORA1 component with display_name to be empty and make sure
        # the deprecation warning is displayed with the default display name.
        self.course_fixture.create_xblock(
            parent_vertical.locator,
            XBlockFixtureDesc(category='combinedopenended', display_name="", data=load_data_str('ora_peer_problem.xml'))
        )
        self.course_outline_page.visit()
        self._verify_deprecation_warning_info(
            deprecated_blocks_present=False,
            components_present=True,
            components_display_name_list=[self.DEFAULT_DISPLAYNAME],
        )

    def test_warning_with_ora1_advance_modules_only(self):
        """
        Scenario: Verify that deprecation warning message is shown if only
        ORA1 advance modules are present and no ORA1 component exist.

        Given I have ORA1 advance modules present in `Advanced Module List`
        When I go to course outline
        Then I see ORA1 deprecated warning
        And I see correct ORA1 deprecated warning heading text
        And I see correct ORA1 deprecated warning advance modules remove text
        And I don't see list of ORA1 components
        """
        self._add_deprecated_advance_modules(block_types=['peergrading', 'combinedopenended'])
        self.course_outline_page.visit()
        self._verify_deprecation_warning_info(
            deprecated_blocks_present=True,
            components_present=False,
            deprecated_modules_list=['peergrading', 'combinedopenended']
        )

    def test_warning_with_ora1_components_only(self):
        """
        Scenario: Verify that deprecation warning message is shown if only
        ORA1 component exist and no ORA1 advance modules are present.

        Given I have created two ORA1 components
        When I go to course outline
        Then I see ORA1 deprecated warning
        And I see correct ORA1 deprecated warning heading text
        And I don't see ORA1 deprecated warning advance modules remove text
        And I see list of ORA1 components with correct display names
        """
        self._create_deprecated_components()
        self.course_outline_page.visit()
        self._verify_deprecation_warning_info(
            deprecated_blocks_present=False,
            components_present=True,
            components_display_name_list=['Open', 'Peer']
        )
@attr('shard_4')
class SelfPacedOutlineTest(CourseOutlineTest):
    """Test the course outline for a self-paced course."""
    # NOTE(review): this class does not set ``__test__ = True`` while its
    # siblings do -- confirm the runner still collects it.

    def populate_course_fixture(self, course_fixture):
        # One section / subsection / unit, with a future start date so the
        # course has not yet begun.
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', SECTION_NAME).add_children(
                XBlockFixtureDesc('sequential', SUBSECTION_NAME).add_children(
                    XBlockFixtureDesc('vertical', UNIT_NAME)
                )
            ),
        )
        self.course_fixture.add_course_details({
            'self_paced': True,
            'start_date': datetime.now() + timedelta(days=1)
        })
        # Enable the self-paced feature flag for this test run.
        ConfigModelFixture('/config/self_paced', {'enabled': True}).install()

    def test_release_dates_not_shown(self):
        """
        Scenario: Ensure that block release dates are not shown on the
            course outline page of a self-paced course.

        Given I am the author of a self-paced course
        When I go to the course outline
        Then I should not see release dates for course content
        """
        self.course_outline_page.visit()
        section = self.course_outline_page.section(SECTION_NAME)
        self.assertEqual(section.release_date, '')
        subsection = section.subsection(SUBSECTION_NAME)
        self.assertEqual(subsection.release_date, '')

    def test_edit_section_and_subsection(self):
        """
        Scenario: Ensure that block release/due dates are not shown
            in their settings modals.

        Given I am the author of a self-paced course
        When I go to the course outline
        And I click on settings for a section or subsection
        Then I should not see release or due date settings
        """
        self.course_outline_page.visit()
        section = self.course_outline_page.section(SECTION_NAME)
        modal = section.edit()
        self.assertFalse(modal.has_release_date())
        self.assertFalse(modal.has_due_date())
        modal.cancel()
        subsection = section.subsection(SUBSECTION_NAME)
        modal = subsection.edit()
        self.assertFalse(modal.has_release_date())
        self.assertFalse(modal.has_due_date())
|
hamzehd/edx-platform
|
common/test/acceptance/tests/studio/test_studio_outline.py
|
Python
|
agpl-3.0
| 81,400
|
[
"VisIt"
] |
7061765e3a52e6ac16f12c54fffa32257b09c2f9691c956d68aacea98f97e685
|
"""
Brian T. Bailey
ITM 513 - MP4
MP4 Main Driver
"""
FTPHOST = 'glenellyn.rice.iit.edu'
FTPUSER = 'bbailey4'
FTPPSSWD = '@4clibri'
|
briantbailey/ITM-513
|
mp4/src/mp4domain/ftpconfig.py
|
Python
|
mit
| 132
|
[
"Brian"
] |
aa6c67d211db904db2138b6f53e86ec3ef176d190dd79064ea8bc6519c89a803
|
"""
Utilities for reading YAML configurations for SHyFT simulations.
"""
import os
from datetime import datetime
import yaml
import numpy as np
from shyft import api
from shyft.api import pt_gs_k, pt_ss_k, pt_hs_k, hbv_stack
from shyft.repository.netcdf import (
RegionModelRepository, GeoTsRepository, get_geo_ts_collection, yaml_config)
from shyft.repository.interpolation_parameter_repository import (
InterpolationParameterRepository)
from .simulator import DefaultSimulator
utc_calendar = api.Calendar()
"""Invariant global calendar in UTC."""
def utctime_from_datetime(dt):
    """Return the utctime (seconds since epoch) of datetime *dt*, with the
    calendar interpreted as UTC; any tzinfo on *dt* itself is ignored."""
    # Delegate to the module-level invariant UTC calendar.
    fields = (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
    return utc_calendar.time(*fields)
class ConfigError(Exception):
    """Raised when a simulation YAML configuration is missing or invalid."""
    pass
class YAMLConfig(object):
    """
    Simulation configuration read from one section of a YAML file.

    Every key found in the chosen YAML section (plus any keyword-argument
    overrides) is exposed directly as an instance attribute.
    """

    def __init__(self, config_file, config_section, **kwargs):
        """
        Setup a config instance for a netcdf orchestration from a YAML file.

        Parameters
        ----------
        config_file : string
            Path to the YAML configuration file
        config_section : string
            Section in YAML file for simulation parameters.

        Returns
        -------
        YAMLConfig instance

        Raises
        ------
        ConfigError
            If the file path cannot be resolved, or if the required
            'config_dir'/'data_dir' entries are missing or invalid.
        """
        # The config_file needs to be an absolute path or have 'config_dir'
        if os.path.isabs(config_file):
            self._config_file = config_file
            self.config_dir = os.path.dirname(config_file)
        elif "config_dir" in kwargs:
            self._config_file = os.path.join(kwargs["config_dir"], config_file)
        else:
            raise ConfigError(
                "'config_file' must be an absolute path "
                "or 'config_dir' passed as an argument")

        self._config_section = config_section

        # Load main configuration file. safe_load avoids arbitrary Python
        # object construction from the YAML stream; yaml.load without an
        # explicit Loader is unsafe and deprecated since PyYAML 5.1.
        with open(self._config_file) as cfg:
            config = yaml.safe_load(cfg)[config_section]
        # Expose all keys in yaml file as attributes
        self.__dict__.update(config)
        # Override the parameters with kwargs
        self.__dict__.update(kwargs)

        # Check validity of some attributes
        if not hasattr(self, "config_dir"):
            raise ConfigError(
                "'config_dir' must be present in config section "
                "or passed as an argument")
        if not (os.path.isdir(self.config_dir) and
                os.path.isabs(self.config_dir)):
            raise ConfigError(
                "'config_dir' must exist and be an absolute path")
        if not hasattr(self, "data_dir"):
            raise ConfigError(
                "'data_dir' must be present in config section "
                "or passed as an argument")
        if not (os.path.isdir(self.data_dir) and
                os.path.isabs(self.data_dir)):
            raise ConfigError(
                "'data_dir' must exist and be an absolute path")

        # Create a time axis from start_datetime/run_time_step/number_of_steps
        # (all expected to come from the YAML section).
        self.start_time = utctime_from_datetime(self.start_datetime)
        self.time_axis = api.TimeAxisFixedDeltaT(
            self.start_time, self.run_time_step, self.number_of_steps)
        # Get the region model in API (already an object if in kwargs);
        # e.g. "pt_gs_k.PTGSKModel" resolves against the modules imported
        # at the top of this file.
        if 'model_t' not in kwargs:
            module, model_t = self.model_t.split(".")
            self.model_t = getattr(globals()[module], model_t)

    def get_simulator(self):
        """
        Return a DefaultSimulator based on this configuration.

        Returns
        -------
        DefaultSimulator instance
        """
        # Read region, model and datasets config files
        region_config_file = os.path.join(
            self.config_dir, self.region_config_file)
        region_config = yaml_config.RegionConfig(region_config_file)
        model_config_file = os.path.join(
            self.config_dir, self.model_config_file)
        model_config = yaml_config.ModelConfig(model_config_file)
        datasets_config_file = os.path.join(
            self.config_dir, self.datasets_config_file)
        datasets_config = yaml_config.YamlContent(datasets_config_file)

        # Build the repositories the simulator needs
        region_model = RegionModelRepository(
            region_config, model_config, self.model_t, self.epsg)
        interp_repos = InterpolationParameterRepository(model_config)
        geo_ts = get_geo_ts_collection(datasets_config, self.data_dir)

        # If region and interpolation ids are not present, just use fake ones
        region_id = 0 if not hasattr(self, "region_id") else int(self.region_id)
        interpolation_id = 0 if not hasattr(self, "interpolation_id") \
            else int(self.interpolation_id)
        # set up the simulator
        simulator = DefaultSimulator(region_id, interpolation_id, region_model,
                                     geo_ts, interp_repos, None)
        return simulator

    def __repr__(self):
        # Render as "ClassName::section(key=value, ...)". The [:-2] slice
        # drops the trailing ", "; it relies on at least one attribute being
        # present, which is always true after __init__.
        srepr = "%s::%s(" % (self.__class__.__name__, self._config_section)
        for key in self.__dict__:
            srepr += "%s=%r, " % (key, self.__dict__[key])
        srepr = srepr[:-2]
        return srepr + ")"
|
felixmatt/shyft
|
shyft/orchestration/config.py
|
Python
|
lgpl-3.0
| 5,145
|
[
"NetCDF"
] |
a0bb28064d8d0e9637756e587d44f43b8d158095ad0881a39e95c6314e52fcc1
|
from __future__ import division
import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from pybasicbayes.distributions import Regression, Gaussian
from pybasicbayes.util.text import progprint_xrange
from pypolyagamma.distributions import BernoulliRegression
from pyslds.models import HMMCountSLDS, WeakLimitStickyHDPHMMCountSLDS
# Fixed seed for a reproducible example; all subsequent np.random /
# npr draws depend on executing these statements in this exact order.
npr.seed(0)

cmap = "jet"

### Hyperparameters
# K: true number of discrete states; Kmax: truncation level for the
# weak-limit HDP model; D_obs: observation dim; D_latent: latent dim.
K, Kmax, D_obs, D_latent = 2, 10, 10, 2

# Initial latent-state distribution: unit mean on the first coordinate,
# small isotropic covariance.
mu_init = np.zeros(D_latent)
mu_init[0] = 1.0
sigma_init = 0.01 * np.eye(D_latent)

# Create an SLDS with stable dynamics matrices
def random_rotation(n, theta):
    """Return a random n x n matrix that rotates by ``theta`` in a random
    2-D subspace and annihilates the orthogonal complement.

    Built by embedding a 2x2 rotation in the top-left corner of an n x n
    zero matrix and conjugating with a random orthogonal basis.
    """
    c, s = np.cos(theta), np.sin(theta)
    block = np.array([[c, -s],
                      [s, c]])
    embedded = np.zeros((n, n))
    embedded[:2, :2] = block
    basis, _ = np.linalg.qr(np.random.randn(n, n))
    return basis.dot(embedded).dot(basis.T)
# Two dynamics matrices: slow and fast rotations of the latent state.
As = [random_rotation(D_latent, np.pi/24.),
      random_rotation(D_latent, np.pi/8.)]
# Start with a random emission matrix
C = np.random.randn(D_obs, D_latent)
b = -2.0 * np.ones((D_obs, 1))  # negative bias -> mostly-zero Bernoulli obs
init_dynamics_distns = [Gaussian(mu=mu_init, sigma=sigma_init) for _ in range(K)]
dynamics_distns = [Regression(A=A, sigma=0.01*np.eye(D_latent)) for A in As]
emission_distns = BernoulliRegression(D_obs, D_latent, A=C, b=b)
# Ground-truth switching LDS used to generate the synthetic data.
truemodel = HMMCountSLDS(
    dynamics_distns=dynamics_distns,
    emission_distns=emission_distns,
    init_dynamics_distns=init_dynamics_distns,
    alpha=3., init_state_distn='uniform')
### Generate data from an SLDS
# Manually create the states object with the mask
T = 1000
# Alternate discrete states 0,1,0,1,... in blocks of 100 time steps.
stateseq = np.repeat(np.arange(T//100) % 2, 100).astype(np.int32)
statesobj = truemodel._states_class(model=truemodel, T=stateseq.size, stateseq=stateseq)
statesobj.generate_gaussian_states()
data = statesobj.data = statesobj.generate_obs()
# Manually mask off chunks of data
# Cycle through observation dims hiding one dim per 50-step chunk;
# every (D_obs+1)-th chunk hides all dims simultaneously.
mask = np.ones_like(data, dtype=bool)
chunksz = 50
for i,offset in enumerate(range(0,T,chunksz)):
    j = i % (D_obs + 1)
    if j < D_obs:
        mask[offset:min(offset+chunksz, T), j] = False
    if j == D_obs:
        mask[offset:min(offset+chunksz, T), :] = False
statesobj.mask = mask
truemodel.states_list.append(statesobj)
### Make a model
# Weak-limit sticky HDP-HMM SLDS with up to Kmax discrete states; the
# sticky kappa parameter encourages self-transitions. Priors (nu_0, S_0,
# M_0, K_0) are matrix-normal-inverse-Wishart hyperparameters.
model = WeakLimitStickyHDPHMMCountSLDS(
    init_dynamics_distns=
    [Gaussian(
        nu_0=5, sigma_0=3.*np.eye(D_latent),
        mu_0=np.zeros(D_latent), kappa_0=0.01,
        mu=mu_init, sigma=sigma_init
    ) for _ in range(Kmax)],
    dynamics_distns=
    [Regression(
        A=np.eye(D_latent), sigma=np.eye(D_latent),
        nu_0=D_latent+3,
        S_0=D_latent*np.eye(D_latent),
        M_0=np.zeros((D_latent, D_latent)),
        K_0=D_latent*np.eye(D_latent),
    ) for _ in range(Kmax)],
    emission_distns=BernoulliRegression(D_obs, D_latent),
    alpha=3., gamma=3.0, kappa=100., init_state_distn='uniform')
# Fit against the partially-masked data generated above.
model.add_data(data=data, mask=mask)
### Run a Gibbs sampler
N_samples = 500
def gibbs_update(model):
    """One Gibbs sweep: resample all latent variables, then collect
    (log likelihood, first sequence's discrete states, smoothed obs)."""
    model.resample_model()
    smoothed = model.states_list[0].smooth()
    loglik = model.log_likelihood()
    z = model.stateseqs[0]
    return loglik, z, smoothed
# Run the sampler N_samples times, unzipping the per-iteration diagnostics.
lls, z_smpls, smoothed_obss = \
    zip(*[gibbs_update(model) for _ in progprint_xrange(N_samples)])
### Plot the log likelihood over iterations
plt.figure(figsize=(10,6))
plt.plot(lls,'-b')
# Horizontal reference line: log likelihood under the true model.
plt.plot([0,N_samples], truemodel.log_likelihood() * np.ones(2), '-k')
plt.xlabel('iteration')
plt.ylabel('log likelihood')
### Plot the smoothed observations
fig = plt.figure(figsize=(10,10))
N_subplots = min(D_obs,6)
gs = gridspec.GridSpec(N_subplots*2+1,1)
# Top strip: the true discrete state sequence.
zax = fig.add_subplot(gs[0])
zax.imshow(truemodel.stateseqs[0][None,:], aspect='auto', cmap=cmap)
zax.set_ylabel("Discrete \nstate", labelpad=20, multialignment="center", rotation=90)
zax.set_xticklabels([])
zax.set_yticks([])
# Split the data into the observed part (mask True) and the held-out part.
given_data = data.copy()
given_data[~mask] = np.nan
masked_data = data.copy()
masked_data[mask] = np.nan
ylims = (-1.1*abs(data).max(), 1.1*abs(data).max())
xlims = (0, min(T,1000))
n_to_plot = np.arange(min(5, D_obs))
for i,j in enumerate(n_to_plot):
    ax = fig.add_subplot(gs[1+2*i:1+2*(i+1)])
    # Plot spike counts
    given_ts = np.where(given_data[:,j]==1)[0]
    ax.plot(given_ts, np.ones_like(given_ts), 'ko', markersize=5)
    masked_ts = np.where(masked_data[:,j]==1)[0]
    ax.plot(masked_ts, np.ones_like(masked_ts), 'o', markerfacecolor="gray", markeredgecolor="none", markersize=5)
    # Plot the inferred rate
    ax.plot([0], [0], 'b', lw=2, label="smoothed obs.")  # legend placeholder
    ax.plot(smoothed_obss[-1][:,j], 'r', lw=2, label="smoothed pr.")
    # Overlay the mask
    ax.imshow(1-mask[:,j][None,:],cmap="Greys",alpha=0.25,extent=(0,T) + ylims, aspect="auto")
    if i == 0:
        plt.legend(loc="upper center", ncol=4, bbox_to_anchor=(0.5, 2.))
    if i == N_subplots - 1:
        plt.xlabel('time index')
    ax.set_xlim(xlims)
    ax.set_ylim(0, 1.1)
    ax.set_ylabel("$x_%d(t)$" % (j+1))
### Plot the discrete state samples
# One row per Gibbs iteration on top; the true sequence below for comparison.
fig = plt.figure(figsize=(8,4))
gs = gridspec.GridSpec(6,1)
ax1 = fig.add_subplot(gs[:-1])
ax2 = fig.add_subplot(gs[-1])
im = ax1.imshow(np.array(z_smpls), aspect='auto', interpolation="none", cmap=cmap)
ax1.autoscale(False)
ax1.set_ylabel("Iteration")
ax1.set_xticks([])
ax2.imshow(truemodel.stateseqs[0][None,:], aspect='auto', cmap=cmap)
ax2.set_ylabel("True", labelpad=27)
ax2.set_xlabel("Time")
ax2.set_yticks([])
fig.suptitle("Discrete state samples")
plt.show()
|
mattjj/pyhsmm-slds
|
examples/bernoulli_slds.py
|
Python
|
mit
| 5,425
|
[
"Gaussian"
] |
170e8871bf0c4e031b2217f9b286cea9352942af113ad8bee37b5fc7ed95ce94
|
# (c) Nelen & Schuurmans. GPL licensed, see LICENSE.txt.
import datetime
import hashlib
import logging
import time
from django.core.cache import cache
from django.core.cache import get_cache
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.signals import post_delete
from django.db.models.signals import post_save
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from lizard_map.fields import ColorField
from lizard_map.operations import named_list
from lizard_map.operations import tree_from_list
from lizard_map.operations import unique_list
from lizard_map.symbol_manager import list_image_file_names
from lizard_map.utility import get_host
from lizard_map.views import get_view_state
from pytz import UnknownTimeZoneError
from socket import gaierror
from tls import request as tls_request
from xml.parsers.expat import ExpatError
import pytz
from lizard_fewsjdbc import timeout_xmlrpclib
from lizard_fewsjdbc.utils import format_number
# Sentinel parentid used by FEWS for the root of the filter tree
# (see get_filter_tree, where it is the default root_parent).
JDBC_NONE = -999
JDBC_DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
# Cache-key prefixes for the various cached jdbc lookups.
FILTER_CACHE_KEY = 'lizard_fewsjdbc.models.filter_cache_key'
PARAMETER_NAME_CACHE_KEY = 'lizard_fewsjdbc.models.parameter_name_cache_key'
LOCATION_CACHE_KEY = 'lizard_fewsjdbc.layers.location_cache_key'
CACHE_TIMEOUT = 8 * 60 * 60  # Default is 8 hours
# When True, JdbcSource.query() logs a ping/tag-check/query timing breakdown.
LOG_JDBC_QUERIES = True
logger = logging.getLogger(__name__)
class FewsJdbcNotAvailableError(gaierror):
    """Raised in place of a raw ``socket.gaierror`` when the FEWS Jdbc
    server cannot be reached, so the failure is easier to recognize."""

    def __str__(self):
        return 'FEWS Jdbc not available. %s' % gaierror.__str__(self)
class FewsJdbcQueryError(Exception):
    """Raised when the jdbc server answers a query with an integer error
    code (-1 or -2) instead of a result set.

    Carries the offending query and connector string for diagnostics.
    """

    def __init__(self, value, query=None, connector_string=None):
        self.value = value  # integer error code returned by the server
        self.query = query
        self.connector_string = connector_string

    def __str__(self):
        return ('The FEWS jdbc query [%s] to [%s] returned error code %s'
                % (self.query, self.connector_string, self.value))
def lowest_filters(id_value, tree, below_id_value=False):
    """Return the ids of all childless descendants of ``id_value``.

    ``tree`` is a list of nodes, each a dict with at least ``'id'`` and
    ``'children'`` (children having the same node structure). A node is
    collected when it has no children and lies at or below the node whose
    id equals ``id_value``. ``below_id_value`` is used internally to mark
    that the current subtree is already inside the match.
    """
    leaves = []
    for node in tree:
        inside = below_id_value or node['id'] == id_value
        # Depth-first: collect leaves from the subtree first, then the
        # node itself (if it is a matching leaf).
        leaves += lowest_filters(id_value, node['children'],
                                 below_id_value=inside)
        if inside and not node['children']:
            leaves.append(node['id'])
    return leaves
class JdbcSource(models.Model):
"""
Uses Jdbc2Ei to connect to a Jdbc source. Works only for Jdbc2Ei
that is connected to a FEWS-JDBC server.
"""
slug = models.SlugField()
name = models.CharField(max_length=200)
jdbc_url = models.URLField(max_length=200)
jdbc_tag_name = models.CharField(max_length=80)
connector_string = models.CharField(max_length=200)
filter_tree_root = models.CharField(
max_length=80,
blank=True, null=True,
help_text=("Fill in the filter id to use as filter root. "
"Only works if no usecustomfilter."))
usecustomfilter = models.BooleanField(default=False)
customfilter = models.TextField(
blank=True,
null=True,
help_text=(
"Use a pythonic list of dictionaries. "
"The rootnode has 'parentid': None. i.e. "
"[{'id':'id','name':'name','parentid':None}, "
"{'id':'id2','name':'name2','parentid':'id'}]"))
timezone_string = models.CharField(
max_length=40, default="", blank=True,
help_text=("""
Time zone of the datetimes coming from FEWS. Use this only
if the information coming from FEWS itself is
incorrect. An empty string means we trust FEWS. A few
possibilities are UTC, CET (Dutch winter time), CEST
(Dutch summer time) and Europe/Amsterdam (Dutch local time
switching between summer and winter time).
"""))
class Meta:
verbose_name = _("Jdbc source")
verbose_name_plural = _("Jdbc sources")
def __unicode__(self):
return u'%s' % self.name
def query(self, q, is_timeseries_query=False):
"""
Tries to connect to the Jdbc source and fire query. Returns
list of lists.
Throws FewsJdbcQueryError if the server is not reachable and a
FewsJdbcQueryError if the jdbc server returns a ``-1`` or
``-2`` error code.
"""
if '"' in q:
logger.warn(
"You used double quotes in the query. "
"Is it intended? Query: %s", q)
t1 = time.time()
try:
sp = timeout_xmlrpclib.ServerProxy(self.jdbc_url, timeout=29)
# For debugging: add 'verbose=True' in the line above. This prints
# ALL the output.
sp.Ping.isAlive('', '')
except gaierror, e:
# Re-raise as more recognizable error.
raise FewsJdbcNotAvailableError(e)
t2 = time.time()
try:
# Check if jdbc_tag_name is used
sp.Config.get('', '', self.jdbc_tag_name)
except ExpatError:
sp.Config.put('', '', self.jdbc_tag_name, self.connector_string)
t3 = time.time()
if is_timeseries_query:
sp.mark_as_timeseries_query()
result = sp.Query.execute('', '', q, [self.jdbc_tag_name])
t4 = time.time()
if isinstance(result, int):
raise FewsJdbcQueryError(result, q, self.connector_string)
if result == [-2]:
logger.warn("Should not happen. Reinout wants this 'if' "
"removed if it doesn't occur in sentry.")
raise FewsJdbcQueryError(result, q, self.connector_string)
if LOG_JDBC_QUERIES:
ping_time = round(1000 * (t2 - t1))
tag_check_time = round(1000 * (t3 - t2))
query_time = round(1000 * (t4 - t3))
total_time = round(1000 * (t4 - t1))
logger.debug("%dms (%d ping, %d tag check, %d query):\n %s",
total_time, ping_time, tag_check_time, query_time, q)
return result
@property
def _customfilter(self):
return eval(self.customfilter)
def get_filter_tree(self,
url_name='lizard_fewsjdbc.jdbc_source',
ignore_cache=False,
cache_timeout=CACHE_TIMEOUT):
"""
Gets filter tree from Jdbc source. Also adds url per filter
which links to url_name.
[{'name': <name>, 'url': <url>, children: [...]}]
url, children is optional. If url_name is set to None, no url
property will be set in the filter tree (useful if the
standard fewsjdbc urls don't exist, for instance when only the
REST API is used).
Uses cache unless ignore_cache == True. cache_timeout gives
an alternative timeout duration for the cache, in seconds.
"""
filter_source_cache_key = '%s::%s::%s::%s' % (
url_name, FILTER_CACHE_KEY, self.slug, get_host())
filter_tree = cache.get(filter_source_cache_key)
if filter_tree is None or ignore_cache:
# Building up the fews filter tree.
if self.usecustomfilter:
named_filters = self._customfilter
root_parent = None
else:
try:
filters = self.query(
"select distinct id, name, parentid from filters;")
except FewsJdbcNotAvailableError, e:
return [{'name': _('Jdbc2Ei server not available.'),
'error': e}]
except FewsJdbcQueryError, e:
logger.error("JdbcSource returned an error: %s", e,
extra={'jdbc_url': e.jdbc_url})
# ^^^ 'extra' ends up as tags in sentry.
return [{'name': 'Jdbc data source not available.',
'error code': e}]
unique_filters = unique_list(filters)
named_filters = named_list(unique_filters,
['id', 'name', 'parentid'])
if self.filter_tree_root:
root_parent = self.filter_tree_root
else:
root_parent = JDBC_NONE
# Add url per filter. Only if url_name is actually present.
if url_name:
for named_filter in named_filters:
url = reverse(url_name,
kwargs={'jdbc_source_slug': self.slug})
url += '?filter_id=%s' % named_filter['id']
# There used to be a line here that added
# 'ignore_cache=True' to the URL if ignore_cache
# is true. However, the variable controls whether
# we currently ignore the cache, not whether the
# URLs we build ignore it. In normal
# circumstances, the cache should not be ignored.
named_filter['url'] = url
# Make the tree.
filter_tree = tree_from_list(
named_filters,
id_field='id',
parent_field='parentid',
children_field='children',
root_parent=root_parent)
if filter_tree:
# Only cache it when we actually get a tree. Otherwise a
# one-time fewsjdbc error can propagate.
cache.set(filter_source_cache_key, filter_tree, cache_timeout)
return filter_tree
def get_named_parameters(self, filter_id, ignore_cache=False,
find_lowest=True,
url_name='lizard_fewsjdbc.jdbc_source',
cache_timeout=CACHE_TIMEOUT):
"""
Get named parameters given filter_id: [{'name': <filter>,
'parameterid': <parameterid1>, 'parameter': <parameter1>},
...]
The parameters are parameters from the lowest filter below
given filter_id.
If find_lowest is True, then this function first searches for
all the leaf filter nodes below this one, and then returns the
parameters of those. If find_lowest is set to False (for
instance because filter_id is already known to be a leaf),
only parameters directly connected to this filter are
returned.
Uses cache unless ignore_cache == True. cache_timeout gives
an alternative timeout duration for the cache, in seconds.
"""
parameter_cache_key = ('%s::%s::%s::%s' %
(FILTER_CACHE_KEY, self.slug, str(filter_id),
get_host()))
named_parameters = cache.get(parameter_cache_key)
if find_lowest:
filter_names = lowest_filters(
filter_id, self.get_filter_tree(url_name=url_name))
else:
filter_names = (filter_id,)
filter_query = " or ".join(
["id='%s'" % filter_name for filter_name in filter_names])
if ignore_cache or named_parameters is None:
parameter_result = self.query(
("select name, parameterid, parameter, id "
"from filters where %s" % filter_query))
unique_parameters = unique_list(parameter_result)
named_parameters = named_list(
unique_parameters,
['filter_name', 'parameterid', 'parameter', 'filter_id'])
if named_parameters:
# Only cache it when we actually get parameters. Otherwise a
# one-time fewsjdbc error can propagate.
cache.set(parameter_cache_key, named_parameters, cache_timeout)
return named_parameters
def get_filter_name(self, filter_id):
"""Return the filter name corresponding to the given filter
id."""
cache_key = 'filter_name:%s:%s:%s' % (get_host(), filter_id, self.slug)
result = cache.get(cache_key)
if result is None:
result = self.query(
"select distinct name from filters where id='%s'"
% (filter_id,))
if result:
result = result[0][0]
cache.set(cache_key, result, 60 * 60)
return result
def get_parameter_name(self, parameter_id):
"""Return parameter name corresponding to the given parameter
id."""
cache_key = 'parameter_name:%s:%s:%s' % (
get_host(), parameter_id, self.slug)
result = cache.get(cache_key)
if result is None:
result = self.query(
"select distinct parameter from filters where "
"parameterid = '%s'" % (parameter_id,))
if result:
result = result[0][0]
cache.set(cache_key, result, 60 * 60)
return result
def get_locations(self, filter_id, parameter_id,
cache_timeout=CACHE_TIMEOUT):
"""
Query locations from jdbc source and return named locations in
a list.
{'location': '<location name>', 'longitude': <longitude>,
'latitude': <latitude>}
cache_timeout gives an alternative timeout duration for the
cache, in seconds.
"""
location_cache_key = ('%s::%s::%s::%s' %
(LOCATION_CACHE_KEY, filter_id,
parameter_id, get_host()))
named_locations = cache.get(location_cache_key)
if named_locations is None:
query = ("select longitude, latitude, "
"location, locationid "
"from filters "
"where id='%s' and parameterid='%s'" %
(filter_id, parameter_id))
locations = self.query(query)
named_locations = named_list(
locations,
['longitude', 'latitude',
'location', 'locationid'])
if named_locations:
# Only cache when we actually have found locations. It might
# otherwise just be fewsjdbc that tricks us with an empty
# list.
cache.set(location_cache_key, named_locations, cache_timeout)
return named_locations
def location_list(self, filter_id, parameter_id, name=''):
query = (
"select locationid, location "
"from filters "
"where id='{}' and parameterid='{}' and location like '%{}%'"
).format(filter_id, parameter_id, name)
locations = self.query(query)
return locations
def get_timeseries(self, filter_id, location_id, parameter_id,
zoom_start_date, zoom_end_date):
"""Wrapper around _get_timeseries() with some date normalization."""
try:
view_state = get_view_state(tls_request)
view_state_start_date = view_state['dt_start']
view_state_end_date = view_state['dt_end']
assert view_state_start_date is not None # Upon first site visit.
except: # yes, bare except.
view_state_start_date = zoom_start_date
view_state_end_date = zoom_end_date
date_range_size = zoom_end_date - zoom_start_date
if date_range_size < datetime.timedelta(days=7):
# Normalize the start and end date to start at midnight.
normalized_start_date = datetime.datetime(
year=zoom_start_date.year,
month=zoom_start_date.month,
day=zoom_start_date.day,
tzinfo=zoom_start_date.tzinfo)
zoom_end_date_plus_one = zoom_end_date + datetime.timedelta(days=1)
normalized_end_date = datetime.datetime(
year=zoom_end_date_plus_one.year,
month=zoom_end_date_plus_one.month,
day=zoom_end_date_plus_one.day,
tzinfo=zoom_end_date_plus_one.tzinfo)
cache_timeout = 60
else:
# Normalize the start and end date to start at start of the month.
# And do it from the start/end date that's set in the view state.
normalized_start_date = datetime.datetime(
year=view_state_start_date.year,
month=view_state_start_date.month,
day=1,
tzinfo=view_state_start_date.tzinfo)
view_state_end_date_plus_one_month = (view_state_end_date +
datetime.timedelta(days=30))
normalized_end_date = datetime.datetime(
year=view_state_end_date_plus_one_month.year,
month=view_state_end_date_plus_one_month.month,
day=1,
tzinfo=view_state_end_date_plus_one_month.tzinfo)
if ((view_state_end_date - view_state_start_date)
< datetime.timedelta(days=60)):
cache_timeout = 15 * 60
else:
cache_timeout = 20 * 60 * 60
logger.debug("Timeseries req from %s to %s",
zoom_start_date, zoom_end_date)
logger.debug("View state is from %s to %s",
view_state_start_date, view_state_end_date)
logger.debug("We're querying from %s to %s",
normalized_start_date, normalized_end_date)
CACHE_VERSION = 4
cache_key = ':'.join(['get_timeseries',
str(CACHE_VERSION),
str(filter_id),
str(location_id),
str(self.slug),
str(parameter_id),
str(normalized_start_date),
str(normalized_end_date)])
cache_key = hashlib.md5(cache_key).hexdigest()
try:
big_cache = get_cache('big_cache')
# This is supposed to be a filesystem cache: for big items.
except:
big_cache = cache
result = big_cache.get(cache_key)
if result is None:
logger.debug("Cache miss for %s", cache_key)
result = self._get_timeseries(filter_id, location_id,
parameter_id, normalized_start_date,
normalized_end_date)
if result: # Don't store empty results
big_cache.set(cache_key, result, cache_timeout)
logger.debug("Stored the cache for %s", cache_key)
else:
logger.debug("Cache hit for %s", cache_key)
if isinstance(result, dict):
# Corner case due to xmlrpclib returning lists with len(1) as a
# value instead.
result = [result]
result = [row for row in result
if row['time'] >= zoom_start_date
and row['time'] <= zoom_end_date]
if result:
logger.debug("Start date: %s, first returned result's time: %s",
zoom_start_date, result[0]['time'])
return result
def _get_timeseries(self, filter_id, location_id,
parameter_id, start_date, end_date):
q = ("select time, value from "
"extimeseries where filterid='%s' and locationid='%s' "
"and parameterid='%s' and time between '%s' and '%s'" %
(filter_id, location_id, parameter_id,
start_date.strftime(JDBC_DATE_FORMAT),
end_date.strftime(JDBC_DATE_FORMAT)))
return self.query(q, is_timeseries_query=True)
def get_unit(self, parameter_id):
"""
Gets unit for given parameter.
select unit from parameters where id='<parameter_id>'
Assumes 1 row is fetched.
"""
q = ("select unit from parameters where id='%s'" % parameter_id)
query_result = self.query(q)
return query_result[0][0] # First row, first column.
def get_name_and_unit(self, parameter_id):
"""
Gets name and unit for given parameter.
Assumes 1 row is fetched.
"""
cache_key = 'name_and_unit:%s:%s:%s' % (
get_host(), parameter_id, self.slug)
result = cache.get(cache_key)
if result is None:
q = ("select name, unit from parameters where id='%s'"
% parameter_id)
query_result = self.query(q)
result = query_result[0] # First row, first column.
cache.set(cache_key, result, 60 * 60)
return result
def get_absolute_url(self):
return reverse('lizard_fewsjdbc.jdbc_source',
kwargs={'jdbc_source_slug': self.slug})
@cached_property
def timezone(self):
"""Return a tzinfo object for the current JDBC source."""
try:
return pytz.timezone(self.timezone_string)
except UnknownTimeZoneError:
return None
class IconStyle(models.Model):
    """
    Customizable icon styles where all "selector fields" are optional.

    The styles are cached for performance.
    """
    # Selector fields. Empty/NULL means "match anything" for that level.
    jdbc_source = models.ForeignKey(JdbcSource, null=True, blank=True)
    fews_filter = models.CharField(max_length=40, null=True, blank=True)
    fews_location = models.CharField(max_length=40, null=True, blank=True)
    fews_parameter = models.CharField(max_length=40, null=True, blank=True)
    # Icon properties.
    icon = models.CharField(max_length=40, choices=list_image_file_names())
    mask = models.CharField(max_length=40, choices=list_image_file_names())
    color = ColorField(help_text="Use color format ffffff or 333333")
    draw_in_legend = models.BooleanField(default=True)
    class Meta:
        verbose_name = _("Icon style")
        verbose_name_plural = _("Icon styles")
    def __unicode__(self):
        return u'%s' % (self._key)
    @classmethod
    def CACHE_KEY(cls):
        # Per-host cache key: each site gets its own styles/lookup cache.
        return 'lizard_fewsjdbc.IconStyle.%s' % (get_host(), )
    @cached_property
    def _key(self):
        # Selector tuple rendered as "source_id::filter::location::parameter".
        return '%s::%s::%s::%s' % (
            self.jdbc_source.id if self.jdbc_source else '',
            self.fews_filter,
            self.fews_location,
            self.fews_parameter)
    @classmethod
    def _styles(cls):
        """
        Return styles in a symbol manager style in a dict.

        The dict key consist of
        "jdbc_source_id::fews_filter::fews_location::fews_parameter"
        """
        result = {}
        for icon_style in cls.objects.all():
            result[icon_style._key] = {
                'icon': icon_style.icon,
                'mask': (icon_style.mask, ),  # symbol manager wants a tuple
                'color': icon_style.color.to_tuple(),
                'draw_in_legend': icon_style.draw_in_legend
                }
        return result
    @classmethod
    def _lookup(cls):
        """
        Return style lookup dictionary based on class objects.

        This lookup dictionary is cached and it is rebuild every time
        the IconStyle table changes.

        The structure (always) has 4 levels and is used to lookup icon
        styles with fallback in a fast way:

        level 0 (highest) {None: {level1}, <jdbc_source_id>: {level1},
        ... }

        level 1 {None: {level2}, "<fews_filter_id>": {level2}, ...}

        level 2 {None: {level3}, "<fews_location_id>": {level3}, ...}

        level 3 {None: icon_key, "<fews_parameter_id>": icon_key, ...}
        """
        lookup = {}
        # Insert style into lookup
        for style in cls.objects.all():
            # Empty selector fields normalize to None = wildcard level.
            level0 = style.jdbc_source.id if style.jdbc_source else None
            level1 = style.fews_filter if style.fews_filter else None
            level2 = style.fews_location if style.fews_location else None
            level3 = (style.fews_parameter
                      if style.fews_parameter else None)
            if level0 not in lookup:
                lookup[level0] = {}
            if level1 not in lookup[level0]:
                lookup[level0][level1] = {}
            if level2 not in lookup[level0][level1]:
                lookup[level0][level1][level2] = {}
            if level3 not in lookup[level0][level1][level2]:
                lookup[level0][level1][level2][level3] = style._key
            # Every 'breach' needs a 'None' / default side.
            # NOTE(review): these defaults are filled per processed style,
            # inside the loop; with zero styles the lookup stays empty and
            # style() below falls back via its KeyError handler.
            if None not in lookup:
                lookup[None] = {}
            if None not in lookup[level0]:
                lookup[level0][None] = {}
            if None not in lookup[level0][level1]:
                lookup[level0][level1][None] = {}
            if None not in lookup[level0][level1][level2]:
                lookup[level0][level1][level2][None] = '%s::%s::%s::' % (
                    level0 if level0 else '',
                    level1 if level1 else '',
                    level2 if level2 else '')
        return lookup
    @classmethod
    def _styles_lookup(cls, ignore_cache=False):
        # Returns the (styles, lookup) pair, cached for an hour per host.
        cache_lookup = cache.get(cls.CACHE_KEY())
        if cache_lookup is None or ignore_cache:
            # Calculate styles and lookup and store in cache.
            styles = cls._styles()
            lookup = cls._lookup()
            cache_timeout = 60 * 60
            cache.set(cls.CACHE_KEY(), (styles, lookup), cache_timeout)
        else:
            # The cache has a 2-tuple (styles, lookup) stored.
            styles, lookup = cache_lookup
        return styles, lookup
    @classmethod
    def style(
        cls,
        jdbc_source, fews_filter,
        fews_location, fews_parameter,
        styles=None, lookup=None, ignore_cache=False):
        """
        Return the best corresponding icon style and return in format:

        'xx::yy::zz::aa',
        {'icon': 'icon.png',
         'mask': 'mask.png',
         'color': (1,1,1,0),
         'draw_in_legend': True
        }
        """
        if styles is None or lookup is None:
            styles, lookup = cls._styles_lookup(ignore_cache)
        try:
            # Walk the 4 lookup levels, falling back to the None wildcard.
            # NOTE(review): assumes jdbc_source is not None here; a None
            # source would raise AttributeError, which is not caught below.
            level1 = lookup.get(jdbc_source.id, lookup[None])
            level2 = level1.get(fews_filter, level1[None])
            level3 = level2.get(fews_location, level2[None])
            found_key = level3.get(fews_parameter, level3[None])
            result = styles[found_key]
        except KeyError:
            # Default, this only occurs when '::::::' is not defined
            return '::::::', {
                'icon': 'meetpuntPeil.png',
                'mask': ('meetpuntPeil_mask.png', ),
                'color': (0.0, 0.5, 1.0, 1.0),
                'draw_in_legend': True
                }
        return found_key, result
class Threshold(models.Model):
    """
    Contains threshold information for fews objects. Can be used for showing
    threshold lines in fews objects graphs.
    """
    name = models.CharField(verbose_name=_("name"), max_length=100)
    label = models.CharField(max_length=100, help_text=_("Label on plot."),
                             blank=True, null=True, verbose_name=_("label"))
    # Selector fields: which filter/parameter/location this threshold
    # applies to (all optional).
    filter_id = models.CharField(max_length=100, blank=True, null=True)
    parameter_id = models.CharField(max_length=100, blank=True, null=True)
    location_id = models.CharField(max_length=100, blank=True, null=True)
    value = models.DecimalField(max_digits=16, decimal_places=8,
                                verbose_name=_("value"))
    color = models.CharField(
        verbose_name=_("color"),
        max_length=6,
        help_text="rrggbb kleurcode, dus 000000=zwart, ff0000=rood, enz.",
        default='000000')
    class Meta:
        verbose_name = _("threshold")
        verbose_name_plural = _("thresholds")
        ordering = ('id',)
    @property
    def pretty_value(self):
        # Human-readable version of the decimal value for display.
        return format_number(self.value)
    def __unicode__(self):
        return "%s : %s (id: %s)" % (self.name, self.value, self.id)
# For Django 1.3:
# @receiver(post_save, sender=Setting)
# @receiver(post_delete, sender=Setting)
def icon_style_post_save_delete(sender, **kwargs):
    """Signal handler: drop the cached (styles, lookup) pair whenever an
    IconStyle row is saved or deleted, so the next lookup rebuilds it."""
    logger.debug('Changed IconStyle. Invalidating cache for %s...',
                 sender.CACHE_KEY())
    cache.delete(sender.CACHE_KEY())


# Rebuild the cached style lookup on every IconStyle change.
post_save.connect(icon_style_post_save_delete, sender=IconStyle)
post_delete.connect(icon_style_post_save_delete, sender=IconStyle)
|
lizardsystem/lizard-fewsjdbc
|
lizard_fewsjdbc/models.py
|
Python
|
gpl-3.0
| 28,909
|
[
"VisIt"
] |
ff58408149e22b25005c8f50692aea3e54e4292674c8a969d99da7a597f5edec
|
import unittest
from paste.deploy import appconfig
from ecomaps.config.environment import load_environment
from ecomaps.services.netcdf import EcoMapsNetCdfFile, NetCdfService
__author__ = 'Phil Jenkins (Tessella)'
class EcoMapsNetCdfFileTest(unittest.TestCase):
    # NOTE(review): these are integration tests -- they require a running
    # THREDDS server on localhost:8080 / thredds-prod.nerc-lancaster.ac.uk
    # and will fail without network access to those hosts.

    def test_coverage_file_can_be_read(self):
        # Opening a coverage file over OPeNDAP should expose attributes
        # and columns.
        file = EcoMapsNetCdfFile("http://localhost:8080/thredds/dodsC/testAll/Test-New-R-Code-2_2014-02-18T11:05:13.802146.nc")
        self.assertNotEqual(None, file.attributes)
        self.assertNotEqual(None, file.columns)

    def test_result_file_can_be_read(self):
        # Same expectation for a model-result file.
        file = EcoMapsNetCdfFile("http://localhost:8080/thredds/dodsC/testAll/LCM2007_GB_1K_DOM_TAR.nc")
        self.assertNotEqual(None, file.attributes)
        self.assertNotEqual(None, file.columns)

    def test_service_can_filter_attributes(self):
        # Requesting two attribute names should return exactly those two.
        cols = ["title", "description"]
        s = NetCdfService()
        attributes = s.get_attributes("http://localhost:8080/thredds/dodsC/testAll/Test-New-R-Code-2_2014-02-18T11:05:13.802146.nc", cols)
        self.assertEqual(len(attributes.keys()), 2)

    def test_get_column_names_returns_expected_names(self):
        s = NetCdfService()
        columns = s.get_variable_column_names("http://localhost:8080/thredds/dodsC/testAll/LCM2007_GB_1K_DOM_TAR.nc")
        self.assertEqual(columns, ['LandCover'])

    def test_temporal_dataset(self):
        # NOTE(review): no assertion here -- this only checks that opening
        # a temporal dataset does not raise; 'g=0' looks like a leftover
        # debugger breakpoint anchor.
        f = EcoMapsNetCdfFile('http://thredds-prod.nerc-lancaster.ac.uk/thredds/dodsC/ECOMAPSDetail/ECOMAPSInputLOI01.nc')
        g=0

    def test_overlay_point_data(self):
        # Needs a test.ini config file in the working directory.
        conf = appconfig('config:test.ini', relative_to='.')
        test_conf = load_environment(conf.global_conf, conf.local_conf)
        s= NetCdfService(config=test_conf)
        self.assertNotEqual(None, s.overlay_point_data('http://thredds-prod.nerc-lancaster.ac.uk/thredds/dodsC/ECOMAPSDetail/ECOMAPSInputLOI01.nc'))
|
NERC-CEH/ecomaps
|
ecomaps/services/tests/netcdf_test.py
|
Python
|
gpl-2.0
| 1,902
|
[
"NetCDF"
] |
fed79b7f4913fe1e5155323713ebf36e21981ba5b5686216978b9640c2810a8e
|
# Map from VTK to enable key names
# Special cases whose enable name differs from the VTK name.
_RENAMED = {
    "Return": "Enter",
    "Prior": "Page Up",
    "Next": "Page Down",
    "Caps_Lock": "Caps Lock",
    "Num_Lock": "Num Lock",
    "Scroll_Lock": "Scroll Lock",
}

# All recognized VTK key names, in the original declaration order.
_VTK_KEY_NAMES = (
    "Esc", "Tab", "Backtab", "Backspace", "Return", "Enter", "Insert",
    "Delete", "Pause", "Print", "Sysreq", "Clear", "Home", "End",
    "Left", "Up", "Right", "Down", "Prior", "Next", "Meta",
    "Caps_Lock", "Num_Lock", "Scroll_Lock",
) + tuple("F%d" % i for i in range(1, 13))

# Every key maps to itself unless it has an explicit rename above.
KEY_MAP = dict((vtk_name, _RENAMED.get(vtk_name, vtk_name))
               for vtk_name in _VTK_KEY_NAMES)
|
tommy-u/enable
|
enable/vtk_backend/constants.py
|
Python
|
bsd-3-clause
| 799
|
[
"VTK"
] |
af7ec748cb033a1ef02655791e752d5778a79f1a66724eb6d4173089d301779c
|
#! /usr/bin/env python
# findlinksto
#
# find symbolic links to a path matching a regular expression
import os
import sys
import regex
import getopt
def main():
    # Parse the command line (Python 2 syntax: this whole script predates
    # Python 3 and uses the long-removed 'regex' module).
    # Usage: findlinksto pattern directory ...
    try:
        opts, args = getopt.getopt(sys.argv[1:], '')
        if len(args) < 2:
            raise getopt.error, 'not enough arguments'
    except getopt.error, msg:
        # Report usage errors on stderr and exit with status 2.
        sys.stdout = sys.stderr
        print msg
        print 'usage: findlinksto pattern directory ...'
        sys.exit(2)
    pat, dirs = args[0], args[1:]
    prog = regex.compile(pat)
    # Walk each directory tree, printing symlinks whose target matches.
    for dirname in dirs:
        os.path.walk(dirname, visit, prog)
def visit(prog, dirname, names):
    # os.path.walk callback: print every symlink in dirname whose target
    # matches the compiled pattern 'prog'.
    if os.path.islink(dirname):
        # Don't descend into directories reached via a symlink
        # (prevents cycles); clearing 'names' stops the walk here.
        names[:] = []
        return
    if os.path.ismount(dirname):
        print 'descend into', dirname
    for name in names:
        name = os.path.join(dirname, name)
        try:
            linkto = os.readlink(name)
            # Old 'regex' module: search() returns -1 on no match.
            if prog.search(linkto) >= 0:
                print name, '->', linkto
        except os.error:
            # Not a symlink (or unreadable) -- skip silently.
            pass

main()
|
sensysnetworks/uClinux
|
user/python/Tools/scripts/findlinksto.py
|
Python
|
gpl-2.0
| 870
|
[
"VisIt"
] |
f32d4450d23b62b93d0efeb4c44402eb6b746f2ee8bf8296a426d7679c6fedde
|
########################################################################
# $HeadURL$
########################################################################
""" SandboxMetadataDB class is a front-end to the metadata for sandboxes
"""
__RCSID__ = "$Id$"
import types
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Base.DB import DB
from DIRAC.Core.Utilities import List
from DIRAC.Core.Security import Properties, CS
class SandboxMetadataDB( DB ):
  def __init__( self, maxQueueSize = 10 ):
    # Open the SandboxMetadataDB connection and make sure its tables exist.
    # maxQueueSize is passed through to the base DB connection handling.
    DB.__init__( self, 'SandboxMetadataDB', 'WorkloadManagement/SandboxMetadataDB', maxQueueSize )
    result = self.__initializeDB()
    if not result[ 'OK' ]:
      raise RuntimeError( "Can't create tables: %s" % result[ 'Message' ] )
    # Grace periods in days before sandboxes become eligible for cleanup.
    # NOTE(review): presumably consumed by an expiration routine elsewhere
    # in this class -- confirm before changing.
    self.__assignedSBGraceDays = 0
    self.__unassignedSBGraceDays = 15
def __initializeDB( self ):
"""
Create the tables
"""
result = self._query( "show tables" )
if not result[ 'OK' ]:
return result
tablesInDB = [ t[0] for t in result[ 'Value' ] ]
tablesToCreate = {}
self.__tablesDesc = {}
self.__tablesDesc[ 'sb_Owners' ] = { 'Fields' : { 'OwnerId' : 'INTEGER UNSIGNED AUTO_INCREMENT NOT NULL',
'Owner' : 'VARCHAR(32) NOT NULL',
'OwnerDN' : 'VARCHAR(255) NOT NULL',
'OwnerGroup' : 'VARCHAR(32) NOT NULL',
},
'PrimaryKey' : 'OwnerId',
}
self.__tablesDesc[ 'sb_SandBoxes' ] = { 'Fields' : { 'SBId' : 'INTEGER UNSIGNED AUTO_INCREMENT NOT NULL',
'OwnerId' : 'INTEGER UNSIGNED NOT NULL',
'SEName' : 'VARCHAR(64) NOT NULL',
'SEPFN' : 'VARCHAR(512) NOT NULL',
'Bytes' : 'BIGINT NOT NULL DEFAULT 0',
'RegistrationTime' : 'DATETIME NOT NULL',
'LastAccessTime' : 'DATETIME NOT NULL',
'Assigned' : 'TINYINT NOT NULL DEFAULT 0',
},
'PrimaryKey' : 'SBId',
'Indexes': { 'SBOwner': [ 'OwnerId' ],
},
'UniqueIndexes' : { 'Location' : [ 'SEName', 'SEPFN' ] }
}
self.__tablesDesc[ 'sb_EntityMapping' ] = { 'Fields' : { 'SBId' : 'INTEGER UNSIGNED NOT NULL',
'EntitySetup' : 'VARCHAR(64) NOT NULL',
'EntityId' : 'VARCHAR(128) NOT NULL',
'Type' : 'VARCHAR(64) NOT NULL',
},
'Indexes': { 'Entity': [ 'EntityId', 'EntitySetup' ],
'SBIndex' : [ 'SBId' ]
},
'UniqueIndexes' : { 'Mapping' : [ 'SBId', 'EntitySetup', 'EntityId', 'Type' ] }
}
for tableName in self.__tablesDesc:
if not tableName in tablesInDB:
tablesToCreate[ tableName ] = self.__tablesDesc[ tableName ]
return self._createTables( tablesToCreate )
def registerAndGetOwnerId( self, owner, ownerDN, ownerGroup ):
"""
Get the owner ID and register it if it's not there
"""
sqlCmd = "SELECT OwnerId FROM `sb_Owners` WHERE Owner='%s' AND OwnerDN='%s' AND OwnerGroup='%s'" % ( owner,
ownerDN,
ownerGroup )
result = self._query( sqlCmd )
if not result[ 'OK' ]:
return result
data = result[ 'Value' ]
if len( data ) > 0:
return S_OK( data[0][0] )
#Its not there, insert it
sqlCmd = "INSERT INTO `sb_Owners` ( OwnerId, Owner, OwnerDN, OwnerGroup ) VALUES ( 0, '%s', '%s', '%s' )" % ( owner,
ownerDN,
ownerGroup )
result = self._update( sqlCmd )
if not result[ 'OK' ]:
return result
if 'lastRowId' in result:
return S_OK( result[ 'lastRowId' ] )
result = self._query( "SELECT LAST_INSERT_ID()" )
if not result[ 'OK' ]:
return S_ERROR( "Can't determine owner id after insertion" )
return S_OK( result[ 'Value' ][0][0] )
def registerAndGetSandbox( self, owner, ownerDN, ownerGroup, sbSE, sbPFN, size = 0 ):
"""
Register a new sandbox in the metadata catalog
Returns ( sbid, newSandbox )
"""
result = self.registerAndGetOwnerId( owner, ownerDN, ownerGroup )
if not result[ 'OK' ]:
return result
ownerId = result[ 'Value' ]
sqlCmd = "INSERT INTO `sb_SandBoxes` ( SBId, OwnerId, SEName, SEPFN, Bytes, RegistrationTime, LastAccessTime )"
sqlCmd = "%s VALUES ( 0, '%s', '%s', '%s', %d, UTC_TIMESTAMP(), UTC_TIMESTAMP() )" % ( sqlCmd, ownerId, sbSE,
sbPFN, size )
result = self._update( sqlCmd )
if not result[ 'OK' ]:
if result[ 'Message' ].find( "Duplicate entry" ) == -1 :
return result
#It's a duplicate, try to retrieve sbid
sqlCond = [ "SEPFN='%s'" % sbPFN, "SEName='%s'" % sbSE, "OwnerId='%s'" % ownerId ]
sqlCmd = "SELECT SBId FROM `sb_SandBoxes` WHERE %s" % " AND ".join( sqlCond )
result = self._query( sqlCmd )
if not result[ 'OK' ]:
return result
if len( result[ 'Value' ] ) == 0:
return S_ERROR( "Location %s already exists but doesn't belong to the user or setup" )
sbId = result[ 'Value' ][0][0]
self.accessedSandboxById( sbId )
return S_OK( ( sbId, False ) )
#Inserted, time to get the id
if 'lastRowId' in result:
return S_OK( ( result['lastRowId'], True ) )
result = self._query( "SELECT LAST_INSERT_ID()" )
if not result[ 'OK' ]:
return S_ERROR( "Can't determine sand box id after insertion" )
return S_OK( ( result[ 'Value' ][0][0], True ) )
def accessedSandboxById( self, sbId ):
"""
Update last access time for sb id
"""
return self.__accessedSandboxByCond( { 'SBId': sbId } )
def accessedSandboxByLocation( self, seName, sePFN ):
"""
Update last access time for location
"""
return self.__accessedSandboxByCond( { 'SEName': self._escapeString( seName )[ 'Value' ],
'SEPFN': self._escapeString( sePFN )[ 'Value' ],
} )
def __accessedSandboxByCond( self, condDict ):
sqlCond = [ "%s=%s" % ( key, condDict[ key ] ) for key in condDict ]
return self._update( "UPDATE `sb_SandBoxes` SET LastAccessTime=UTC_TIMESTAMP() WHERE %s" % " AND ".join( sqlCond ) )
def assignSandboxesToEntities( self, enDict, requesterName, requesterGroup, enSetup, ownerName = "", ownerGroup = "" ):
"""
Assign jobs to entities
"""
if ownerName or ownerGroup:
requesterProps = CS.getPropertiesForEntity( requesterGroup, name = requesterName )
if Properties.JOB_ADMINISTRATOR in requesterProps:
if ownerName:
requesterName = ownerName
if ownerGroup:
requesterGroup = ownerGroup
entitiesToSandboxList = []
for entityId in enDict:
for sbTuple in enDict[ entityId ]:
if type( sbTuple ) not in ( types.TupleType, types.ListType ):
return S_ERROR( "Entry for entity %s is not a itterable of tuples/lists" % entityId )
if len( sbTuple ) != 2:
return S_ERROR( "SB definition is not ( SBLocation, Type )! It's '%s'" % str( sbTuple ) )
SBLocation = sbTuple[0]
if SBLocation.find( "SB:" ) != 0:
return S_ERROR( "%s doesn't seem to be a sandbox" % SBLocation )
SBLocation = SBLocation[3:]
splitted = List.fromChar( SBLocation, "|" )
if len( splitted ) < 2:
return S_ERROR( "SB Location has to have SEName|SEPFN form" )
SEName = splitted[0]
SEPFN = ":".join( splitted[1:] )
entitiesToSandboxList.append( ( entityId, enSetup, sbTuple[1], SEName, SEPFN ) )
if not entitiesToSandboxList:
return S_OK()
sbIds = []
assigned = 0
for entityId, entitySetup, SBType, SEName, SEPFN in entitiesToSandboxList:
result = self.getSandboxId( SEName, SEPFN, requesterName, requesterGroup )
insertValues = []
if not result[ 'OK' ]:
self.log.warn( "Cannot find id for %s:%s with requester %s@%s" % ( SEName, SEPFN, requesterName, requesterGroup ) )
else:
sbId = result['Value' ]
sbIds.append( str( sbId ) )
insertValues.append( "( %s, %s, %s, %d )" % ( self._escapeString( entityId )[ 'Value' ],
self._escapeString( entitySetup )[ 'Value' ],
self._escapeString( SBType )[ 'Value' ],
sbId ) )
if not insertValues:
return S_ERROR( "Sandbox does not exist or you're not authorized to assign it being %s@%s" % ( requesterName, requesterGroup ) )
sqlCmd = "INSERT INTO `sb_EntityMapping` ( entityId, entitySetup, Type, SBId ) VALUES %s" % ", ".join( insertValues )
result = self._update( sqlCmd )
if not result[ 'OK' ]:
if result[ 'Message' ].find( "Duplicate entry" ) == -1:
return result
assigned += 1
sqlCmd = "UPDATE `sb_SandBoxes` SET Assigned=1 WHERE SBId in ( %s )" % ", ".join( sbIds )
result = self._update( sqlCmd )
if not result[ 'OK' ]:
return result
return S_OK( assigned )
def __filterEntitiesByRequester( self, entitiesList, entitiesSetup, requesterName, requesterGroup ):
"""
Given a list of entities and a requester, return the ones that the requester is allowed to modify
"""
sqlCond = [ "s.OwnerId=o.OwnerId" , "s.SBId=e.SBId", "e.EntitySetup=%s" % entitiesSetup ]
requesterProps = CS.getPropertiesForEntity( requesterGroup, name = requesterName )
if Properties.JOB_ADMINISTRATOR in requesterProps:
#Do nothing, just ensure it doesn't fit in the other cases
pass
elif Properties.JOB_SHARING in requesterProps:
sqlCond.append( "o.OwnerGroup='%s'" % requesterGroup )
elif Properties.NORMAL_USER in requesterProps:
sqlCond.append( "o.OwnerGroup='%s'" % requesterGroup )
sqlCond.append( "o.Owner='%s'" % requesterName )
else:
return S_ERROR( "Not authorized to access sandbox" )
for i in range( len( entitiesList ) ):
entitiesList[i] = self._escapeString( entitiesList[ i ] )[ 'Value' ]
if len( entitiesList ) == 1:
sqlCond.append( "e.EntityId = %s" % entitiesList[0] )
else:
sqlCond.append( "e.EntityId in ( %s )" % ", ".join( entitiesList ) )
sqlCmd = "SELECT DISTINCT e.EntityId FROM `sb_EntityMapping` e, `sb_SandBoxes` s, `sb_Owners` o WHERE"
sqlCmd = "%s %s" % ( sqlCmd, " AND ".join( sqlCond ) )
result = self._query( sqlCmd )
if not result[ 'OK' ]:
return result
return S_OK( [ row[0] for row in result[ 'Value' ] ] )
def unassignEntities( self, entitiesDict, requesterName, requesterGroup ):
"""
Unassign jobs to sandboxes
entitiesDict = { 'setup' : [ 'entityId', 'entityId' ] }
"""
updated = 0
for entitySetup in entitiesDict:
entitiesIds = entitiesDict[ entitySetup ]
if not entitiesIds:
continue
escapedSetup = self._escapeString( entitySetup )[ 'Value' ]
result = self.__filterEntitiesByRequester( entitiesIds, escapedSetup, requesterName, requesterGroup )
if not result[ 'OK' ]:
gLogger.error( "Cannot filter entities: %s" % result[ 'Message' ] )
continue
ids = result[ 'Value' ]
if not ids:
return S_OK( 0 )
sqlCond = [ "EntitySetup = %s" % escapedSetup ]
sqlCond.append( "EntityId in ( %s )" % ", ".join ( [ "'%s'" % str( eid ) for eid in ids ] ) )
sqlCmd = "DELETE FROM `sb_EntityMapping` WHERE %s" % " AND ".join( sqlCond )
result = self._update( sqlCmd )
if not result[ 'OK' ]:
gLogger.error( "Cannot unassign entities: %s" % result[ 'Message' ] )
else:
updated += 1
return S_OK( updated )
def getSandboxesAssignedToEntity( self, entityId, entitySetup, requesterName, requesterGroup ):
"""
Get the sandboxes and the type of assignation to the jobId
"""
sqlTables = [ "`sb_SandBoxes` s", "`sb_EntityMapping` e" ]
sqlCond = [ "s.SBId = e.SBId",
"e.EntityId = %s" % self._escapeString( entityId )[ 'Value' ],
"e.EntitySetup = %s" % self._escapeString( entitySetup )[ 'Value' ] ]
requesterProps = CS.getPropertiesForEntity( requesterGroup, name = requesterName )
if Properties.JOB_ADMINISTRATOR in requesterProps:
#Do nothing, just ensure it doesn't fit in the other cases
pass
elif Properties.JOB_SHARING in requesterProps:
sqlTables.append( "`sb_Owners` o" )
sqlCond.append( "o.OwnerGroup='%s'" % requesterGroup )
sqlCond.append( "s.OwnerId=o.OwnerId" )
elif Properties.NORMAL_USER in requesterProps:
sqlTables.append( "`sb_Owners` o" )
sqlCond.append( "o.OwnerGroup='%s'" % requesterGroup )
sqlCond.append( "o.Owner='%s'" % requesterName )
sqlCond.append( "s.OwnerId=o.OwnerId" )
else:
return S_ERROR( "Not authorized to access sandbox" )
sqlCmd = "SELECT DISTINCT s.SEName, s.SEPFN, e.Type FROM %s WHERE %s" % ( ", ".join( sqlTables ),
" AND ".join( sqlCond ) )
return self._query( sqlCmd )
def getUnusedSandboxes( self ):
"""
Get sandboxes that have been assigned but the job is no longer there
"""
sqlCond = [ "Assigned AND SBId NOT IN ( SELECT SBId FROM `sb_EntityMapping` ) AND TIMESTAMPDIFF( DAY, LastAccessTime, UTC_TIMESTAMP() ) >= %d" % self.__assignedSBGraceDays,
"! Assigned AND TIMESTAMPDIFF( DAY, LastAccessTime, UTC_TIMESTAMP() ) >= %s" % self.__unassignedSBGraceDays]
sqlCmd = "SELECT SBId, SEName, SEPFN FROM `sb_SandBoxes` WHERE ( %s )" % " ) OR ( ".join( sqlCond )
return self._query( sqlCmd )
def deleteSandboxes( self, SBIdList ):
"""
Delete sandboxes
"""
sqlSBList = ", ".join( [ str( sbid ) for sbid in SBIdList ] )
for table in ( 'sb_SandBoxes', 'sb_EntityMapping' ):
sqlCmd = "DELETE FROM `%s` WHERE SBId IN ( %s )" % ( table, sqlSBList )
result = self._update( sqlCmd )
if not result[ 'OK' ]:
return result
return S_OK()
def setLocation( self, SBId, location ):
"""
Set the Location for a sandbox
"""
return self._update( "UPDATE `sb_SandBoxes` SET Location='%s' WHERE SBId = %s" % ( location, SBId ) )
def getSandboxId( self, SEName, SEPFN, requesterName, requesterGroup ):
"""
Get the sandboxId if it exists
"""
sqlCond = [ "s.SEPFN=%s" % self._escapeString( SEPFN )['Value'],
"s.SEName=%s" % self._escapeString( SEName )['Value'],
's.OwnerId=o.OwnerId' ]
sqlCmd = "SELECT s.SBId FROM `sb_SandBoxes` s, `sb_Owners` o WHERE"
requesterProps = CS.getPropertiesForEntity( requesterGroup, name = requesterName )
if Properties.JOB_ADMINISTRATOR in requesterProps:
#Do nothing, just ensure it doesn't fit in the other cases
pass
elif Properties.JOB_SHARING in requesterProps:
sqlCond.append( "o.OwnerGroup='%s'" % requesterGroup )
elif Properties.NORMAL_USER in requesterProps:
sqlCond.append( "o.OwnerGroup='%s'" % requesterGroup )
sqlCond.append( "o.Owner='%s'" % requesterName )
else:
return S_ERROR( "Not authorized to access sandbox" )
result = self._query( "%s %s" % ( sqlCmd, " AND ".join( sqlCond ) ) )
if not result[ 'OK' ]:
return result
data = result[ 'Value' ]
if len( data ) > 1:
self.log.error( "More than one sandbox registered with the same Id!", data )
if len( data ) == 0:
return S_ERROR( "No sandbox matches the requirements" )
return S_OK( data[0][0] )
|
avedaee/DIRAC
|
WorkloadManagementSystem/DB/SandboxMetadataDB.py
|
Python
|
gpl-3.0
| 17,132
|
[
"DIRAC"
] |
f9cce57d000ea75067e4ec2aa9b33b91ff796bf4fc4f63d42d8348e15cbf63f6
|
''' <h1> Library for specular magnetic x-ray reflectivity</h1>
The magnetic reflectivity is calculated according to: S.A. Stephanov and S.K Shina PRB 61 15304.
Note: The documentation is not updated from the interdiff model!
<h2>Classes</h2>
<h3>Layer</h3>
<code> Layer(b = 0.0, d = 0.0, f = 0.0+0.0J, dens = 1.0, magn_ang = 0.0, magn = 0.0, sigma = 0.0)</code>
<dl>
<dt><code><b>d</b></code></dt>
<dd>The thickness of the layer in AA (Angstroms = 1e-10m)</dd>
<dt><code><b>f</b></code></dt>
<dd>The x-ray scattering length per formula unit in electrons. To be strict it is the
number of Thompson scattering lengths for each formula unit.</dd>
<dt><code><b>dens</b></code></dt>
<dd>The density of formula units in units per Angstroms. Note the units!</dd>
<dt><code><b>sigmai</b></code></dt>
<dd>The root mean square <em>interdiffusion</em> of the top interface of the layer in Angstroms.</dd>
<dt><code><b>sigmar</b></code></dt>
<dd>The root mean square <em>roughness</em> of the top interface of the layer in Angstroms.</dd>
</dl>
<h3>Stack</h3>
<code> Stack(Layers = [], Repetitions = 1)</code>
<dl>
<dt><code><b>Layers</b></code></dt>
<dd>A <code>list</code> consiting of <code>Layer</code>s in the stack
the first item is the layer closest to the bottom</dd>
<dt><code><b>Repetitions</b></code></dt>
<dd>The number of repsetions of the stack</dd>
</dl>
<h3>Sample</h3>
<code> Sample(Stacks = [], Ambient = Layer(), Substrate = Layer(), eta_z = 10.0,
eta_x = 10.0, h = 1.0)</code>
<dl>
<dt><code><b>Stacks</b></code></dt>
<dd>A <code>list</code> consiting of <code>Stack</code>s in the stacks
the first item is the layer closest to the bottom</dd>
<dt><code><b>Ambient</b></code></dt>
<dd>A <code>Layer</code> describing the Ambient (enviroment above the sample).
Only the scattering lengths and density of the layer is used.</dd>
<dt><code><b>Substrate</b></code></dt>
<dd>A <code>Layer</code> describing the substrate (enviroment below the sample).
Only the scattering lengths, density and roughness of the layer is used.</dd>
<dt><code><b>eta_z</b></code></dt>
<dd>The out-of plane (vertical) correlation length of the roughness
in the sample. Given in AA. </dd>
<dt><code><b>eta_x</b></code></dt>
<dd>The in-plane global correlation length (it is assumed equal for all layers).
Given in AA.</dd>
<dt><code><b>h</b></code></dt>
<dd>The jaggedness parameter, should be between 0 and 1.0. This describes
how jagged the interfaces are. This is also a global parameter for all
interfaces.</dd>
</dl>
<h3>Instrument</h3>
<code>Instrument(wavelength = 1.54, coords = 'tth',
I0 = 1.0 res = 0.001, restype = 'no conv', respoints = 5, resintrange = 2,
beamw = 0.01, footype = 'no corr', samplelen = 10.0, taylor_n = 1)</code>
<dl>
<dt><code><b>wavelength</b></code></dt>
<dd>The wavalelngth of the radiation givenin AA (Angstroms)</dd>
<dt><code><b>coords</b></code></dt>
<dd>The coordinates of the data given to the SimSpecular function.
The available alternatives are: 'q' or 'tth'. Alternatively the numbers
0 (q) or 1 (tth) can be used.</dd>
<dt><code><b>I0</b></code></dt>
<dd>The incident intensity (a scaling factor)</dd>
<dt><code><b>Ibkg</b></code></dt>
<dd>The background intensity. Added as a constant value to the calculated
reflectivity</dd>
<dt><code><b>res</b></code></dt>
<dd>The resolution of the instrument given in the coordinates of
<code>coords</code>. This assumes a gaussian reloution function and
<code>res</code> is the standard deviation of that gaussian.</dd>
<dt><code><b>restype</b></code></dt>
<dd>Describes the rype of the resolution calculated. One of the alterantives:
'no conv', 'fast conv', 'full conv and varying res.' or 'fast conv + varying res.'.
The respective numbers 0-3 also works. Note that fast convolution only alllows
a single value into res wheras the other can also take an array with the
same length as the x-data (varying resolution)</dd>
<dt><code><b>respoints</b></code></dt>
<dd>The number of points to include in the resolution calculation. This is only
used for 'full conv and vaying res.' and 'fast conv + varying res'</dd>
<dt><code><b>resintrange</b></code></dt>
<dd>Number of standard deviatons to integrate the resolution fucntion times
the relfectivty over</dd>
<dt><code><b>footype</b></code></dt>
<dd>Which type of footprint correction is to be applied to the simulation.
One of: 'no corr', 'gauss beam' or 'square beam'. Alternatively,
the number 0-2 are also valid. The different choices are self explanatory.
</dd>
<dt><code><b>beamw</b></code></dt>
<dd>The width of the beam given in mm. For 'gauss beam' it should be
the standard deviation. For 'square beam' it is the full width of the beam.</dd>
<dt><code><b>samplelen</b></code></dt>
<dd>The length of the sample given in mm</dd>
<dt><code><b>taylor_n</b></code></dt>
<dd>The number terms taken into account in the taylor expansion of
the fourier integral of the correlation function. More terms more accurate
calculation but also much slower.</dd>
'''
import lib.xrmr
from numpy import *
from scipy.special import erf
from lib.instrument import *
# Preamble to define the parameters needed for the models outlined below:
ModelID='StephanovXRMR'
# Automatic loading of parameters possible by including this list
__pars__ = ['Layer', 'Stack', 'Sample', 'Instrument']
# Used for making choices in the GUI
instrument_string_choices = {'coords': ['q','tth'],
'restype': ['no conv', 'fast conv',
'full conv and varying res.',
'fast conv + varying res.'],
'footype': ['no corr', 'gauss beam',
'square beam'],
'pol':['circ+','circ-','tot', 'ass', 'sigma', 'pi']
}
InstrumentParameters={'wavelength':1.54,'coords':'tth','I0':1.0,'res':0.001,\
'restype':'no conv','respoints':5,'resintrange':2,'beamw':0.01,'footype': 'no corr',\
'samplelen':10.0, 'Ibkg': 0.0, 'pol':'circ+'}
# Coordinates=1 => twothetainput
# Coordinates=0 => Q input
#Res stddev of resolution
#ResType 0: No resolution convlution
# 1: Fast convolution
# 2: Full Convolution +varying resolution
# 3: Fast convolution varying resolution
#ResPoints Number of points for the convolution only valid for ResolutionType=2
#ResIntrange Number of standard deviatons to integrate over default 2
# Parameters for footprint coorections
# Footype: 0: No corections for footprint
# 1: Correction for Gaussian beam => Beaw given in mm and stddev
# 2: Correction for square profile => Beaw given in full width mm
# Samlen= Samplelength in mm.
#
#
LayerParameters = {'dens':1.0, 'd':0.0, 'fc':(0.0 + 1e-20J),
'fm1':(0.0 + 1e-20J), 'fm2':(0.0 + 1e-20J),
'phi_m':0.0, 'theta_m':0.0, 'mag_dens':1.0}
StackParameters = {'Layers':[], 'Repetitions':1}
SampleParameters = {'Stacks':[], 'Ambient':None, 'Substrate':None}
# A buffer to save previous calculations for spin-flip calculations
class Buffer:
W = None
parameters = None
def Specular(TwoThetaQz, sample, instrument):
# preamble to get it working with my class interface
restype = instrument.getRestype()
if restype == 2 or restype == instrument_string_choices['restype'][2]:
(TwoThetaQz,weight) = ResolutionVector(TwoThetaQz[:], \
instrument.getRes(), instrument.getRespoints(),\
range=instrument.getResintrange())
if instrument.getCoords() == 1 or\
instrument.getCoords() == instrument_string_choices['coords'][1]:
theta = TwoThetaQz/2
elif instrument.getCoords() == 0 or\
instrument.getCoords() == instrument_string_choices['coords'][0]:
theta = arcsin(TwoThetaQz/4/pi*instrument.getWavelength())*180./pi
lamda = instrument.getWavelength()
parameters = sample.resolveLayerParameters()
dens = array(parameters['dens'], dtype = complex128)
dens = array(parameters['mag_dens'], dtype = complex128)
#print [type(f) for f in parameters['f']]
fc = array(parameters['fc'], dtype = complex128) + (1-1J)*1e-20
fm1 = array(parameters['fm1'], dtype = complex128) + (1-1J)*1e-20
fm2 = array(parameters['fm2'], dtype = complex128) + (1-1J)*1e-20
re = 2.8179402894e-5
A = -lamda**2*re/pi*dens*fc
B = lamda**2*re/pi*dens*fm1
C = lamda**2*re/pi*dens*fm2
d = array(parameters['d'], dtype = float64)
g_0 = sin(theta*pi/180.0)
phi = array(parameters['phi_m'], dtype = float64)*pi/180.0
theta = array(parameters['theta_m'], dtype = float64)*pi/180.0
M = c_[cos(theta)*cos(phi), cos(theta)*sin(phi), sin(theta)]
#print A[::-1], B[::-1], d[::-1], M[::-1], lamda, g_0
W = lib.xrmr.calc_refl(g_0, lamda, A[::-1], 0.0*A[::-1], B[::-1], C[::-1], M[::-1], d[::-1])
trans = ones(W.shape, dtype = complex128); trans[0,1] = 1.0J; trans[1,1] = -1.0J; trans = trans/sqrt(2)
Wc = lib.xrmr.dot2(trans, lib.xrmr.dot2(W, lib.xrmr.inv2(trans)))
#Different polarization channels:
pol = instrument.getPol()
if pol == 0 or pol == instrument_string_choices['pol'][0]:
R = abs(Wc[0,0])**2 + abs(Wc[0,1])**2
elif pol == 1 or pol == instrument_string_choices['pol'][1]:
R = abs(Wc[1,0])**2 + abs(Wc[1,1])**2
elif pol == 2 or pol == instrument_string_choices['pol'][2]:
R = (abs(W[0,0])**2 + abs(W[1,0])**2 + abs(W[0,1])**2 + abs(W[1,1])**2)/2
elif pol == 3 or pol == instrument_string_choices['pol'][3]:
R = 2*(W[0,0]*W[0,1].conj() + W[1,0]*W[1,1].conj()).imag/(abs(W[0,0])**2 + abs(W[1,0])**2 + abs(W[0,1])**2 + abs(W[1,1])**2)
elif pol == 4 or pol == instrument_string_choices['pol'][4]:
R = abs(W[0,0])**2 + abs(W[0,1])**2
elif pol == 5 or pol == instrument_string_choices['pol'][5]:
R = abs(W[1,0])**2 + abs(W[1,1])**2
else:
raise ValueError('Variable pol has an unvalid value')
#FootprintCorrections
foocor = 1.0
footype = instrument.getFootype()
beamw = instrument.getBeamw()
samlen = instrument.getSamplelen()
if footype == 0 or footype == instrument_string_choices['footype'][0]:
foocor = 1.0
elif footype == 1 or footype == instrument_string_choices['footype'][1]:
foocor = GaussIntensity(theta, samlen/2.0, samlen/2.0, beamw)
elif footype == 2 or footype == instrument_string_choices['footype'][2]:
foocor = SquareIntensity(theta, samlen, beamw)
else:
raise ValueError('Variable footype has an unvalid value')
if restype == 0 or restype == instrument_string_choices['restype'][0]:
R = R[:]*foocor
elif restype == 1 or restype == instrument_string_choices['restype'][1]:
R = ConvoluteFast(TwoThetaQz,R[:]*foocor, instrument.getRes(),\
range = instrument.getResintrange())
elif restype == 2 or restype == instrument_string_choices['restype'][2]:
R = ConvoluteResolutionVector(TwoThetaQz,R[:]*foocor, weight)
elif restype == 3 or restype == instrument_string_choices['restype'][3]:
R = ConvoluteFastVar(TwoThetaQz,R[:]*foocor, instrument.getRes(),\
range = instrument.getResintrange())
else:
raise ValueError('Variable restype has an unvalid value')
return R*instrument.getI0() + instrument.getIbkg()
def OffSpecular(TwoThetaQz, ThetaQx, sample, instrument):
raise NotImplementedError('Off specular calculations are not implemented for magnetic x-ray reflectivity')
def SLD_calculations(z, sample, inst):
''' Calculates the scatteringlength density as at the positions z
'''
parameters = sample.resolveLayerParameters()
dens = array(parameters['dens'], dtype = complex64)
mag_dens = array(parameters['mag_dens'], dtype = complex64)
fc = array(parameters['fc'], dtype = complex64)
sldc = dens*fc
d_sldc = sldc[:-1] - sldc[1:]
fm1 = array(parameters['fm1'], dtype = complex64)
fm2 = array(parameters['fm2'], dtype = complex64)
sldm1 = mag_dens*fm1
sldm2 = mag_dens*fm2
d_sldm1 = sldm1[:-1] - sldm1[1:]
d_sldm2 = sldm2[:-1] - sldm2[1:]
d = array(parameters['d'], dtype = float64)
d = d[1:-1]
# Include one extra element - the zero pos (substrate/film interface)
int_pos = cumsum(r_[0,d])
sigma = int_pos*0.0+1e-7
if z == None:
z = arange(min(-sigma[0]*5, -5), max(int_pos.max()+sigma[-1]*5, 5), 0.5)
rho_c = sum(d_sldc*(0.5 - 0.5*erf((z[:,newaxis]-int_pos)/sqrt(2.)/sigma)), 1) + sldc[-1]
rho_m1 = sum(d_sldm1*(0.5 - 0.5*erf((z[:,newaxis]-int_pos)/sqrt(2.)/sigma)), 1) + sldm1[-1]
rho_m2 = sum(d_sldm2*(0.5 - 0.5*erf((z[:,newaxis]-int_pos)/sqrt(2.)/sigma)), 1) + sldm2[-1]
return {'real charge sld': real(rho_c), 'imag charge sld': imag(rho_c),
'real mag_1 sld': real(rho_m1), 'imag mag_1 sld': imag(rho_m1),
'real mag_2 sld': real(rho_m2), 'imag mag_2 sld': imag(rho_m2),
'z':z}
SimulationFunctions = {'Specular':Specular,\
'OffSpecular':OffSpecular,\
'SLD': SLD_calculations}
import lib.refl as Refl
(Instrument, Layer, Stack, Sample) = Refl.MakeClasses(InstrumentParameters,\
LayerParameters,StackParameters,\
SampleParameters, SimulationFunctions, ModelID)
if __name__=='__main__':
pass
|
jackey-qiu/genx_pc_qiu
|
models/xmag.py
|
Python
|
gpl-3.0
| 13,739
|
[
"Gaussian"
] |
acc67360f66eaa313cdb5174d46abf7a2d677e5669857fda52dd7676f4a512a1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.