text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
import numpy as np
import kernel as kernels
import bandwidths as bw
#TODO: should this be a function?
class KDE(object):
    """Kernel Density Estimator.

    Parameters
    ----------
    x : array-like
        N-dimensional array from which the density is to be estimated
    kernel : Kernel Class
        Should be a class from *
    """
    #TODO: amend docs for Nd case?

    def __init__(self, x, kernel=None):
        data = np.asarray(x)
        # Promote a 1-D sample to a single-column 2-D array so the rest of
        # the class can treat every input uniformly.
        if data.ndim == 1:
            data = data[:, None]
        nobs, n_series = data.shape
        if kernel is None:
            kernel = kernels.Gaussian()  # no meaningful bandwidth yet
        # Wrap a scalar kernel into an N-d product kernel for multivariate data.
        if n_series > 1 and isinstance(kernel, kernels.CustomKernel):
            kernel = kernels.NdKernel(n_series, kernels=kernel)
        self.kernel = kernel
        self.n = n_series  # TODO change attribute
        self.x = data

    def density(self, x):
        """Evaluate the estimated density at the point(s) ``x``."""
        return self.kernel.density(self.x, x)

    def __call__(self, x, h="scott"):
        """Evaluate the density at each element of the iterable ``x``.

        NOTE(review): ``h`` is currently ignored -- the bandwidth comes from
        the kernel instance; confirm before relying on it.
        """
        return np.array([self.density(xx) for xx in x])

    def evaluate(self, x, h="silverman"):
        """Evaluate the kernel density at each element of ``x``.

        NOTE(review): ``h`` is currently ignored, same as in ``__call__``.
        """
        density = self.kernel.density
        return np.array([density(xx) for xx in x])
if __name__ == "__main__":
PLOT = True
from numpy import random
import matplotlib.pyplot as plt
import bandwidths as bw
# 1 D case
random.seed(142)
x = random.standard_t(4.2, size = 50)
h = bw.bw_silverman(x)
#NOTE: try to do it with convolution
support = np.linspace(-10,10,512)
kern = kernels.Gaussian(h = h)
kde = KDE( x, kern)
print kde.density(1.015469)
print 0.2034675
Xs = np.arange(-10,10,0.1)
if PLOT:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(Xs, kde(Xs), "-")
ax.set_ylim(-10, 10)
ax.set_ylim(0,0.4)
plt.show()
# 2 D case
# from statsmodels.sandbox.nonparametric.testdata import kdetest
# x = zip(kdetest.faithfulData["eruptions"], kdetest.faithfulData["waiting"])
# x = np.array(x)
# H = kdetest.Hpi
# kern = kernel.NdKernel( 2 )
# kde = KernelEstimate( x, kern )
# print kde.density( np.matrix( [1,2 ]).T )
# 5 D case
# random.seed(142)
# mu = [1.0, 4.0, 3.5, -2.4, 0.0]
# sigma = np.matrix(
# [[ 0.6 - 0.1*abs(i-j) if i != j else 1.0 for j in xrange(5)] for i in xrange(5)])
# x = random.multivariate_normal(mu, sigma, size = 100)
# kern = kernel.Gaussian()
# kde = KernelEstimate( x, kern )
|
pprett/statsmodels
|
statsmodels/sandbox/nonparametric/kde2.py
|
Python
|
bsd-3-clause
| 2,549
|
[
"Gaussian"
] |
73b684fb0744f9af968b61621def0f1b1ffe908363e4c7cde94ed115c5644d13
|
"""
DictCache.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import datetime
import threading
# DIRAC
from DIRAC.Core.Utilities.LockRing import LockRing
class ThreadLocalDict(threading.local):
    """Thread-local holder for a mutable dict.

    Subclassing ``threading.local`` gives every thread its own independent
    ``cache`` attribute (see the _threading_local docstring for details).
    Its purpose is to have a different cache per thread.
    """

    def __init__(self):  # pylint: disable=super-init-not-called
        """Create the per-thread dictionary.

        It is on purpose that the ``threading.local`` constructor is not
        called here.
        """
        # Dictionary, local to the calling thread, used as the cache storage.
        self.cache = {}
class MockLockRing(object):
"""This mock class is just used to expose the acquire and release method"""
def doNothing(self, *args, **kwargs):
"""Really does nothing !"""
pass
acquire = release = doNothing
class DictCache(object):
    """DictCache is a generic cache implementation.

    The user can decide whether this cache should be shared among the threads
    or not, but it is always thread safe.
    Note that when shared, the access to the cache is protected by a lock, but
    not necessarily the object you are retrieving from it.
    """

    def __init__(self, deleteFunction=False, threadLocal=False):
        """Initialize the dict cache.

        :param deleteFunction: if not False, invoked when deleting a cached object
        :param threadLocal: if False, the cache will be shared among all the threads,
                            otherwise, each thread gets its own cache.
        """
        self.__threadLocal = threadLocal
        # Placeholder either for a LockRing if the cache is shared,
        # or a mock class if not. Created lazily by the `lock` property.
        self.__lock = None
        # One of the following two objects is returned by the __cache property,
        # depending on the threadLocal strategy.
        # This is the placeholder for a shared cache.
        self.__sharedCache = {}
        # This is the placeholder for a thread-local cache.
        self.__threadLocalCache = ThreadLocalDict()
        # Function invoked on a value when it is removed from the cache.
        self.__deleteFunction = deleteFunction

    @property
    def lock(self):
        """Return the lock.

        In practice, if the cache is shared among threads, it is a LockRing.
        Otherwise, it is just a mock object.
        """
        if not self.__lock:
            if not self.__threadLocal:
                self.__lock = LockRing().getLock(self.__class__.__name__, recursive=True)
            else:
                self.__lock = MockLockRing()
        return self.__lock

    @property
    def __cache(self):
        """Return either the shared or the thread-local cache.

        In any case, the returned object is a dictionary.
        """
        if self.__threadLocal:
            return self.__threadLocalCache.cache
        return self.__sharedCache

    def exists(self, cKey, validSeconds=0):
        """Return True/False if the key exists for the given number of seconds.

        :param cKey: identification key of the record
        :param int validSeconds: The amount of seconds the key has to be valid for
        :return: bool
        """
        self.lock.acquire()
        try:
            # Is the key in the cache?
            if cKey in self.__cache:
                expTime = self.__cache[cKey]["expirationTime"]
                # If it's valid return True!
                if expTime > datetime.datetime.now() + datetime.timedelta(seconds=validSeconds):
                    return True
                # Delete expired entries eagerly.
                self.delete(cKey)
            # FIX: previously an absent key fell through returning None;
            # return an explicit bool as documented (None was falsy anyway,
            # so this is backward compatible).
            return False
        finally:
            self.lock.release()

    def delete(self, cKey):
        """Delete a key from the cache.

        :param cKey: identification key of the record
        """
        self.lock.acquire()
        try:
            if cKey not in self.__cache:
                return
            # Let the user-supplied cleanup hook release the value first.
            if self.__deleteFunction:
                self.__deleteFunction(self.__cache[cKey]["value"])
            del self.__cache[cKey]
        finally:
            self.lock.release()

    def add(self, cKey, validSeconds, value=None):
        """Add a record to the cache.

        :param cKey: identification key of the record
        :param int validSeconds: valid seconds of this record
        :param value: value of the record
        """
        # A non-positive lifetime would expire immediately: do not store it.
        if max(0, validSeconds) == 0:
            return
        self.lock.acquire()
        try:
            vD = {"expirationTime": datetime.datetime.now() + datetime.timedelta(seconds=validSeconds),
                  "value": value}
            self.__cache[cKey] = vD
        finally:
            self.lock.release()

    def get(self, cKey, validSeconds=0):
        """Get a record from the cache.

        :param cKey: identification key of the record
        :param int validSeconds: The amount of seconds the key has to be valid for
        :return: None or value of key
        """
        self.lock.acquire()
        try:
            # Is the key in the cache?
            if cKey in self.__cache:
                expTime = self.__cache[cKey]["expirationTime"]
                # If it's still valid, return the stored value.
                if expTime > datetime.datetime.now() + datetime.timedelta(seconds=validSeconds):
                    return self.__cache[cKey]["value"]
                # Delete expired entries eagerly.
                self.delete(cKey)
            # Missing or expired: callers cannot distinguish this from a
            # stored None value (documented behavior).
            return None
        finally:
            self.lock.release()

    def showContentsInString(self):
        """Return a human readable string to represent the contents.

        :return: str
        """
        self.lock.acquire()
        try:
            data = []
            for cKey in self.__cache:
                data.append("%s:" % str(cKey))
                data.append("\tExp: %s" % self.__cache[cKey]["expirationTime"])
                if self.__cache[cKey]["value"]:
                    data.append("\tVal: %s" % self.__cache[cKey]["value"])
            return "\n".join(data)
        finally:
            self.lock.release()

    def getKeys(self, validSeconds=0):
        """Get keys for all contents still valid in ``validSeconds`` seconds.

        :param int validSeconds: valid time in seconds
        :return: list
        """
        self.lock.acquire()
        try:
            keys = []
            limitTime = datetime.datetime.now() + datetime.timedelta(seconds=validSeconds)
            for cKey in self.__cache:
                if self.__cache[cKey]["expirationTime"] > limitTime:
                    keys.append(cKey)
            return keys
        finally:
            self.lock.release()

    def purgeExpired(self, expiredInSeconds=0):
        """Purge all entries that are expired or will expire in <expiredInSeconds>.

        :param int expiredInSeconds: expired time in a seconds
        """
        self.lock.acquire()
        try:
            # Collect first, delete second: never mutate a dict while
            # iterating over it.
            keys = []
            limitTime = datetime.datetime.now() + datetime.timedelta(seconds=expiredInSeconds)
            for cKey in self.__cache:
                if self.__cache[cKey]["expirationTime"] < limitTime:
                    keys.append(cKey)
            for cKey in keys:
                if self.__deleteFunction:
                    self.__deleteFunction(self.__cache[cKey]["value"])
                del self.__cache[cKey]
        finally:
            self.lock.release()

    def purgeAll(self, useLock=True):
        """Purge all entries.

        CAUTION: useLock parameter should ALWAYS be True except when called
        from __del__.

        :param bool useLock: use lock
        """
        if useLock:
            self.lock.acquire()
        try:
            # list() snapshots the keys so deletion during iteration is safe.
            for cKey in list(self.__cache):
                if self.__deleteFunction:
                    self.__deleteFunction(self.__cache[cKey]["value"])
                del self.__cache[cKey]
        finally:
            if useLock:
                self.lock.release()

    def __del__(self):
        """When the DictCache is deleted, all the entries should be purged.

        This is particularly useful when the DictCache manages files.
        CAUTION: if you carefully read the python doc, you will see all the
        caveats of __del__. In particular, no guaranty that it is called...
        (https://docs.python.org/2/reference/datamodel.html#object.__del__)
        """
        self.purgeAll(useLock=False)
        del self.__lock
        if self.__threadLocal:
            del self.__threadLocalCache
        else:
            del self.__sharedCache
|
ic-hep/DIRAC
|
src/DIRAC/Core/Utilities/DictCache.py
|
Python
|
gpl-3.0
| 8,639
|
[
"DIRAC"
] |
fdf3a65ef01a4ff4503801fcf7ab1f4fa95f14d9ce9d8ac1d28f14fa6bfbbba5
|
# The MIT License (MIT)
#
# Copyright (c) 2016, The MITRE Corporation. All rights reserved.
#
# Approved for Public Release; Distribution Unlimited 14-1511
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# https://raw.githubusercontent.com/crits/crits_services/master/office_meta_service/office_meta.py
from __future__ import division, absolute_import, with_statement, print_function, unicode_literals
__author__ = "Mike Goffin"
__license__ = "MPL 2.0"
import sys
import time
import array
import hashlib
import binascii
import struct
import pprint
sys.setrecursionlimit(10000)
class OfficeParser(object):
    """Parser for OLE Compound File Binary (legacy MS Office) documents.

    Walks the FAT / mini-FAT sector chains, extracts the directory entries
    and decodes the SummaryInformation / DocumentSummaryInformation property
    streams. Populates ``self.directory`` and ``self.properties``.

    NOTE(review): the code mixes ``str`` (``''`` sentinels) and ``bytes``
    (sector data) in the chain helpers -- a Python 2 heritage; confirm
    behavior on Python 3 before relying on concatenation paths.
    """

    # Maps the raw 16-byte property-set CLSID to human-readable names for the
    # numeric property IDs found inside the corresponding stream.
    summary_mapping = {
        b"\xE0\x85\x9F\xF2\xF9\x4F\x68\x10\xAB\x91\x08\x00\x2B\x27\xB3\xD9": {
            'name': 'SummaryInformation',
            0x01: 'Codepage',
            0x02: 'Title',
            0x03: 'Subject',
            0x04: 'Author',
            0x05: 'Keywords',
            0x06: 'Comments',
            0x07: 'Template',
            0x08: 'Last Saved By',
            0x09: 'Revision Number',
            0x0a: 'Total Edititing Time',
            0x0b: 'Last printed Date',
            0x0c: 'Creation Date',
            0x0d: 'Last Saved Date',
            0x0e: 'Number of Pages',
            0x0f: 'Number of Words',
            0x10: 'Number of Characters',
            0x11: 'Thumbnail',
            0x12: 'Name of Creating Appliction',
            0x13: 'Security',
        },
        b"\x02\xD5\xCD\xD5\x9C\x2E\x1B\x10\x93\x97\x08\x00\x2B\x2C\xF9\xAE": {
            'name': 'DocumentSummaryInformation',
            0x01: 'Codepage',
            0x02: 'Category',
            0x03: 'Presentation Target',
            0x04: 'Number of Bytes',
            0x05: 'Number of Lines',
            0x06: 'Number of Paragraphs',
            0x07: 'Number of Slides',
            0x08: 'Number of Notes',
            0x09: 'Number of Hidden Slides',
            0x0a: 'MMClips',
            0x0b: 'ScaleCrops',
            0x0c: 'HeadingPairs',
            0x0d: 'Title of Parts',
            0x0e: 'Manager',
            0x0f: 'Company',
            0x10: 'Links up to date'
        },
        b"\x05\xD5\xCD\xD5\x9C\x2E\x1B\x10\x93\x97\x08\x00\x2B\x2C\xF9\xAE": {
            'name': 'Other DocumentSummaryInformation',
            # what the heck do these values mean?
        }
    }
    # Magic bytes at the start of every OLE compound file header.
    office_magic = b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1"

    def __init__(self, data, verbose=False):
        # Raw file contents (bytes); find_office_header may trim a prefix.
        self.data = data
        self.verbose = verbose
        # Parsed header fields (filled by parse_office_header).
        self.office_header = {}
        # Parsed directory entries (filled by parse_directory).
        self.directory = []
        # Decoded property sets (filled by parse_summary_information).
        self.properties = []
        # FAT and mini-FAT allocation tables as arrays of sector numbers.
        self.fat_table = []
        self.mini_fat_table = []
        # Backing stream for the mini-FAT (set when the root entry is seen).
        self.mini_fat_data = ''
        self.sector_size = 512

    def get_mini_fat_chain(self, sector):
        """Concatenate the mini-FAT sector chain starting at ``sector``.

        0xfffffffe (end of chain) / 0xffffffff (free) terminate the walk.
        """
        if sector in [0xffffffff, 0xfffffffe]:
            return ''
        elif sector < len(self.mini_fat_table):
            if self.mini_fat_table[sector] == 0xfffffffe:
                return self.get_mini_fat_sector(sector)
            elif sector != self.mini_fat_table[sector]:
                data = self.get_mini_fat_sector(sector)
                return data + self.get_mini_fat_chain(self.mini_fat_table[sector])
        return ''

    def get_mini_fat_sector(self, sector):
        """Return one 64-byte mini sector from the mini stream."""
        return self.mini_fat_data[(sector) * 64 : (sector + 1) * 64]

    def get_fat_chain(self, sector):
        """Concatenate the FAT sector chain starting at ``sector``."""
        if sector in [0xffffffff, 0xfffffffe]:
            return ''
        elif sector < len(self.fat_table):
            if self.verbose:
                print("request sector %d - len %d" % (sector, len(self.fat_table)))
            if self.fat_table[sector] == 0xfffffffe:
                return self.get_fat_sector(sector)
            elif sector != self.fat_table[sector]:
                data = self.get_fat_sector(sector)
                return data + self.get_fat_chain(self.fat_table[sector])
        return ''

    def get_mini_fat_sector_chain(self, sector):
        """Return the list of FAT sector numbers making up a chain."""
        if sector in [0xffffffff, 0xfffffffe, 0xfffffffd]:
            return []
        elif sector < len(self.fat_table):
            if self.fat_table[sector] == 0xfffffffe:
                return [sector]
            elif sector != self.fat_table[sector]:
                return [sector] + self.get_mini_fat_sector_chain(self.fat_table[sector])
        return []

    def get_fat_sector(self, sector):
        """Return one sector of file data (sector 0 starts after the header)."""
        return self.data[(sector + 1) * self.sector_size : (sector+2) * self.sector_size]

    def make_fat(self, sector_list):
        """Build an allocation table from the given list of FAT sectors.

        Each sector is reinterpreted as an array of unsigned 32-bit entries.
        """
        fat = array.array('I')
        if self.verbose:
            print("sector_list = %s" % sector_list)
        for sector in sector_list:
            sect_data = (self.get_fat_sector(sector))
            if len(sect_data) > 0:
                fat += array.array('I', self.get_fat_sector(sector))
            else:
                if self.verbose:
                    print("!!!!! Error, invalid SAT table, sector missing")
        return fat

    def parse_office_header(self):
        """Decode the 512-byte compound-file header and build the FAT tables.

        :return: dict of header fields
        """
        office_header = {
            'magic': binascii.hexlify(self.data[:8]),
            'clsid': binascii.hexlify(self.data[8:24]),
            'min_ver': struct.unpack('H', self.data[24:26])[0],
            'maj_ver': struct.unpack('H', self.data[26:28])[0],
            'byte_order': struct.unpack('H', self.data[28:30])[0],
            'sector_shift': struct.unpack('H', self.data[30:32])[0],
            'mini_sector_shift': struct.unpack('H', self.data[32:34])[0],
            'reserved': binascii.hexlify(self.data[34:40]),
            'num_dir_sect': struct.unpack('I', self.data[40:44])[0],
            'num_fat_sect': struct.unpack('I', self.data[44:48])[0],
            'first_dir_sect': struct.unpack('I', self.data[48:52])[0],
            'transaction_sig': struct.unpack('I', self.data[52:56])[0],
            'mini_stream_cutoff': struct.unpack('I', self.data[56:60])[0],
            'first_mini_fat_sect': struct.unpack('I', self.data[60:64])[0],
            'num_mini_fat_sect': struct.unpack('I', self.data[64:68])[0],
            'first_difat': struct.unpack('I', self.data[68:72])[0],
            'num_difat': struct.unpack('I', self.data[72:76])[0],
            'difat_0': struct.unpack('I', self.data[76:80])[0],
        }
        if self.verbose:
            pprint.pprint(office_header)
        # Only proceed for known major versions with little-endian marker 0xFFFE.
        if office_header['maj_ver'] in [3,4] and office_header['byte_order'] == 65534:
            fat_array = array.array('I', self.data[76:76 + (4 * office_header['num_fat_sect'])])
            self.fat_table = self.make_fat(fat_array)
            if office_header['num_mini_fat_sect'] > 0:
                mini_fat_sectors = self.get_mini_fat_sector_chain(office_header['first_mini_fat_sect'])
                if mini_fat_sectors:
                    self.mini_fat_table = self.make_fat(mini_fat_sectors)
            if self.verbose:
                print("[+] FAT Tables")
                print(self.fat_table)
                print(self.mini_fat_table)
        return office_header

    def find_office_header(self):
        """Locate the OLE magic in the data and trim everything before it.

        :return: offset of the header, or None if not found
        """
        offset = self.data.find(self.office_magic)
        if offset >= 0:
            if self.verbose:
                print("\t[+] found office header at offset %04X" % offset)
            self.data = self.data[offset:]
            return offset
        if self.verbose:
            print("\t[-] could not find office header")
        return None

    def parse_property_set_header(self, prop_data):
        """Decode a property-set stream header and its property locator list.

        :return: dict with header fields and 'property_list', or {} if too short
        """
        if len(prop_data) >= 28:
            system_values = {
                0: 'Win16',
                1: 'Macintosh',
                2: 'Win32',
            }
            property_set_header = {
                'byte_order': binascii.hexlify(prop_data[:2]),
                'format': struct.unpack('H', prop_data[2:4])[0],
                'system_version': struct.unpack('I', prop_data[4:8])[0],
                'clsid': binascii.hexlify(prop_data[8:24]),
                'num_properties': struct.unpack('I', prop_data[24:28])[0],
                'property_list': [],
            }
            # provide a text string for the system value
            # Sanity check: well-formed sets have 1-2 sections and LE marker.
            if property_set_header['num_properties'] not in [1,2] or property_set_header['byte_order'] != b'feff':
                if self.verbose:
                    print("[+] invalid property set record")
                return property_set_header
            property_set_header['system_name'] = system_values.get(property_set_header['system_version'], 'Unknown')
            for i in range(property_set_header['num_properties']):
                # Each locator is 20 bytes: 16-byte CLSID + 4-byte offset.
                if len(prop_data) >= (28 + (i*20) + 20):
                    offset = (i * 20) + 28
                    prop = {
                        'clsid': binascii.hexlify(prop_data[offset:offset+16]),
                        'offset': struct.unpack('I', prop_data[offset+16:offset+20])[0],
                    }
                    property_set_header["property_list"].append(prop)
            return property_set_header
        return {}

    def lookup_property_id(self, prop_id, prop_type):
        """Translate a numeric property ID to a name via summary_mapping."""
        table = self.summary_mapping.get(binascii.unhexlify(prop_type), {})
        if table:
            return table.get(prop_id, 'Unknown (%d)' % prop_id)
        return 'Unknown'

    def timestamp_string(self, wtimestamp):
        """Convert a Windows FILETIME value to (unix_timestamp, display string).

        Values before the unix epoch are treated as durations (H:M:S).
        """
        # FILETIME is 100-ns ticks since 1601-01-01; 11644473600s to unix epoch.
        timestamp = (wtimestamp / 10000000) - 11644473600
        if timestamp > 0:
            datestring = time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(timestamp))
        else:
            timestamp = (wtimestamp / 10000000)
            datestring = "%02d:%02d:%02d" % (timestamp / 360, timestamp / 60, timestamp % 60)
        return (timestamp, datestring)

    def parse_properties(self, prop_data, prop_type):
        """Decode the typed property values of one property-set section.

        :param prop_data: section bytes (starts with size and count)
        :param prop_type: hexlified CLSID used for name lookups
        :return: dict with 'properties' list, or {} if too short
        """
        if len(prop_data) >= 8:
            properties = {
                'size': struct.unpack('I', prop_data[:4])[0],
                'num_properties': struct.unpack('I', prop_data[4:8])[0],
                'properties': [],
            }
            for i in range(properties['num_properties']):
                offset = (i * 8) + 8
                if len(prop_data) < (offset+8):
                    break
                prop = {
                    'id': struct.unpack('I', prop_data[offset:offset+4])[0],
                    'offset': struct.unpack('I', prop_data[offset+4:offset+8])[0],
                }
                offset = prop['offset']
                if offset >= len(prop_data):
                    continue
                prop['name'] = self.lookup_property_id(prop['id'], prop_type)
                prop['type'] = struct.unpack('I', prop_data[offset:offset+4])[0]
                # Decode by variant type: 0x02=i2, 0x03=i4, 0x1e=ANSI string,
                # 0x0b=bool, 0x40=FILETIME, 0x1f=UTF-16 string.
                if prop['type'] == 0x02:
                    prop['value'] = struct.unpack('h', prop_data[offset+4:offset+6])[0]
                elif prop['type'] == 0x03:
                    prop['value'] = struct.unpack('i', prop_data[offset+4:offset+8])[0]
                elif prop['type'] == 0x1e:
                    prop['str_len'] = struct.unpack('i', prop_data[offset+4:offset+8])[0]
                    # NOTE(review): 'code_page' is stored below as a "Name: value"
                    # string, so this comparison with 0x4b0 appears to never
                    # match -- confirm intended behavior.
                    if properties.get('code_page', 0) == 0x4b0:
                        prop['value'] = prop_data[offset+8:offset+8+prop['str_len']-2]
                    else:
                        prop['value'] = prop_data[offset+8:offset+8+prop['str_len']-1].replace(b'\x00', b'')
                elif prop['type'] == 0x0b:
                    prop['value'] = binascii.hexlify(prop_data[offset+4:offset+8])
                elif prop['type'] == 0x40:
                    prop['value'] = struct.unpack('Q', prop_data[offset+4:offset+12])[0]
                    (prop['timestamp'], prop['date']) = self.timestamp_string(prop['value'])
                elif prop['type'] == 0x1f:
                    prop['str_len'] = struct.unpack('I', prop_data[offset+4:offset+8])[0]
                    prop['value'] = prop_data[offset+8: offset+8+(prop['str_len'] * 2)]
                else:
                    prop['value'] = binascii.hexlify(prop_data[:8])
                # NOTE(review): the str branch looks like Python 2 heritage --
                # on Python 3, str has no .decode(); confirm on both runtimes.
                if isinstance(prop['value'], str):
                    prop['value'] = prop['value'].decode('cp1252').encode('utf-8')
                elif isinstance(prop['value'], bytes):
                    prop['value'] = prop['value'].decode('utf-8')
                elif isinstance(prop['value'], int):
                    prop['value'] = str(prop['value'])
                prop['result'] = "%s: %s" % (prop['name'], prop['value'])
                if prop['id'] == 0x01:
                    properties['code_page'] = prop['result']
                properties['properties'].append(prop)
            return properties
        return {}

    def parse_summary_information(self, summary_data, prop_type):
        """Parse a summary-information stream: header plus each section."""
        if self.verbose:
            print("\t[+] parsing %d bytes of summary_data for %s" % (len(summary_data), prop_type))
        if len(summary_data) >= 28:
            property_set_header = self.parse_property_set_header(summary_data)
            for item in property_set_header["property_list"]:
                item['properties'] = self.parse_properties(summary_data[item["offset"]:], item["clsid"])
                if self.verbose:
                    pprint.pprint(item)
            return property_set_header
        return {}

    def parse_directory(self, data):
        """Recursively decode 128-byte directory entries from ``data``.

        Appends each entry to ``self.directory`` and triggers property
        parsing for streams containing a known summary CLSID.
        """
        if len(data) >= 128:
            #if data[:8] == '\x00\x10\x00\x00\x00\x00\x00\x00':
            # print "trucating first 8 bytes"
            # self.parse_directory(data[8:])
            entry = {
                'name': data[:64],
                'name_len': struct.unpack('H', data[64:66])[0],
                'object_type': struct.unpack('B', data[66:67])[0],
                'color': struct.unpack('B', data[67:68])[0],
                'left_sibling': struct.unpack('I', data[68:72])[0],
                'right_sibling': struct.unpack('I', data[72:76])[0],
                'child': struct.unpack('I', data[76:80])[0],
                'clsid': binascii.hexlify(data[80:96]),
                'state': struct.unpack('I', data[96:100])[0],
                'create_time': struct.unpack('Q', data[100:108])[0],
                'modify_time': struct.unpack('Q', data[108:116])[0],
                'start_sect': struct.unpack('I', data[116:120])[0],
                'stream_size': struct.unpack('Q', data[120:128])[0],
            }
            # /version 3 limits this field to 32 bits
            if self.office_header['maj_ver'] == 3:
                entry['stream_size'] = entry['stream_size'] & 0x7fffffff
            # fix up the name to a normalized ascii name for display
            name_len = entry['name_len'] - 2
            norm_name = entry['name'][:name_len].replace(b'\x00', b'')
            # check for known subtype headers indicating special objects
            if len(norm_name) >= 1:
                if norm_name[0:1] in [b'\x01', b'\x03', b'\x05']:
                    norm_name = norm_name[1:]
            entry['norm_name'] = norm_name
            entry['result'] = norm_name
            # fetch any directory data if available
            # Object type 0x05 is the root entry: its stream backs the mini-FAT.
            if entry['object_type'] == 0x05:
                dir_data = self.get_fat_chain(entry['start_sect'])
                self.mini_fat_data = dir_data
            elif entry['stream_size'] > 0 and entry['stream_size'] < self.office_header['mini_stream_cutoff']:
                dir_data = self.get_mini_fat_chain(entry['start_sect'])
            elif entry['stream_size'] >= self.office_header['mini_stream_cutoff']:
                dir_data = self.get_fat_chain(entry['start_sect'])
            else:
                dir_data = ''
            if self.verbose:
                print("[+] got %d data from %s" % (len(dir_data), entry['result']))
            # check the directory specific content and parse
            if entry['object_type'] in [0,2] and len(dir_data) > 0:
                for clsid in list(self.summary_mapping.keys()):
                    if clsid in dir_data:
                        self.properties.append(self.parse_summary_information(dir_data, clsid))
                        if self.verbose:
                            print(self.properties)
            if len(dir_data) > 0:
                entry['md5'] = hashlib.md5(dir_data).hexdigest()
                entry['data'] = dir_data
            if self.verbose:
                pprint.pprint(entry)
            self.directory.append(entry)
            self.parse_directory(data[128:])
        return {}

    def pretty_print(self):
        """Print a human-readable report of header, directories and properties."""
        print("\nDocument Summary\n" + "-" * 40)
        print("%20s:%20s" % ("Magic", self.office_header['magic']))
        print("%20s:%20s" % ("Version", "%d.%d" % (self.office_header['maj_ver'], self.office_header['min_ver'])))
        print("\nDirectories\n" + "-" * 40)
        for directory in self.directory:
            if len(directory['norm_name']) > 0:
                print("\t%40s - %10d - %32s" % (directory.get('norm_name', ''), directory.get('stream_size', 0), directory.get('md5', 0)))
        print("\nProperties\n" + "-" * 40)
        for prop_list in self.properties:
            for prop in prop_list['property_list']:
                prop_summary = self.summary_mapping.get(binascii.unhexlify(prop['clsid']), {})
                prop_name = prop_summary.get('name', 'Unknown')
                print("\n\t%s" % prop_name)
                if len(prop.get('properties', [])) > 0:
                    if len(prop['properties'].get('properties', [])) > 0:
                        for item in prop['properties']['properties']:
                            value = item.get('date', item['value'])
                            print("%50s - %40s" % (item['name'], value))

    def parse_office_doc(self):
        """Top-level entry point: locate header, parse it, walk the directory."""
        if (self.find_office_header() == None):
            return None
        self.office_header = self.parse_office_header()
        if self.office_header['maj_ver'] in [3,4]:
            self.parse_directory(self.get_fat_chain(self.office_header['first_dir_sect']))
|
awest1339/multiscanner
|
libs/office_meta.py
|
Python
|
mpl-2.0
| 19,544
|
[
"FEFF"
] |
07946360d888c38b89ee17e862448a3febc91067de8a31f303780ff1cc70135a
|
import numpy as np
import sys
import os
import time
from ase.units import Bohr
from ase.structure import bulk
from gpaw import GPAW
from gpaw.atom.basis import BasisMaker
from gpaw.response.df import DF
from gpaw.mpi import serial_comm, rank, size
from gpaw.utilities import devnull
# Silence output on all MPI ranks except the master.
if rank != 0:
    sys.stdout = devnull

assert size <= 4**3

# Ground state calculation
t1 = time.time()

a = 4.043
atoms = bulk('Al', 'fcc', a=a)
atoms.center()
calc = GPAW(h=0.2,
            kpts=(4, 4, 4),
            mode='lcao',
            basis='dzp',
            xc='LDA')

atoms.set_calculator(calc)
atoms.get_potential_energy()
calc.write('Al.gpw', 'all')
t2 = time.time()

# Excited state calculation
q = np.array([1 / 4., 0., 0.])
w = np.linspace(0, 24, 241)

df = DF(calc='Al.gpw', q=q, w=w, eta=0.2, ecut=50)
df1, df2 = df.get_dielectric_function()
#df.write('Al.pckl')
df.get_EELS_spectrum(df1, df2, filename='EELS_Al_lcao')
df.check_sum_rule(df1, df2)

t3 = time.time()

# FIX: converted Python 2 print statements to the print() function so the
# test also runs on Python 3.
print('For ground state calc, it took', (t2 - t1) / 60, 'minutes')
print('For excited state calc, it took', (t3 - t2) / 60, 'minutes')

d = np.loadtxt('EELS_Al_lcao')

# Reference plasmon-peak position for Al in eV, and the spectrum row index
# where the peak is expected.
wpeak = 16.9  # eV
Nw = 169
if d[Nw, 1] > d[Nw - 1, 1] and d[Nw, 2] > d[Nw + 1, 2]:
    pass
else:
    raise ValueError('Plasmon peak not correct ! ')

if (np.abs(d[Nw, 1] - 19.7274875955) > 1e-3
        or np.abs(d[Nw, 2] - 18.9147047194) > 1e-3):
    print(d[Nw, 1], d[Nw, 2])
    raise ValueError('Please check spectrum strength ! ')
|
qsnake/gpaw
|
gpaw/test/aluminum_EELS_lcao.py
|
Python
|
gpl-3.0
| 1,532
|
[
"ASE",
"GPAW"
] |
143d684d8e6b9d0c2b32a94ced3bdbabf880d33bda5e0b88726c5094443dd11f
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007 Donald N. Allingham
# Copyright (C) 2008 Brian Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# $Id$
"Attach Source Tool"
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
import time
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
from gramps.gui.plug.tool import Tool
from gramps.gui.plug import MenuToolOptions, PluginWindows
from gramps.gen.plug.menu import StringOption, FilterOption, PersonOption, \
EnumeratedListOption
import gramps.gen.lib
from gramps.gen.db import DbTxn
import gramps.gen.plug.report.utils as ReportUtils
from gramps.gen.display.name import displayer as name_displayer
from gramps.gen.const import GRAMPS_LOCALE as glocale
try:
_trans = glocale.get_addon_translator(__file__)
except ValueError:
_trans = glocale.translation
_ = _trans.gettext
#------------------------------------------------------------------------
#
# Tool Classes
#
#------------------------------------------------------------------------
class AttachSourceOptions(MenuToolOptions):
    """Menu options for the Attach Source tool."""

    def __init__(self, name, person_id=None, dbstate=None):
        # Keep a database handle so filter updates can resolve the center person.
        self.__db = dbstate.get_database()
        MenuToolOptions.__init__(self, name, person_id, dbstate)

    def add_menu_options(self, menu):
        """ Add the options """
        category_name = _("Options")

        # Person filter restricting which people get the source attached.
        self.__filter = FilterOption(_("Person Filter"), 0)
        self.__filter.set_help(_("Select filter to restrict people"))
        menu.add_option(category_name, "filter", self.__filter)
        self.__filter.connect('value-changed', self.__filter_changed)

        # Center person used by person-relative filters.
        self.__pid = PersonOption(_("Filter Person"))
        self.__pid.set_help(_("The center person for the filter"))
        menu.add_option(category_name, "pid", self.__pid)
        self.__pid.connect('value-changed', self.__update_filters)

        self.__update_filters()

        # Whether to create a new source or look up an existing one by ID.
        source_type = EnumeratedListOption(_("Source type"), 0)
        source_type.add_item(0, _("New source"))
        source_type.add_item(1, _("Existing source"))
        source_type.set_help(_("Select the type of source to attach"))
        menu.add_option(category_name, "source_type", source_type)
        source_type.connect('value-changed', self.__update_source_type)
        self.__source_type = source_type

        source_text = StringOption(_("New Source Title"), "")
        source_text.set_help(_("Text of source to attach"))
        menu.add_option(category_name, "source_text", source_text)
        self.__source_text = source_text

        source_id = StringOption(_("Existing Source ID"), "")
        source_id.set_help(_("ID of source to attach"))
        menu.add_option(category_name, "source_id", source_id)
        self.__source_id = source_id

        self.__update_source_type()

    def __update_source_type(self):
        """
        Update the options based on the selected source type
        """
        sid = self.__source_type.get_value()
        if sid == 0:
            # New source: only the title field applies.
            self.__source_text.set_available(True)
            self.__source_id.set_available(False)
        else:
            # Existing source: only the ID field applies.
            self.__source_text.set_available(False)
            self.__source_id.set_available(True)

    def __update_filters(self):
        """
        Update the filter list based on the selected person
        """
        gid = self.__pid.get_value()
        person = self.__db.get_person_from_gramps_id(gid)
        filter_list = ReportUtils.get_person_filters(person, False)
        self.__filter.set_filters(filter_list)

    def __filter_changed(self):
        """
        Handle filter change. If the filter is not specific to a person,
        disable the person option
        """
        filter_value = self.__filter.get_value()
        # NOTE(review): the comment below lists filters 0, 2, 3, 4 and 5, but
        # the test checks indices [1, 2, 3, 4] -- confirm which filter indices
        # are actually person-relative.
        if filter_value in [1, 2, 3, 4]:
            # Filters 0, 2, 3, 4 and 5 rely on the center person
            self.__pid.set_available(True)
        else:
            # The rest don't
            self.__pid.set_available(False)
class AttachSourceWindow(PluginWindows.ToolManagedWindowBatch):
    """Batch tool that attaches a citation to every person matching a filter."""

    def get_title(self):
        """Window title."""
        return _("Attach Source")

    def initial_frame(self):
        """Name of the options notebook tab shown first."""
        return _("Options")

    def run(self):
        """Execute the tool: resolve the source, then cite it on each person."""
        # Cache of already-created sources, keyed by title.
        self.skeys = {}
        source_type = self.options.handler.options_dict['source_type']
        # 0 - new, 1 - lookup
        if source_type == 0:
            source_text = self.options.handler.options_dict['source_text']
        else:
            source_id = self.options.handler.options_dict['source_id']
            source = self.db.get_source_from_gramps_id(source_id)
            if source is None:
                # FIXME: show an error message
                return
        with DbTxn(_("Attach Source"), self.db, batch=True) as self.trans:
            self.add_results_frame(_("Results"))
            self.results_write(_("Processing...\n"))
            self.db.disable_signals()
            if source_type == 0:
                source = self.create_source(source_text)
            self.filter_option = self.options.menu.get_option_by_name('filter')
            self.filter = self.filter_option.get_filter()  # the actual filter
            # FIXME: use old style for gramps31 compatible
            # people = self.filter.apply(self.db,
            #          self.db.iter_person_handles())
            people = self.filter.apply(self.db,
                                       self.db.get_person_handles(sort_handles=False))
            # FIXME: use old style for gramps31 compatible
            # num_people = self.db.get_number_of_people()
            num_people = len(people)
            self.results_write(_("Attaching sources...\n"))
            self.progress.set_pass(_('Attaching sources...'),
                                   num_people)
            count = 1
            for person_handle in people:
                self.progress.step()
                person = self.db.get_person_from_handle(person_handle)
                # Create a citation pointing at the source and attach it
                # to the person.
                citation = gramps.gen.lib.Citation()
                citation.set_reference_handle(source.handle)
                self.db.add_citation(citation, self.trans)
                self.db.commit_citation(citation, self.trans)
                person.add_citation(citation.handle)
                self.db.commit_person(person, self.trans)
                self.results_write(" %d) " % count)
                self.results_write_link(name_displayer.display(person),
                                        person, person_handle)
                self.results_write("\n")
                count += 1
        self.db.enable_signals()
        self.db.request_rebuild()
        self.results_write(_("Done!\n"))

    def create_source(self, source_text):
        """Return the Source titled *source_text*, creating it on first use.

        Sources created during this run are cached in ``self.skeys`` so the
        same title is never added twice.
        """
        if source_text in self.skeys:
            source = self.db.get_source_from_handle(self.skeys[source_text])
        else:
            source = gramps.gen.lib.Source()
            source.set_title(source_text)
            self.db.add_source(source, self.trans)
            self.db.commit_source(source, self.trans)
            self.skeys[source_text] = source.handle
        # FIX: removed a redundant second db.add_source() call that re-added
        # the already-committed source, and removed unreachable leftover lines
        # after the return that referenced an undefined `event` variable.
        return source
|
sam-m888/addons-source
|
AttachSourceTool/AttachSourceTool.py
|
Python
|
gpl-2.0
| 8,173
|
[
"Brian"
] |
e0f8f692013df514b850424f0ff5a239e9d0134f25f2901bbfc481df20b519ce
|
# -*- coding: utf-8 -*-
"""Release data for the IPython project."""
#-----------------------------------------------------------------------------
# Copyright (c) 2008, IPython Development Team.
# Copyright (c) 2001, Fernando Perez <fernando.perez@colorado.edu>
# Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
# Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# Name of the package for release purposes.  This is the name which labels
# the tarballs and RPMs made by distutils, so it's best to lowercase it.
name = 'ipython'
# IPython version information.  An empty _version_extra marks a full
# release; the string 'dev' marks a development snapshot.
_version_major = 4
_version_minor = 1
_version_patch = 0
_version_extra = 'dev'
# _version_extra = 'rc1'
# _version_extra = ''  # Uncomment this for full releases
# release.codename is deprecated in 2.0, will be removed in 3.0
codename = ''
# Assemble the dotted version string, appending the extra tag when present.
_ver = [_version_major, _version_minor, _version_patch]
__version__ = '.'.join(str(part) for part in _ver)
if _version_extra:
    __version__ = '-'.join([__version__, _version_extra])
version = __version__  # backwards compatibility name
version_info = (_version_major, _version_minor, _version_patch, _version_extra)
# Change this when incrementing the kernel protocol version
kernel_protocol_version_info = (5, 0)
kernel_protocol_version = '{0}.{1}'.format(*kernel_protocol_version_info)
description = "IPython: Productive Interactive Computing"
long_description = \
"""
IPython provides a rich toolkit to help you make the most out of using Python
interactively.  Its main components are:
* A powerful interactive Python shell
* A `Jupyter <http://jupyter.org/>`_ kernel to work with Python code in Jupyter
  notebooks and other interactive frontends.
The enhanced interactive Python shells have the following main features:
* Comprehensive object introspection.
* Input history, persistent across sessions.
* Caching of output results during a session with automatically generated
  references.
* Extensible tab completion, with support by default for completion of python
  variables and keywords, filenames and function keywords.
* Extensible system of 'magic' commands for controlling the environment and
  performing many tasks related either to IPython or the operating system.
* A rich configuration system with easy switching between different setups
  (simpler than changing $PYTHONSTARTUP environment variables every time).
* Session logging and reloading.
* Extensible syntax processing for special purpose situations.
* Access to the system shell with user-extensible alias system.
* Easily embeddable in other Python programs and GUIs.
* Integrated access to the pdb debugger and the Python profiler.
The latest development version is always available from IPython's `GitHub
site <http://github.com/ipython>`_.
"""
license = 'BSD'
# Historical contributor registry: display name -> (full name, email).
authors = {'Fernando': ('Fernando Perez', 'fperez.net@gmail.com'),
           'Janko': ('Janko Hauser', 'jhauser@zscout.de'),
           'Nathan': ('Nathaniel Gray', 'n8gray@caltech.edu'),
           'Ville': ('Ville Vainio', 'vivainio@gmail.com'),
           'Brian': ('Brian E Granger', 'ellisonbg@gmail.com'),
           'Min': ('Min Ragan-Kelley', 'benjaminrk@gmail.com'),
           'Thomas': ('Thomas A. Kluyver', 'takowl@gmail.com'),
           'Jorgen': ('Jorgen Stenarson', 'jorgen.stenarson@bostream.nu'),
           'Matthias': ('Matthias Bussonnier', 'bussonniermatthias@gmail.com'),
           }
author = 'The IPython Development Team'
author_email = 'ipython-dev@scipy.org'
url = 'http://ipython.org'
download_url = 'https://github.com/ipython/ipython/downloads'
platforms = ['Linux', 'Mac OSX', 'Windows XP/Vista/7/8']
keywords = ['Interactive', 'Interpreter', 'Shell', 'Parallel', 'Distributed',
            'Web-based computing', 'Qt console', 'Embedding']
classifiers = [
    'Framework :: IPython',
    'Intended Audience :: Developers',
    'Intended Audience :: Science/Research',
    'License :: OSI Approved :: BSD License',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Topic :: System :: Shells'
    ]
|
lifeinoppo/littlefishlet-scode
|
RES/REF/python_sourcecode/ipython-master/IPython/core/release.py
|
Python
|
gpl-2.0
| 4,541
|
[
"Brian"
] |
669647d079e44610c3197ea9b6966bd3e44a1ce395dcdd3d95b3ac434a1d5755
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# user - [insert a few words of module description on this line]
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
#
# User class used for simulating users that randomly submit jobs
#
import random
class User:
    """Simulated user that may randomly submit a job to its server on each
    simulation timestep."""

    # Class-level defaults; all but `length` and `jobs` are overwritten
    # per instance in __init__.
    id = ''
    submit_prob = None
    logger = None
    server = None
    maxprice = None
    length = 2
    jobs = 0

    def __init__(self, id, logger, prob, price, server, vgrid):
        """Bind identity, logger, submit probability, price cap, target
        server and vgrid for this simulated user."""
        self.id = id
        self.logger = logger
        self.submit_prob = prob
        self.maxprice = price
        self.server = server
        self.vgrid = vgrid

    def submit_job(self, step):
        """Submit a single job to the server and bump the job counter."""
        self.logger.info('%s submitting job with maxprice %s to %s in step %d'
                         , self.id, self.maxprice, self.server.id, step)
        job_name = '%s' % self.id
        self.server.submit(job_name, self.length, self.maxprice, self.vgrid)
        self.jobs = self.jobs + 1

    def sleep(self):
        """Do nothing this timestep (only log it)."""
        self.logger.debug('%s sleeping', self.id)

    def simulate(self, timestep):
        """Randomly submit a job during the given timestep, unless the
        server queue already holds 200 or more jobs."""
        draw = random.random()
        queued = self.server.job_queue.queue_length()
        if draw > self.submit_prob or queued >= 200:
            self.sleep()
        else:
            self.submit_job(timestep)
|
heromod/migrid
|
mig/simulation/user.py
|
Python
|
gpl-2.0
| 2,151
|
[
"Brian"
] |
ffc24f61194ba2be767061999acc146ef1ec5d2e37c2a234cdafa8af94db306b
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
***********************************
espressopp.integrator.DPDThermostat
***********************************
.. function:: espressopp.integrator.DPDThermostat(system, vl)
:param system:
:param vl:
:type system:
:type vl:
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.integrator.Extension import *
from _espressopp import integrator_DPDThermostat
class DPDThermostatLocal(ExtensionLocal, integrator_DPDThermostat):
    """Worker-side wrapper around the C++ DPD thermostat extension."""
    def __init__(self, system, vl):
        # Only construct the underlying C++ object when PMI is inactive,
        # or when this MPI rank belongs to the active PMI CPU group.
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, integrator_DPDThermostat, system, vl)
    #def enableAdress(self):
    # if pmi.workerIsActive():
    # self.cxxclass.enableAdress(self);
# On the controller process, expose DPDThermostat as a PMI proxy class that
# forwards the listed properties to the worker-side DPDThermostatLocal
# instances.  (Python 2 style metaclass assignment.)
if pmi.isController :
    class DPDThermostat(Extension):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.integrator.DPDThermostatLocal',
            pmiproperty = [ 'gamma', 'tgamma', 'temperature' ]
            )
|
fedepad/espressopp
|
src/integrator/DPDThermostat.py
|
Python
|
gpl-3.0
| 1,933
|
[
"ESPResSo"
] |
6c1038ca33c767c651c312155feb855a8ab40a53bd292efaad7cf2c4158cca5e
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkSuperquadricSource(SimpleVTKClassModuleBase):
    """DeVIDE wrapper module around VTK's vtkSuperquadricSource.

    Auto-generated by createDeVIDEModuleFromVTKObject: the module takes no
    inputs and produces a single 'vtkPolyData' output.
    """
    def __init__(self, module_manager):
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkSuperquadricSource(), 'Processing.',
            (), ('vtkPolyData',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
|
nagyistoce/devide
|
modules/vtk_basic/vtkSuperquadricSource.py
|
Python
|
bsd-3-clause
| 483
|
[
"VTK"
] |
8b9980f703d1c687cb0c92ce4ebf4b6beb1a5aaa5af18f80f79f9fe222a4636c
|
"""
"""
from abc import ABCMeta, abstractmethod
PULSAR_UNKNOWN_RETURN_CODE = '__unknown__'
class ManagerInterface:
    """
    Defines the interface to various job managers.
    """
    # Python 2 style ABC declaration; under Python 3 this would read
    # `class ManagerInterface(metaclass=ABCMeta)`.
    __metaclass__ = ABCMeta
    @abstractmethod
    def setup_job(self, input_job_id, tool_id, tool_version):
        """
        Setup a job directory for specified input (galaxy) job id, tool id,
        and tool version.
        """
    @abstractmethod
    def clean(self, job_id):
        """
        Delete job directory and clean up resources associated with job with
        id `job_id`.
        """
    # NOTE(review): the mutable defaults ({} and []) below are shared across
    # calls.  That is harmless on an abstract declaration (never executed),
    # but concrete implementations should prefer None sentinels.
    @abstractmethod
    def launch(self, job_id, command_line, submit_params={}, dependencies_description=None, env=[], setup_params=None):
        """
        Called to indicate that the client is ready for this job with specified
        job id and command line to be executed (i.e. run or queue this job
        depending on implementation).
        """
    @abstractmethod
    def get_status(self, job_id):
        """
        Return status of job as string, currently supported statuses include
        'cancelled', 'running', 'queued', and 'complete'.
        """
    @abstractmethod
    def return_code(self, job_id):
        """
        Return integer indicating return code of specified execution or
        PULSAR_UNKNOWN_RETURN_CODE.
        """
    @abstractmethod
    def stdout_contents(self, job_id):
        """
        After completion, return contents of stdout associated with specified
        job.
        """
    @abstractmethod
    def stderr_contents(self, job_id):
        """
        After completion, return contents of stderr associated with specified
        job.
        """
    @abstractmethod
    def kill(self, job_id):
        """
        End or cancel execution of the specified job.
        """
    @abstractmethod
    def job_directory(self, job_id):
        """ Return a JobDirectory abstraction describing the state of the
        job working directory.
        """
class ManagerProxy:
    """
    Subclass to build override proxy a manager and override specific
    functionality.
    """
    def __init__(self, manager):
        """Wrap *manager*; every call below is forwarded to it."""
        self._proxied_manager = manager
    def setup_job(self, *args, **kwargs):
        """Forward ``setup_job`` to the wrapped manager."""
        return self._proxied_manager.setup_job(*args, **kwargs)
    def clean(self, *args, **kwargs):
        """Forward ``clean`` to the wrapped manager."""
        return self._proxied_manager.clean(*args, **kwargs)
    def launch(self, *args, **kwargs):
        """Forward ``launch`` to the wrapped manager."""
        return self._proxied_manager.launch(*args, **kwargs)
    def get_status(self, *args, **kwargs):
        """Forward ``get_status`` to the wrapped manager."""
        return self._proxied_manager.get_status(*args, **kwargs)
    def return_code(self, *args, **kwargs):
        """Forward ``return_code`` to the wrapped manager."""
        return self._proxied_manager.return_code(*args, **kwargs)
    def stdout_contents(self, *args, **kwargs):
        """Forward ``stdout_contents`` to the wrapped manager."""
        return self._proxied_manager.stdout_contents(*args, **kwargs)
    def stderr_contents(self, *args, **kwargs):
        """Forward ``stderr_contents`` to the wrapped manager."""
        return self._proxied_manager.stderr_contents(*args, **kwargs)
    def kill(self, *args, **kwargs):
        """Forward ``kill`` to the wrapped manager."""
        return self._proxied_manager.kill(*args, **kwargs)
    def shutdown(self, timeout=None):
        """ Optional. """
        # Managers without a shutdown hook are simply skipped.
        shutdown_method = getattr(self._proxied_manager, "shutdown", None)
        if shutdown_method is None:
            return
        shutdown_method(timeout)
    def job_directory(self, *args, **kwargs):
        """Forward ``job_directory`` to the wrapped manager."""
        return self._proxied_manager.job_directory(*args, **kwargs)
    def system_properties(self):
        """Forward ``system_properties`` to the wrapped manager."""
        return self._proxied_manager.system_properties()
    @property
    def object_store(self):
        """Expose the wrapped manager's object store."""
        return self._proxied_manager.object_store
    def __str__(self):
        """Show which manager is being proxied."""
        return "ManagerProxy[manager=%s]" % str(self._proxied_manager)
|
galaxyproject/pulsar
|
pulsar/managers/__init__.py
|
Python
|
apache-2.0
| 3,684
|
[
"Galaxy"
] |
14783136daad42d869d0874d9b80ff17cdce8dcb1e3c4aa6bfa812f5ca89fd2b
|
# -*- coding: utf-8 -*-
# messages.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
LeapMessage and MessageCollection.
"""
import copy
import logging
import re
import threading
import StringIO
from collections import defaultdict
from email import message_from_string
from functools import partial
from pycryptopp.hash import sha256
from twisted.mail import imap4
from twisted.internet import defer, reactor
from zope.interface import implements
from zope.proxy import sameProxiedObjects
from leap.common.check import leap_assert, leap_assert_type
from leap.common.decorators import memoized_method
from leap.common.mail import get_email_charset
from leap.mail import walk
from leap.mail.utils import first, find_charset, lowerdict, empty
from leap.mail.utils import stringify_parts_map
from leap.mail.decorators import deferred_to_thread
from leap.mail.imap.index import IndexedDB
from leap.mail.imap.fields import fields, WithMsgFields
from leap.mail.imap.memorystore import MessageWrapper
from leap.mail.imap.messageparts import MessagePart, MessagePartDoc
from leap.mail.imap.parser import MBoxParser
logger = logging.getLogger(__name__)
# TODO ------------------------------------------------------------
# [ ] Add ref to incoming message during add_msg
# [ ] Add linked-from info.
# * Need a new type of documents: linkage info.
# * HDOCS are linked from FDOCs (ref to chash)
# * CDOCS are linked from HDOCS (ref to chash)
# [ ] Delete incoming mail only after successful write!
# [ ] Remove UID from syncable db. Store only those indexes locally.
MSGID_PATTERN = r"""<([\w@.]+)>"""
MSGID_RE = re.compile(MSGID_PATTERN)
def try_unique_query(curried):
    """
    Try to execute a query that is expected to have a
    single outcome, and log a warning if more than one document found.
    :param curried: a curried function
    :type curried: callable
    """
    leap_assert(callable(curried), "A callable is expected")
    try:
        results = curried()
        if not results:
            return None
        if len(results) > 1:
            # TODO we could take action, like trigger a background
            # process to kill dupes.
            name = getattr(curried, 'expected', 'doc')
            logger.warning(
                "More than one %s found for this mbox, "
                "we got a duplicate!!" % (name,))
        # Consume and return the last result; any failure (including the
        # pop itself) is logged and yields an implicit None.
        return results.pop()
    except Exception as exc:
        logger.exception("Unhandled error %r" % exc)
"""
A dictionary that keeps one lock per mbox and uid.
"""
# XXX too much overhead?
fdoc_locks = defaultdict(lambda: defaultdict(lambda: threading.Lock()))
class LeapMessage(fields, MBoxParser):
    """
    The main representation of a message.
    It indexes the messages in one mailbox by a combination
    of uid+mailbox name.
    """
    # TODO this has to change.
    # Should index primarily by chash, and keep a local-only
    # UID table.
    implements(imap4.IMessage)
    def __init__(self, soledad, uid, mbox, collection=None, container=None):
        """
        Initializes a LeapMessage.
        :param soledad: a Soledad instance
        :type soledad: Soledad
        :param uid: the UID for the message.
        :type uid: int or basestring
        :param mbox: the mbox this message belongs to
        :type mbox: str or unicode
        :param collection: a reference to the parent collection object
        :type collection: MessageCollection
        :param container: a IMessageContainer implementor instance
        :type container: IMessageContainer
        """
        self._soledad = soledad
        self._uid = int(uid) if uid is not None else None
        self._mbox = self._parse_mailbox_name(mbox)
        self._collection = collection
        self._container = container
        # Lazily-populated caches for the content hash and body document,
        # filled by the fdoc/bdoc properties below.
        self.__chash = None
        self.__bdoc = None
        self.reactor = reactor
    # XXX make these properties public
    @property
    def fdoc(self):
        """
        An accessor to the flags document.
        """
        if all(map(bool, (self._uid, self._mbox))):
            fdoc = None
            # Prefer the in-memory container; fall back to soledad.
            if self._container is not None:
                fdoc = self._container.fdoc
            if not fdoc:
                fdoc = self._get_flags_doc()
            if fdoc:
                fdoc_content = fdoc.content
                # Side effect: refresh the cached content hash.
                self.__chash = fdoc_content.get(
                    fields.CONTENT_HASH_KEY, None)
            return fdoc
    @property
    def hdoc(self):
        """
        An accessor to the headers document.
        """
        container = self._container
        if container is not None:
            hdoc = self._container.hdoc
            if hdoc and not empty(hdoc.content):
                return hdoc
        hdoc = self._get_headers_doc()
        if container and not empty(hdoc.content):
            # mem-cache it
            hdoc_content = hdoc.content
            chash = hdoc_content.get(fields.CONTENT_HASH_KEY)
            hdocs = {chash: hdoc_content}
            container.memstore.load_header_docs(hdocs)
        return hdoc
    @property
    def chash(self):
        """
        An accessor to the content hash for this message.
        """
        # Accessing self.fdoc also refreshes __chash as a side effect.
        if not self.fdoc:
            return None
        if not self.__chash and self.fdoc:
            self.__chash = self.fdoc.content.get(
                fields.CONTENT_HASH_KEY, None)
        return self.__chash
    @property
    def bdoc(self):
        """
        An accessor to the body document.
        """
        if not self.hdoc:
            return None
        if not self.__bdoc:
            self.__bdoc = self._get_body_doc()
        return self.__bdoc
    # IMessage implementation
    def getUID(self):
        """
        Retrieve the unique identifier associated with this Message.
        :return: uid for this message
        :rtype: int
        """
        return self._uid
    def getFlags(self):
        """
        Retrieve the flags associated with this Message.
        :return: The flags, represented as strings
        :rtype: tuple
        """
        uid = self._uid
        flags = set([])
        fdoc = self.fdoc
        if fdoc:
            # NOTE(review): set(None) would raise TypeError if FLAGS_KEY
            # were ever missing from the fdoc content — presumably it is
            # always present; verify against the FLAGS_DOC template.
            flags = set(fdoc.content.get(self.FLAGS_KEY, None))
        msgcol = self._collection
        # We treat the recent flag specially: gotten from
        # a mailbox-level document.
        if msgcol and uid in msgcol.recent_flags:
            flags.add(fields.RECENT_FLAG)
        if flags:
            flags = map(str, flags)
        return tuple(flags)
    # setFlags not in the interface spec but we use it with store command.
    def setFlags(self, flags, mode):
        """
        Sets the flags for this message
        :param flags: the flags to update in the message.
        :type flags: tuple of str
        :param mode: the mode for setting. 1 is append, -1 is remove, 0 set.
        :type mode: int
        """
        leap_assert(isinstance(flags, tuple), "flags need to be a tuple")
        mbox, uid = self._mbox, self._uid
        APPEND = 1
        REMOVE = -1
        SET = 0
        # One lock per (mbox, uid) serializes concurrent flag updates.
        with fdoc_locks[mbox][uid]:
            doc = self.fdoc
            if not doc:
                logger.warning(
                    "Could not find FDOC for %r:%s while setting flags!" %
                    (mbox, uid))
                return
            current = doc.content[self.FLAGS_KEY]
            if mode == APPEND:
                newflags = tuple(set(tuple(current) + flags))
            elif mode == REMOVE:
                newflags = tuple(set(current).difference(set(flags)))
            elif mode == SET:
                newflags = flags
            # NOTE(review): an unexpected mode would leave `newflags`
            # unbound and raise NameError below.
            new_fdoc = {
                self.FLAGS_KEY: newflags,
                self.SEEN_KEY: self.SEEN_FLAG in newflags,
                self.DEL_KEY: self.DELETED_FLAG in newflags}
            self._collection.memstore.update_flags(mbox, uid, new_fdoc)
            return map(str, newflags)
    def getInternalDate(self):
        """
        Retrieve the date internally associated with this message
        According to the spec, this is NOT the date and time in the
        RFC-822 header, but rather a date and time that reflects when the
        message was received.
        * In SMTP, date and time of final delivery.
        * In COPY, internal date/time of the source message.
        * In APPEND, date/time specified.
        :return: An RFC822-formatted date string.
        :rtype: str
        """
        date = self.hdoc.content.get(fields.DATE_KEY, '')
        return date
    #
    # IMessagePart
    #
    # XXX we should implement this interface too for the subparts
    # so we allow nested parts...
    def getBodyFile(self):
        """
        Retrieve a file object containing only the body of this message.
        :return: file-like object opened for reading
        :rtype: StringIO
        """
        def write_fd(body):
            # Write and rewind so callers can read from the start.
            fd.write(body)
            fd.seek(0)
            return fd
        # TODO refactor with getBodyFile in MessagePart
        fd = StringIO.StringIO()
        if self.bdoc is not None:
            bdoc_content = self.bdoc.content
            if empty(bdoc_content):
                logger.warning("No BDOC content found for message!!!")
                return write_fd("")
            body = bdoc_content.get(self.RAW_KEY, "")
            content_type = bdoc_content.get('content-type', "")
            charset = find_charset(content_type)
            if charset is None:
                charset = self._get_charset(body)
            try:
                if isinstance(body, unicode):
                    body = body.encode(charset)
            except UnicodeError as exc:
                logger.error(
                    "Unicode error, using 'replace'. {0!r}".format(exc))
                logger.debug("Attempted to encode with: %s" % charset)
                body = body.encode(charset, 'replace')
            finally:
                # NOTE: return-in-finally deliberately(?) swallows any
                # exception raised in the except branch above.
                return write_fd(body)
            # We are still returning funky characters from here.
        else:
            logger.warning("No BDOC found for message.")
            return write_fd("")
    @memoized_method
    def _get_charset(self, stuff):
        """
        Gets (guesses?) the charset of a payload.
        :param stuff: the stuff to guess about.
        :type stuff: basestring
        :returns: charset
        """
        # XXX shouldn't we make the scope
        # of the decorator somewhat more persistent?
        # ah! yes! and put memory bounds.
        return get_email_charset(stuff)
    def getSize(self):
        """
        Return the total size, in octets, of this message.
        :return: size of the message, in octets
        :rtype: int
        """
        size = None
        if self.fdoc is not None:
            fdoc_content = self.fdoc.content
            size = fdoc_content.get(self.SIZE_KEY, False)
        else:
            logger.warning("No FLAGS doc for %s:%s" % (self._mbox,
                                                       self._uid))
        if not size:
            # XXX fallback, should remove when all migrated.
            size = self.getBodyFile().len
        return size
    def getHeaders(self, negate, *names):
        """
        Retrieve a group of message headers.
        :param names: The names of the headers to retrieve or omit.
        :type names: tuple of str
        :param negate: If True, indicates that the headers listed in names
                       should be omitted from the return value, rather
                       than included.
        :type negate: bool
        :return: A mapping of header field names to header field values
        :rtype: dict
        """
        # TODO split in smaller methods
        # XXX refactor together with MessagePart method
        headers = self._get_headers()
        if not headers:
            logger.warning("No headers found")
            return {str('content-type'): str('')}
        names = map(lambda s: s.upper(), names)
        if negate:
            cond = lambda key: key.upper() not in names
        else:
            cond = lambda key: key.upper() in names
        if isinstance(headers, list):
            headers = dict(headers)
        # default to most likely standard
        charset = find_charset(headers, "utf-8")
        headers2 = dict()
        for key, value in headers.items():
            # twisted imap server expects *some* headers to be lowercase
            # We could use a CaseInsensitiveDict here...
            if key.lower() == "content-type":
                key = key.lower()
            if not isinstance(key, str):
                key = key.encode(charset, 'replace')
            if not isinstance(value, str):
                value = value.encode(charset, 'replace')
            if value.endswith(";"):
                # bastards
                value = value[:-1]
            # filter original dict by negate-condition
            if cond(key):
                headers2[key] = value
        return headers2
    def _get_headers(self):
        """
        Return the headers dict for this message.
        """
        if self.hdoc is not None:
            hdoc_content = self.hdoc.content
            headers = hdoc_content.get(self.HEADERS_KEY, {})
            return headers
        else:
            # Implicitly returns None when there is no headers doc.
            logger.warning(
                "No HEADERS doc for msg %s:%s" % (
                    self._mbox,
                    self._uid))
    def isMultipart(self):
        """
        Return True if this message is multipart.
        """
        if self.fdoc:
            fdoc_content = self.fdoc.content
            is_multipart = fdoc_content.get(self.MULTIPART_KEY, False)
            return is_multipart
        else:
            # Implicitly returns None when there is no flags doc.
            logger.warning(
                "No FLAGS doc for msg %s:%s" % (
                    self._mbox,
                    self._uid))
    def getSubPart(self, part):
        """
        Retrieve a MIME submessage
        :type part: C{int}
        :param part: The number of the part to retrieve, indexed from 0.
        :raise IndexError: Raised if the specified part does not exist.
        :raise TypeError: Raised if this message is not multipart.
        :rtype: Any object implementing C{IMessagePart}.
        :return: The specified sub-part.
        """
        if not self.isMultipart():
            raise TypeError
        try:
            # Parts map is 1-indexed, the IMAP API is 0-indexed.
            pmap_dict = self._get_part_from_parts_map(part + 1)
        except KeyError:
            raise IndexError
        return MessagePart(self._soledad, pmap_dict)
    #
    # accessors
    #
    def _get_part_from_parts_map(self, part):
        """
        Get a part map from the headers doc
        :raises: KeyError if key does not exist
        :rtype: dict
        """
        if not self.hdoc:
            logger.warning("Tried to get part but no HDOC found!")
            return None
        hdoc_content = self.hdoc.content
        pmap = hdoc_content.get(fields.PARTS_MAP_KEY, {})
        # remember, lads, soledad is using strings in its keys,
        # not integers!
        return pmap[str(part)]
    # XXX moved to memory store
    # move the rest too. ------------------------------------------
    def _get_flags_doc(self):
        """
        Return the document that keeps the flags for this
        message.
        """
        result = {}
        try:
            flag_docs = self._soledad.get_from_index(
                fields.TYPE_MBOX_UID_IDX,
                fields.TYPE_FLAGS_VAL, self._mbox, str(self._uid))
            result = first(flag_docs)
        except Exception as exc:
            # ugh! Something's broken down there!
            logger.warning("ERROR while getting flags for UID: %s" % self._uid)
            logger.exception(exc)
        finally:
            # NOTE: return-in-finally means the empty default dict is
            # returned after a logged failure.
            return result
    # TODO move to soledadstore instead of accessing soledad directly
    def _get_headers_doc(self):
        """
        Return the document that keeps the headers for this
        message.
        """
        head_docs = self._soledad.get_from_index(
            fields.TYPE_C_HASH_IDX,
            fields.TYPE_HEADERS_VAL, str(self.chash))
        return first(head_docs)
    # TODO move to soledadstore instead of accessing soledad directly
    def _get_body_doc(self):
        """
        Return the document that keeps the body for this
        message.
        """
        hdoc_content = self.hdoc.content
        body_phash = hdoc_content.get(
            fields.BODY_KEY, None)
        if not body_phash:
            logger.warning("No body phash for this document!")
            return None
        # XXX get from memstore too...
        # if memstore: memstore.get_phrash
        # memstore should keep a dict with weakrefs to the
        # phash doc...
        if self._container is not None:
            bdoc = self._container.memstore.get_cdoc_from_phash(body_phash)
            if not empty(bdoc) and not empty(bdoc.content):
                return bdoc
        # no memstore, or no body doc found there
        if self._soledad:
            body_docs = self._soledad.get_from_index(
                fields.TYPE_P_HASH_IDX,
                fields.TYPE_CONTENT_VAL, str(body_phash))
            return first(body_docs)
        else:
            # Implicitly returns None.
            logger.error("No phash in container, and no soledad found!")
    def __getitem__(self, key):
        """
        Return an item from the content of the flags document,
        for convenience.
        :param key: The key
        :type key: str
        :return: The content value indexed by C{key} or None
        :rtype: str
        """
        return self.fdoc.content.get(key, None)
    def does_exist(self):
        """
        Return True if there is actually a flags document for this
        UID and mbox.
        """
        return not empty(self.fdoc)
class MessageCollection(WithMsgFields, IndexedDB, MBoxParser):
"""
A collection of messages, surprisingly.
It is tied to a selected mailbox name that is passed to its constructor.
Implements a filter query over the messages contained in a soledad
database.
"""
# XXX this should be able to produce a MessageSet methinks
# could validate these kinds of objects turning them
# into a template for the class.
FLAGS_DOC = "FLAGS"
HEADERS_DOC = "HEADERS"
CONTENT_DOC = "CONTENT"
"""
RECENT_DOC is a document that stores a list of the UIDs
with the recent flag for this mailbox. It deserves a special treatment
because:
(1) it cannot be set by the user
(2) it's a flag that we set inmediately after a fetch, which is quite
often.
(3) we need to be able to set/unset it in batches without doing a single
write for each element in the sequence.
"""
RECENT_DOC = "RECENT"
"""
HDOCS_SET_DOC is a document that stores a set of the Document-IDs
(the u1db index) for all the headers documents for a given mailbox.
We use it to prefetch massively all the headers for a mailbox.
This is the second massive query, after fetching all the FLAGS, that
a MUA will do in a case where we do not have local disk cache.
"""
HDOCS_SET_DOC = "HDOCS_SET"
templates = {
# Message Level
FLAGS_DOC: {
fields.TYPE_KEY: fields.TYPE_FLAGS_VAL,
fields.UID_KEY: 1, # XXX moe to a local table
fields.MBOX_KEY: fields.INBOX_VAL,
fields.CONTENT_HASH_KEY: "",
fields.SEEN_KEY: False,
fields.DEL_KEY: False,
fields.FLAGS_KEY: [],
fields.MULTIPART_KEY: False,
fields.SIZE_KEY: 0
},
HEADERS_DOC: {
fields.TYPE_KEY: fields.TYPE_HEADERS_VAL,
fields.CONTENT_HASH_KEY: "",
fields.DATE_KEY: "",
fields.SUBJECT_KEY: "",
fields.HEADERS_KEY: {},
fields.PARTS_MAP_KEY: {},
},
CONTENT_DOC: {
fields.TYPE_KEY: fields.TYPE_CONTENT_VAL,
fields.PAYLOAD_HASH_KEY: "",
fields.LINKED_FROM_KEY: [],
fields.CTYPE_KEY: "", # should index by this too
# should only get inmutable headers parts
# (for indexing)
fields.HEADERS_KEY: {},
fields.RAW_KEY: "",
fields.PARTS_MAP_KEY: {},
fields.HEADERS_KEY: {},
fields.MULTIPART_KEY: False,
},
# Mailbox Level
RECENT_DOC: {
fields.TYPE_KEY: fields.TYPE_RECENT_VAL,
fields.MBOX_KEY: fields.INBOX_VAL,
fields.RECENTFLAGS_KEY: [],
},
HDOCS_SET_DOC: {
fields.TYPE_KEY: fields.TYPE_HDOCS_SET_VAL,
fields.MBOX_KEY: fields.INBOX_VAL,
fields.HDOCS_SET_KEY: [],
}
}
# Different locks for wrapping both the u1db document getting/setting
# and the property getting/settting in an atomic operation.
# TODO we would abstract this to a SoledadProperty class
_rdoc_lock = defaultdict(lambda: threading.Lock())
_rdoc_write_lock = defaultdict(lambda: threading.Lock())
_rdoc_read_lock = defaultdict(lambda: threading.Lock())
_rdoc_property_lock = defaultdict(lambda: threading.Lock())
_initialized = {}
def __init__(self, mbox=None, soledad=None, memstore=None):
"""
Constructor for MessageCollection.
On initialization, we ensure that we have a document for
storing the recent flags. The nature of this flag make us wanting
to store the set of the UIDs with this flag at the level of the
MessageCollection for each mailbox, instead of treating them
as a property of each message.
We are passed an instance of MemoryStore, the same for the
SoledadBackedAccount, that we use as a read cache and a buffer
for writes.
:param mbox: the name of the mailbox. It is the name
with which we filter the query over the
messages database.
:type mbox: str
:param soledad: Soledad database
:type soledad: Soledad instance
:param memstore: a MemoryStore instance
:type memstore: MemoryStore
"""
leap_assert(mbox, "Need a mailbox name to initialize")
leap_assert(mbox.strip() != "", "mbox cannot be blank space")
leap_assert(isinstance(mbox, (str, unicode)),
"mbox needs to be a string")
leap_assert(soledad, "Need a soledad instance to initialize")
# okay, all in order, keep going...
self.mbox = self._parse_mailbox_name(mbox)
# XXX get a SoledadStore passed instead
self._soledad = soledad
self.memstore = memstore
self.__rflags = None
if not self._initialized.get(mbox, False):
try:
self.initialize_db()
# ensure that we have a recent-flags doc
self._get_or_create_rdoc()
except Exception:
logger.debug("Error initializing %r" % (mbox,))
else:
self._initialized[mbox] = True
self.reactor = reactor
def _get_empty_doc(self, _type=FLAGS_DOC):
"""
Returns an empty doc for storing different message parts.
Defaults to returning a template for a flags document.
:return: a dict with the template
:rtype: dict
"""
if _type not in self.templates.keys():
raise TypeError("Improper type passed to _get_empty_doc")
return copy.deepcopy(self.templates[_type])
def _get_or_create_rdoc(self):
"""
Try to retrieve the recent-flags doc for this MessageCollection,
and create one if not found.
"""
# XXX should move this to memstore too
with self._rdoc_write_lock[self.mbox]:
rdoc = self._get_recent_doc_from_soledad()
if rdoc is None:
rdoc = self._get_empty_doc(self.RECENT_DOC)
if self.mbox != fields.INBOX_VAL:
rdoc[fields.MBOX_KEY] = self.mbox
self._soledad.create_doc(rdoc)
@deferred_to_thread
def _do_parse(self, raw):
    """
    Parse a raw message and return it along with relevant
    information about its outer level.

    This is done in a separate thread, and the callback is passed
    to `_do_add_msg` method.

    :param raw: the raw message
    :type raw: StringIO or basestring
    :return: msg, parts, chash, size, multi
    :rtype: tuple
    """
    parsed = message_from_string(raw)
    return (parsed,
            walk.get_parts(parsed),
            sha256.SHA256(raw).hexdigest(),
            len(raw),
            parsed.is_multipart())
def _populate_flags(self, flags, uid, chash, size, multi):
    """
    Build and return a flags document for this message.

    :param flags: flags to set on the document (may be empty)
    :param uid: the message UID
    :param chash: content-hash of the message
    :param size: size in bytes of the raw message
    :param multi: whether the message is multipart
    :return: the populated flags document
    :rtype: dict
    """
    doc = self._get_empty_doc(self.FLAGS_DOC)
    doc[self.MBOX_KEY] = self.mbox
    doc[self.UID_KEY] = uid
    doc[self.CONTENT_HASH_KEY] = chash
    doc[self.SIZE_KEY] = size
    doc[self.MULTIPART_KEY] = multi
    if flags:
        doc[self.FLAGS_KEY] = flags
        doc[self.SEEN_KEY] = self.SEEN_FLAG in flags
        doc[self.DEL_KEY] = self.DELETED_FLAG in flags
    # every new message is flagged Recent by default
    doc[self.RECENT_KEY] = True
    return doc
def _populate_headr(self, msg, chash, subject, date):
    """
    Build and return a headers document for this message.

    :param msg: the parsed message
    :param chash: content-hash of the message
    :param subject: optional subject override
    :param date: optional date override
    :return: the populated headers document
    :rtype: dict
    """
    collected = defaultdict(list)
    for name, value in msg.items():
        collected[name].append(value)
    # "fix" for repeated headers: fold repeats into a single string,
    # re-introducing the "Name: " prefix between occurrences.
    for name, values in collected.items():
        separator = "\n%s: " % (name,)
        collected[name] = separator.join(values)

    lowered = lowerdict(collected)
    msgid = first(MSGID_RE.findall(
        lowered.get('message-id', '')))

    hd = self._get_empty_doc(self.HEADERS_DOC)
    hd[self.CONTENT_HASH_KEY] = chash
    hd[self.HEADERS_KEY] = collected
    hd[self.MSGID_KEY] = msgid

    # fall back to the parsed headers when no explicit subject/date
    # was passed in.
    if not subject and self.SUBJECT_FIELD in collected:
        hd[self.SUBJECT_KEY] = collected[self.SUBJECT_FIELD]
    else:
        hd[self.SUBJECT_KEY] = subject
    if not date and self.DATE_FIELD in collected:
        hd[self.DATE_KEY] = collected[self.DATE_FIELD]
    else:
        hd[self.DATE_KEY] = date
    return hd
def _fdoc_already_exists(self, chash):
    """
    Check whether we can find a flags doc for this mailbox with the
    given content-hash. It enforces that we can only have the same
    message listed once for a given mailbox.

    Looks in the memory store first, then falls back to soledad.

    :param chash: the content-hash to check about.
    :type chash: basestring
    :return: False, if it does not exist, or UID.
    """
    found = self.memstore.get_fdoc_from_chash(chash, self.mbox)
    if not found:
        found = self._get_fdoc_from_chash(chash)
    if not found or found.content is None:
        return False
    return found.content.get(fields.UID_KEY, "unknown-uid")
def add_msg(self, raw, subject=None, flags=None, date=None,
            notify_on_disk=False):
    """
    Create a new message document.

    Parsing happens in a worker thread; the heavy lifting then
    continues in `_do_add_msg`.

    :param raw: the raw message
    :type raw: str
    :param subject: subject of the message.
    :type subject: str
    :param flags: flags
    :type flags: list
    :param date: the received date for the message
    :type date: str
    :param notify_on_disk: whether to delay firing the returned
                           deferred until the write hits the disk.
    :return: a deferred that will be fired with the message
             uid when the adding succeed.
    :rtype: deferred
    """
    flags = tuple() if flags is None else flags
    leap_assert_type(flags, tuple)

    observer = defer.Deferred()
    parse_d = self._do_parse(raw)
    parse_d.addCallback(
        lambda parsed: self.reactor.callInThread(
            self._do_add_msg, parsed, flags, subject, date,
            notify_on_disk, observer))
    return observer
# Called in thread
def _do_add_msg(self, parse_result, flags, subject,
                date, notify_on_disk, observer):
    """
    Helper that creates a new message document.
    Here lives the magic of the leap mail. Well, in soledad, really.

    Runs in a worker thread (dispatched from `add_msg`). The order of
    operations matters: the uniqueness check happens immediately
    before a new UID is reserved.

    See `add_msg` docstring for parameter info.

    :param parse_result: a tuple with the results of `self._do_parse`
    :type parse_result: tuple
    :param observer: a deferred that will be fired with the message
                     uid when the adding succeed.
    :type observer: deferred
    """
    # TODO signal that we can delete the original message!-----
    # when all the processing is done.
    # TODO add the linked-from info !
    # TODO add reference to the original message
    msg, parts, chash, size, multi = parse_result

    # check for uniqueness --------------------------------
    # Watch out! We're reserving a UID right after this!
    existing_uid = self._fdoc_already_exists(chash)
    if existing_uid:
        msg = self.get_msg_by_uid(existing_uid)
        # We can say the observer that we're done
        self.reactor.callFromThread(observer.callback, existing_uid)
        # duplicate: clear the Deleted flag on the existing copy
        # instead of storing the message again.
        msg.setFlags((fields.DELETED_FLAG,), -1)
        return

    # XXX get FUCKING UID from autoincremental table
    uid = self.memstore.increment_last_soledad_uid(self.mbox)

    # We can say the observer that we're done at this point, but
    # before that we should make sure it has no serious consequences
    # if we're issued, for instance, a fetch command right after...
    # self.reactor.callFromThread(observer.callback, uid)
    # if we did the notify, we need to invalidate the deferred
    # so not to try to fire it twice.
    # observer = None

    fd = self._populate_flags(flags, uid, chash, size, multi)
    hd = self._populate_headr(msg, chash, subject, date)

    # pick the simple/multipart body-hash helper depending on the
    # multipart flag of the parsed message.
    body_phash_fun = [walk.get_body_phash_simple,
                      walk.get_body_phash_multi][int(multi)]
    body_phash = body_phash_fun(walk.get_payloads(msg))
    parts_map = walk.walk_msg_tree(parts, body_phash=body_phash)

    # add parts map to header doc
    # (body, multi, part_map)
    for key in parts_map:
        hd[key] = parts_map[key]
    del parts_map

    hd = stringify_parts_map(hd)

    # The MessageContainer expects a dict, one-indexed
    cdocs = dict(enumerate(walk.get_raw_docs(msg, parts), 1))

    self.set_recent_flag(uid)
    msg_container = MessageWrapper(fd, hd, cdocs)
    self.memstore.create_message(
        self.mbox, uid, msg_container,
        observer=observer, notify_on_disk=notify_on_disk)
#
# getters: specific queries
#
# recent flags
def _get_recent_flags(self):
    """
    An accessor for the recent-flags set for this mailbox.

    Returns the cached set when available; otherwise reads it from
    the memory store, falling back to the soledad recent-flags doc
    (and caching the soledad result into the memory store).

    :rtype: set
    """
    # XXX check if we should remove this
    if self.__rflags is not None:
        return self.__rflags

    if self.memstore is not None:
        with self._rdoc_lock[self.mbox]:
            rflags = self.memstore.get_recent_flags(self.mbox)
            if not rflags:
                # not loaded in the memory store yet.
                # let's fetch them from soledad...
                rdoc = self._get_recent_doc_from_soledad()
                if rdoc is None:
                    return set([])
                rflags = set(rdoc.content.get(
                    fields.RECENTFLAGS_KEY, []))
                # ...and cache them now.
                self.memstore.load_recent_flags(
                    self.mbox,
                    {'doc_id': rdoc.doc_id, 'set': rflags})
        return rflags
def _set_recent_flags(self, value):
    """
    Setter for the recent-flags set for this mailbox.

    Delegates storage to the memory store when one is present.
    """
    if self.memstore is None:
        return
    self.memstore.set_recent_flags(self.mbox, value)
# Expose the recent-flags set as a read/write property backed by the
# two accessors above.
recent_flags = property(
    _get_recent_flags, _set_recent_flags,
    doc="Set of UIDs with the recent flag for this mailbox.")
def _get_recent_doc_from_soledad(self):
    """
    Get the recent-flags document from Soledad for this mailbox.

    :rtype: SoledadDocument or None
    """
    query = partial(
        self._soledad.get_from_index,
        fields.TYPE_MBOX_IDX,
        fields.TYPE_RECENT_VAL, self.mbox)
    query.expected = "rdoc"
    with self._rdoc_read_lock[self.mbox]:
        return try_unique_query(query)
# Property-set modification (protected by a different
# lock to give atomicity to the read/write operation)
def unset_recent_flags(self, uids):
    """
    Unset the Recent flag for a sequence of uids.

    :param uids: the uids to unset
    :type uids: sequence
    """
    to_remove = set(uids)
    with self._rdoc_property_lock[self.mbox]:
        self.recent_flags.difference_update(to_remove)
# Individual flags operations
def unset_recent_flag(self, uid):
    """
    Unset the Recent flag for a single uid.

    :param uid: the uid to unset
    :type uid: int
    """
    with self._rdoc_property_lock[self.mbox]:
        self.recent_flags.difference_update(set([uid]))
@deferred_to_thread
def set_recent_flag(self, uid):
    """
    Set the Recent flag for a given uid, running in a worker thread.

    :param uid: the uid to set
    :type uid: int
    """
    with self._rdoc_property_lock[self.mbox]:
        current = self.recent_flags
        # reassign through the property so the setter persists the set
        self.recent_flags = current.union(set([uid]))
# individual doc getters, message layer.
def _get_fdoc_from_chash(self, chash):
    """
    Return a flags document for this mailbox with a given chash.

    Queries soledad first; if nothing is found there, falls back to
    the in-memory chash->fdoc store, wrapping the raw content so
    callers get a uniform ``.content`` interface.

    :return: A SoledadDocument containing the Flags Document, or None if
             the query failed.
    :rtype: SoledadDocument or None.
    """
    query = partial(
        self._soledad.get_from_index,
        fields.TYPE_MBOX_C_HASH_IDX,
        fields.TYPE_FLAGS_VAL, self.mbox, chash)
    query.expected = "fdoc"
    result = try_unique_query(query)
    if result is not None:
        return result
    # probably this should be the other way round,
    # ie, try fist on memstore...
    raw_fdoc = self.memstore._chash_fdoc_store[chash][self.mbox]
    # hey, I just needed to wrap fdoc thing into
    # a "content" attribute, look a better way...
    if empty(raw_fdoc):
        return None
    return MessagePartDoc(
        new=None, dirty=None, part=None,
        store=None, doc_id=None,
        content=raw_fdoc)
def _get_uid_from_msgidCb(self, msgid):
    """
    Callback helper: resolve a message-id to a UID.

    Queries soledad for the headers doc matching ``msgid``; when not
    found, scans the in-memory headers store. The content-hash found
    there is then used to look up the flags doc, whose UID is
    returned.

    :param msgid: the message-id to look up
    :return: the UID, or None when no match is found
    """
    hdoc = None
    curried = partial(
        self._soledad.get_from_index,
        fields.TYPE_MSGID_IDX,
        fields.TYPE_HEADERS_VAL, msgid)
    curried.expected = "hdoc"
    hdoc = try_unique_query(curried)

    # XXX this is only a quick hack to avoid regression
    # on the "multiple copies of the draft" issue, but
    # this is currently broken since it's not efficient to
    # look for this. Should lookup better.
    # FIXME!

    if hdoc is not None:
        hdoc_dict = hdoc.content
    else:
        # linear scan over the in-memory header docs; O(n) on the
        # number of cached docs (see FIXME above).
        hdocstore = self.memstore._hdoc_store
        match = [x for _, x in hdocstore.items() if x['msgid'] == msgid]
        hdoc_dict = first(match)

    if hdoc_dict is None:
        logger.warning("Could not find hdoc for msgid %s"
                       % (msgid,))
        return None
    msg_chash = hdoc_dict.get(fields.CONTENT_HASH_KEY)

    fdoc = self._get_fdoc_from_chash(msg_chash)
    if not fdoc:
        logger.warning("Could not find fdoc for msgid %s"
                       % (msgid,))
        return None
    return fdoc.content.get(fields.UID_KEY, None)
@deferred_to_thread
def _get_uid_from_msgid(self, msgid):
    """
    Return a UID for a given message-id, resolving in a worker thread.

    It first gets the headers-doc for that msg-id, and
    it found it queries the flags doc for the current mailbox
    for the matching content-hash.

    :param msgid: the message-id to look up
    :return: A UID, or None
    """
    # We need to wait a little bit, cause in some of the cases
    # the query is received right after we've saved the document,
    # and we cannot find it otherwise. This seems to be enough.

    # XXX do a deferLater instead ??
    # XXX is this working?
    return self._get_uid_from_msgidCb(msgid)
@deferred_to_thread
def set_flags(self, mbox, messages, flags, mode, observer):
    """
    Set flags for a sequence of messages.

    :param mbox: the mbox this message belongs to
    :type mbox: str or unicode
    :param messages: the messages to iterate through
    :type messages: sequence
    :param flags: the flags to be set
    :type flags: tuple
    :param mode: the mode for setting. 1 is append, -1 is remove, 0 set.
    :type mode: int
    :param observer: a deferred that will be called with the dictionary
                     mapping UIDs to flags after the operation has been
                     done.
    :type observer: deferred
    """
    reactor = self.reactor
    getmsg = self.get_msg_by_uid

    def flag_one(uid):
        # only flag messages still cached in the memory store
        msg = getmsg(uid, mem_only=True, flags_only=True)
        if msg is None:
            return None
        return uid, msg.setFlags(flags, mode)

    # filter(None, ...) drops uids that had no in-memory message
    result = dict(filter(None, (flag_one(uid) for uid in messages)))
    reactor.callFromThread(observer.callback, result)
# getters: generic for a mailbox
def get_msg_by_uid(self, uid, mem_only=False, flags_only=False):
    """
    Retrieve a LeapMessage by UID.

    This is used primarily in the Mailbox fetch and store methods.

    :param uid: the message uid to query by
    :type uid: int
    :param mem_only: a flag that indicates whether this Message should
                     pass a reference to soledad to retrieve missing pieces
                     or not.
    :type mem_only: bool
    :param flags_only: whether the message should carry only a reference
                       to the flags document.
    :type flags_only: bool
    :return: A LeapMessage instance matching the query,
             or None if not found.
    :rtype: LeapMessage
    """
    container = self.memstore.get_message(
        self.mbox, uid, flags_only=flags_only)
    if container is None:
        # nothing in memory: let the message hit soledad directly,
        # and verify it actually exists.
        msg = LeapMessage(self._soledad, uid, self.mbox, collection=self)
        if not msg.does_exist():
            return None
        return msg
    if mem_only:
        return LeapMessage(None, uid, self.mbox, collection=self,
                           container=container)
    # We pass a reference to soledad just to be able to retrieve
    # missing parts that cannot be found in the container, like
    # the content docs after a copy.
    return LeapMessage(self._soledad, uid, self.mbox,
                       collection=self, container=container)
def get_all_docs(self, _type=fields.TYPE_FLAGS_VAL):
    """
    Get all documents for the selected mailbox of the
    passed type. By default, it returns the flag docs.

    If you want access to the content, use __iter__ instead

    :return: a list of u1db documents
    :rtype: list of SoledadDocument
    """
    if _type not in fields.__dict__.values():
        raise TypeError("Wrong type passed to get_all_docs")

    if sameProxiedObjects(self._soledad, None):
        logger.warning('Tried to get messages but soledad is None!')
        return []

    docs = list(self._soledad.get_from_index(
        fields.TYPE_MBOX_IDX,
        _type, self.mbox))
    # inneficient, but first let's grok it and then
    # let's worry about efficiency.
    # XXX FIXINDEX -- should implement order by in soledad
    # FIXME ----------------------------------------------
    return sorted(docs, key=lambda doc: doc.content['uid'])
def all_soledad_uid_iter(self):
    """
    Return the UIDs of all messages that have a flags doc stored in
    soledad for this mailbox.

    NOTE(review): despite the name and the original docstring, this
    returns an (unordered) set, not a sorted iterator.

    :rtype: set
    """
    flag_docs = self._soledad.get_from_index(
        fields.TYPE_MBOX_IDX,
        fields.TYPE_FLAGS_VAL, self.mbox)
    return set(doc.content[self.UID_KEY]
               for doc in flag_docs if not empty(doc))
def all_uid_iter(self):
    """
    Return a tuple with the UIDs of all messages known either to the
    memory store or to soledad, for this mailbox.

    :rtype: tuple
    """
    in_memory = self.memstore.get_uids(self.mbox)
    known_to_soledad = self.memstore.get_soledad_known_uids(self.mbox)
    return tuple(set(in_memory).union(known_to_soledad))
def get_all_soledad_flag_docs(self):
    """
    Return a dict mapping UID -> flag-document content for every
    flag document in the soledad store for this mailbox.

    :rtype: dict
    """
    # XXX we really could return a reduced version with
    # just {'uid': (flags-tuple,) since the prefetch is
    # only oriented to get the flag tuples.
    docs = self._soledad.get_from_index(
        fields.TYPE_MBOX_IDX,
        fields.TYPE_FLAGS_VAL, self.mbox)
    return dict(
        (doc.content[self.UID_KEY], dict(doc.content))
        for doc in docs if not empty(doc.content))
def all_headers(self):
    """
    Return a dict with all the header documents for this mailbox.

    :rtype: dict
    """
    headers = self.memstore.all_headers(self.mbox)
    return headers
def count(self):
    """
    Return the number of messages in this mailbox.

    :rtype: int
    """
    total = self.memstore.count(self.mbox)
    return total
# unseen messages
def unseen_iter(self):
    """
    Get an iterator for the message UIDs with no `seen` flag
    for this mailbox.

    :return: iterator through unseen message doc UIDs
    :rtype: iterable
    """
    store = self.memstore
    return store.unseen_iter(self.mbox)
def count_unseen(self):
    """
    Count all messages with the `Unseen` flag.

    :returns: count
    :rtype: int
    """
    return sum(1 for _ in self.unseen_iter())
def get_unseen(self):
    """
    Get all messages with the `Unseen` flag.

    :returns: a list of LeapMessages
    :rtype: list
    """
    unseen = []
    for docid in self.unseen_iter():
        unseen.append(
            LeapMessage(self._soledad, docid, self.mbox, collection=self))
    return unseen
# recent messages
# XXX take it from memstore
def count_recent(self):
"""
Count all messages with the `Recent` flag.
It just retrieves the length of the recent_flags set,
which is stored in a specific type of document for
this collection.
:returns: count
:rtype: int
"""
return len(self.recent_flags)
def __len__(self):
    """
    Return the number of messages on this mailbox.

    :rtype: int
    """
    total = self.count()
    return total
def __iter__(self):
    """
    Iterate over all messages in this mailbox.

    :returns: iterator of LeapMessage instances, one per known UID.
    :rtype: iterable
    """
    for docuid in self.all_uid_iter():
        yield LeapMessage(self._soledad, docuid, self.mbox,
                          collection=self)
def __repr__(self):
    """
    Representation string for this object.
    """
    mbox, total = self.mbox, self.count()
    return u"<MessageCollection: mbox '%s' (%s)>" % (mbox, total)
# XXX should implement __eq__ also !!!
# use chash...
|
laborautonomo/leap_mail
|
src/leap/mail/imap/messages.py
|
Python
|
gpl-3.0
| 45,339
|
[
"MOE"
] |
a6870f63c8a3e1732264b3df35963350eeaee83198659a5a3210f4d849a577b2
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from espressopp.esutil import pmiimport
pmiimport('espressopp.storage')
from espressopp.storage.Storage import *
from espressopp.storage.DomainDecomposition import *
from espressopp.storage.DomainDecompositionNonBlocking import *
from espressopp.storage.DomainDecompositionAdress import *
|
kkreis/espressopp
|
src/storage/__init__.py
|
Python
|
gpl-3.0
| 1,161
|
[
"ESPResSo"
] |
b85769fdbd1c14ea2b6e1b088b84b26ee61046081272e680e846c4605612f0ab
|
# -*- coding: utf-8 -*-
""" Kullback-Leibler divergence functions and klUCB utilities.
- Faster implementation can be found in a C file, in ``Policies/C``, and should be compiled to speedup computations.
- Cf. http://banditslilian.gforge.inria.fr/docs/Policies.kullback.html for a documentation.
- Cf. https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
- Reference: [Filippi, Cappé & Garivier - Allerton, 2011](https://arxiv.org/pdf/1004.5229.pdf) and [Garivier & Cappé, 2011](https://arxiv.org/pdf/1102.2490.pdf)
.. warning::
All function are *not* vectorized, and assume only one value for each argument.
If you want vectorized function, use the wrapper :func:`numpy.vectorize`:
>>> import numpy as np
>>> klBern_vect = np.vectorize(klBern)
>>> klBern_vect([0.1, 0.5, 0.9], 0.2) # doctest: +ELLIPSIS
array([ 0.036..., 0.223..., 1.145...])
>>> klBern_vect(0.4, [0.2, 0.3, 0.4]) # doctest: +ELLIPSIS
array([ 0.104..., 0.022..., 0...])
>>> klBern_vect([0.1, 0.5, 0.9], [0.2, 0.3, 0.4]) # doctest: +ELLIPSIS
array([ 0.036..., 0.087..., 0.550...])
"""
from __future__ import division, print_function # Python 2 compatibility
__author__ = "Olivier Cappé, Aurélien Garivier, Lilian Besson"
__version__ = "0.6"
from math import log, sqrt, exp
import numpy as np
# Warning: np.dot is miserably slow!
eps = 1e-15 #: Threshold value: everything in [0, 1] is truncated to [eps, 1 - eps]
# --- Simple Kullback-Leibler divergence for known distributions
def klBern(x, y):
    """ Kullback-Leibler divergence KL(B(x), B(y)) between two Bernoulli
    distributions of means ``x`` and ``y``.
    https://en.wikipedia.org/wiki/Bernoulli_distribution#Kullback.E2.80.93Leibler_divergence

    Both arguments are clamped into [eps, 1 - eps] so the result stays finite.

    >>> klBern(0.5, 0.5)
    0.0
    >>> klBern(0.1, 0.9)  # doctest: +ELLIPSIS
    1.757779...
    """
    p = min(max(x, eps), 1 - eps)
    q = min(max(y, eps), 1 - eps)
    return p * log(p / q) + (1 - p) * log((1 - p) / (1 - q))
def klBin(x, y, n):
    """ Kullback-Leibler divergence between two Binomial distributions with
    the same parameter ``n`` and success probabilities ``x`` and ``y``.
    Equals ``n`` times the Bernoulli divergence.
    https://math.stackexchange.com/questions/320399/kullback-leibner-divergence-of-binomial-distributions

    >>> klBin(0.5, 0.5, 10)
    0.0
    >>> klBin(0.1, 0.9, 10)  # doctest: +ELLIPSIS
    17.57779...
    """
    p = min(max(x, eps), 1 - eps)
    q = min(max(y, eps), 1 - eps)
    return n * (p * log(p / q) + (1 - p) * log((1 - p) / (1 - q)))
def klPoisson(x, y):
    """ Kullback-Leibler divergence between two Poisson distributions of
    rates ``x`` and ``y``: ``y - x + x * log(x / y)``.
    https://en.wikipedia.org/wiki/Poisson_distribution#Kullback.E2.80.93Leibler_divergence

    Inputs are floored at ``eps`` so the result stays finite.

    >>> klPoisson(3, 3)
    0.0
    >>> klPoisson(2, 1)  # doctest: +ELLIPSIS
    0.386294...
    """
    a = max(x, eps)
    b = max(y, eps)
    return b - a + a * log(a / b)
def klExp(x, y):
    """ Kullback-Leibler divergence between two exponential distributions
    of means ``x`` and ``y``: ``x/y - 1 - log(x/y)``.
    https://en.wikipedia.org/wiki/Exponential_distribution#Kullback.E2.80.93Leibler_divergence

    Only the ratio x/y matters; returns +inf when either argument is
    non-positive.

    >>> klExp(3, 3)
    0.0
    >>> klExp(1, 2)  # doctest: +ELLIPSIS
    0.193147...
    >>> klExp(-3, 2)
    inf
    """
    if x <= 0 or y <= 0:
        return float('+inf')
    a = max(x, eps)
    b = max(y, eps)
    ratio = a / b
    return ratio - 1 - log(ratio)
def klGamma(x, y, a=1):
    """ Kullback-Leibler divergence for Gamma distributions:
    ``a * (x/y - 1 - log(x/y))``.
    https://en.wikipedia.org/wiki/Gamma_distribution#Kullback.E2.80.93Leibler_divergence

    Only the ratio x/y matters; returns +inf when either argument is
    non-positive. Reduces to :func:`klExp` for ``a == 1``.

    >>> klGamma(3, 3)
    0.0
    >>> klGamma(1, 2)  # doctest: +ELLIPSIS
    0.193147...
    >>> klGamma(-3, 2)
    inf
    """
    if x <= 0 or y <= 0:
        return float('+inf')
    u = max(x, eps)
    v = max(y, eps)
    return a * (u / v - 1 - log(u / v))
def klNegBin(x, y, r=1):
    """ Kullback-Leibler divergence for negative binomial distributions,
    with ``r`` the number of failures:
    ``r * log((r+x)/(r+y)) - x * log(y*(r+x) / (x*(r+y)))``.
    https://en.wikipedia.org/wiki/Gamma_distribution

    Inputs are floored at ``eps`` so the result stays finite.

    >>> klNegBin(0.5, 0.5)
    0.0
    >>> klNegBin(0.1, 0.9)  # doctest: +ELLIPSIS
    -0.711611...
    """
    u = max(x, eps)
    v = max(y, eps)
    return r * log((r + u) / (r + v)) - u * log(v * (r + u) / (u * (r + v)))
def klGauss(x, y, sig2=0.25):
    """ Kullback-Leibler divergence between two Gaussian distributions of
    means ``x`` and ``y`` and common variance ``sig2``:
    ``(x - y)^2 / (2 * sig2)``.
    https://en.wikipedia.org/wiki/Normal_distribution#Kullback.E2.80.93Leibler_divergence

    Symmetric in x and y; both may be negative.

    >>> klGauss(3, 3)
    0.0
    >>> klGauss(3, 6)
    18.0
    >>> klGauss(2, 1, sig2=10)
    0.05
    """
    delta = x - y
    return delta * delta / (2 * sig2)
# --- KL functions, for the KL-UCB policy
def klucb(x, d, kl, upperbound, lowerbound=float('-inf'), precision=1e-6):
    """ The generic KL-UCB index computation: the largest ``q`` in
    [lowerbound, upperbound] such that ``kl(x, q) <= d``, found by
    bisection up to ``precision``.

    - x: value of the cum reward,
    - d: upper bound on the divergence,
    - kl: the KL divergence to be used (klBern, klGauss, etc),
    - upperbound, lowerbound=float('-inf'): the known bound of the values x,
    - precision=1e-6: the threshold from where to stop the research,

    .. note:: It uses a bisection search.
    """
    low = max(x, lowerbound)
    high = upperbound
    while high - low > precision:
        mid = (low + high) / 2.
        if kl(x, mid) > d:
            high = mid
        else:
            low = mid
    return (low + high) / 2.
def klucbBern(x, d, precision=1e-6):
    """ KL-UCB index computation for Bernoulli distributions, using
    :func:`klucb` with the Bernoulli divergence :func:`klBern`.

    The closed-form Gaussian index provides a safe initial upper bound
    for the bisection.

    :param x: empirical mean, in [0, 1].
    :param d: upper bound on the divergence.
    :param precision: bisection stopping threshold.

    >>> klucbBern(0.1, 0.2)  # doctest: +ELLIPSIS
    0.378391...
    >>> klucbBern(0.5, 0.2)  # doctest: +ELLIPSIS
    0.787088...
    """
    upperbound = min(1., klucbGauss(x, d, sig2=0.25))
    # upperbound = min(1., klucbPoisson(x, d))  # also safe, and better ?
    # BUG FIX: `precision` was being passed positionally, where klucb's
    # signature (x, d, kl, upperbound, lowerbound, precision) silently
    # consumed it as `lowerbound`. Pass it by keyword instead.
    return klucb(x, d, klBern, upperbound, precision=precision)
def klucbGauss(x, d, sig2=0.25, precision=0.):
    """ KL-UCB index computation for Gaussian distributions, in closed
    form: ``x + sqrt(2 * sig2 * d)``.

    - Note that it does not require any search (``precision`` is
      accepted only for API symmetry with the other klucb functions).
    - Warning: it works only if the good variance constant is given.

    >>> klucbGauss(0.1, 0.2)  # doctest: +ELLIPSIS
    0.416227...
    """
    return x + sqrt(2 * sig2 * d)
def klucbPoisson(x, d, precision=1e-6):
    """ KL-UCB index computation for Poisson distributions, using
    :func:`klucb` with :func:`klPoisson`.

    :param x: empirical mean.
    :param d: upper bound on the divergence.
    :param precision: bisection stopping threshold.

    >>> klucbPoisson(0.1, 0.2)  # doctest: +ELLIPSIS
    0.450523...
    >>> klucbPoisson(0.5, 0.2)  # doctest: +ELLIPSIS
    1.089376...
    """
    # looks safe, to check: left (Gaussian) tail of Poisson dev
    upperbound = x + d + sqrt(d * d + 2 * x * d)
    # BUG FIX: `precision` was being passed positionally into klucb's
    # `lowerbound` slot; pass it by keyword instead.
    return klucb(x, d, klPoisson, upperbound, precision=precision)
def klucbExp(x, d, precision=1e-6):
    """ KL-UCB index computation for exponential distributions, using
    :func:`klucb` with :func:`klGamma` (equivalent for a = 1).

    Closed-form bounds initialize the bisection on both sides.

    >>> klucbExp(0.1, 0.2)  # doctest: +ELLIPSIS
    0.202741...
    >>> klucbExp(0.5, 0.2)  # doctest: +ELLIPSIS
    1.013706...
    """
    # XXX where do the constants 0.77 and 1.61 come from?
    if d < 0.77:
        # safe, klexp(x,y) >= e^2/(2*(1-2e/3)) if x=y(1-e)
        ub = x / (1 + 2. / 3 * d - sqrt(4. / 9 * d * d + 2 * d))
    else:
        ub = x * exp(d + 1)
    if d > 1.61:
        lb = x * exp(d)
    else:
        lb = x / (1 + d - sqrt(d * d + 2 * d))
    return klucb(x, d, klGamma, ub, lb, precision)
# FIXME this one is wrong!
def klucbGamma(x, d, precision=1e-6):
    """ KL-UCB index computation for Gamma distributions, using
    :func:`klucb` with :func:`klGamma`.

    .. warning:: FIXME — flagged as wrong upstream: the scale parameter
       ``a`` of :func:`klGamma` is not specified here.

    >>> klucbGamma(0.1, 0.2)  # doctest: +ELLIPSIS
    0.202...
    >>> klucbGamma(0.5, 0.2)  # doctest: +ELLIPSIS
    1.013...
    """
    # XXX where do the constants 0.77 and 1.61 come from?
    if d < 0.77:
        # safe, klexp(x,y) >= e^2/(2*(1-2e/3)) if x=y(1-e)
        ub = x / (1 + 2. / 3 * d - sqrt(4. / 9 * d * d + 2 * d))
    else:
        ub = x * exp(d + 1)
    if d > 1.61:
        lb = x * exp(d)
    else:
        lb = x / (1 + d - sqrt(d * d + 2 * d))
    # FIXME specify the value for a !
    return klucb(x, d, klGamma, max(ub, 1e2), min(-1e2, lb), precision)
# --- max EV functions
def maxEV(p, V, klMax):
    """ Maximize the expectation of ``V`` over distributions ``q`` such
    that KL(p, q) < klMax.

    :param p: reference probability vector (1D numpy array).
    :param V: value vector, same length as ``p``.
    :param klMax: KL radius of the admissible set.
    :return: the maximizing distribution Uq (1D numpy array).

    - Reference: Section 3.2 of [Filippi, Cappé & Garivier - Allerton, 2011](https://arxiv.org/pdf/1004.5229.pdf).
    """
    Uq = np.zeros(len(p))
    Kb = p > 0.
    K = ~Kb
    if any(K):
        # Do we need to put some mass on a point where p is zero?
        # If yes, this has to be on one which maximizes V.
        eta = np.max(V[K])
        J = K & (V == eta)
        if eta > np.max(V[Kb]):
            y = np.dot(p[Kb], np.log(eta - V[Kb])) + log(np.dot(p[Kb], (1. / (eta - V[Kb]))))
            if y < klMax:
                rb = exp(y - klMax)
                Uqtemp = p[Kb] / (eta - V[Kb])
                Uq[Kb] = rb * Uqtemp / np.sum(Uqtemp)
                Uq[J] = (1. - rb) / np.sum(J)
                return Uq
    # Here, only points where p is strictly positive (in Kb) will
    # receive non-zero mass.
    if any(np.abs(V[Kb] - V[Kb][0]) > 1e-8):
        eta = reseqp(p[Kb], V[Kb], klMax)  # (eta = nu in the article)
        Uq = p / (eta - V)
        Uq = Uq / np.sum(Uq)
    else:
        # Case where all values in V[Kb] are almost identical.
        # BUG FIX: normalize by the number of support points (count of
        # True entries in Kb), not by the total length, so that Uq sums
        # to 1 even when some entries of p are zero. Identical to the
        # old behavior when all entries of p are positive.
        Uq[Kb] = 1.0 / np.sum(Kb)
    return Uq
def reseqp(p, V, klMax):
    """ Solve f(reseqp(p, V, klMax)) = klMax, using Newton's method.

    .. note:: This is a subroutine of :func:`maxEV`.

    :param p: probability vector restricted to its positive support.
    :param V: value vector, same length as ``p``.
    :param klMax: target value of f.
    :return: the root ``value`` (nu in the article), or +inf when all
             entries of V are (almost) equal.

    - Reference: Eq. (4) in Section 3.2 of [Filippi, Cappé & Garivier - Allerton, 2011](https://arxiv.org/pdf/1004.5229.pdf).
    - Warning: `np.dot` is very slow!
    """
    MV = np.max(V)
    mV = np.min(V)
    value = MV + 0.1
    tol = 1e-4
    if MV < mV + tol:
        # degenerate case: f is flat, there is no finite root
        return float('inf')
    u = np.dot(p, (1 / (value - V)))
    y = np.dot(p, np.log(value - V)) + log(u) - klMax
    # Left-in DEBUG print statements removed: this is called from the
    # policy's hot path and must not write to stdout on every call.
    while np.abs(y) > tol:
        yp = u - np.dot(p, (1 / (value - V)**2)) / u  # derivative
        value -= y / yp  # newton iteration
        if value < MV:
            # overshot past the singularity at MV: back off halfway
            value = (value + y / yp + MV) / 2  # unlikely, but not impossible
        u = np.dot(p, (1 / (value - V)))
        # use math.log consistently (the original mixed log and np.log)
        y = np.dot(p, np.log(value - V)) + log(u) - klMax
    return value
# https://www.docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html#scipy.optimize.fixed_point
from scipy.optimize import minimize
def reseqp2(p, V, klMax):
    """ Solve f(reseqp(p, V, klMax)) = klMax, using a blackbox minimizer, from scipy.optimize.

    - FIXME it does not work well yet!

    .. note:: This is a subroutine of :func:`maxEV`.

    :param p: probability vector restricted to its positive support.
    :param V: value vector, same length as ``p``.
    :param klMax: target value of f.
    :return: the minimizer found by scipy (``res.x``).

    - Reference: Eq. (4) in Section 3.2 of [Filippi, Cappé & Garivier - Allerton, 2011].
    - Warning: `np.dot` is very slow!
    """
    MV = np.max(V)
    mV = np.min(V)
    tol = 1e-4
    # starting point just above the smallest value of V
    value0 = mV + 0.1

    def f(value):
        """ Objective |f(value) - klMax| to minimize."""
        if MV < mV + tol:
            # degenerate case: f is flat
            y = float('inf')
        else:
            u = np.dot(p, (1 / (value - V)))
            y = np.dot(p, np.log(value - V)) + log(u)
        return np.abs(y - klMax)

    res = minimize(f, value0)
    print("scipy.optimize.minimize returned", res)
    return res.x
# --- Debugging
if __name__ == "__main__":
    # Code for debugging purposes: run the doctests, then exercise the
    # klucb indices and the maxEV / reseqp subroutines on two examples.
    from doctest import testmod
    print("\nTesting automatically all the docstring written in each functions of this module :")
    testmod(verbose=True)

    print("\nklucbGauss(0.9, 0.2) =", klucbGauss(0.9, 0.2))
    print("klucbBern(0.9, 0.2) =", klucbBern(0.9, 0.2))
    print("klucbPoisson(0.9, 0.2) =", klucbPoisson(0.9, 0.2))

    p = np.array([0.5, 0.5])
    print("\np =", p)
    V = np.array([10, 3])
    print("V =", V)
    klMax = 0.1
    print("klMax =", klMax)
    print("eta = ", reseqp(p, V, klMax))
    print("Uq = ", maxEV(p, V, klMax))

    # BUG FIX: assign the new p and V *before* printing them; the
    # original printed the previous values and only then reassigned.
    p = np.array([0.11794872, 0.27948718, 0.31538462, 0.14102564, 0.0974359, 0.03076923, 0.00769231, 0.01025641, 0.])
    print("\np =", p)
    V = np.array([0, 1, 2, 3, 4, 5, 6, 7, 10])
    print("V =", V)
    klMax = 0.0168913409484
    print("klMax =", klMax)
    print("eta = ", reseqp(p, V, klMax))
    print("Uq = ", maxEV(p, V, klMax))

    x = 2
    print("\nx =", x)
    d = 2.51
    print("d =", d)
    print("klucbExp(x, d) = ", klucbExp(x, d))
    ub = x / (1 + 2. / 3 * d - sqrt(4. / 9 * d * d + 2 * d))
    print("Upper bound = ", ub)
    print("Stupid upperbound = ", x * exp(d + 1))

    print("\nDone for tests of 'kullback.py' ...")
|
Naereen/notebooks
|
kullback.py
|
Python
|
mit
| 18,923
|
[
"Gaussian"
] |
8395e5257e3a35076f256575657aec1df3732ee95970825c561afd6284f876ff
|
../../../../../../../../share/pyshared/orca/scripts/apps/yelp/yelp_v2/script_utilities.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/apps/yelp/yelp_v2/script_utilities.py
|
Python
|
gpl-3.0
| 89
|
[
"ORCA"
] |
e38da35131b034912eff2870dbc1c8ef6c4787c7ab354cddfede56b5f6cbcff9
|
''' Synchronizer
Module that keeps the database synchronized with the CS
Module that updates the RSS database ( ResourceStatusDB ) with the information
in the Resources section. If there are additions in the CS, those are incorporated
to the DB. If there are deletions, entries in RSS tables for those elements are
deleted ( except the Logs table ).
'''
__RCSID__ = '$Id: $'
from DIRAC import gLogger, S_OK
from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
from DIRAC.ResourceStatusSystem.Utilities import CSHelpers
from DIRAC.ResourceStatusSystem.Utilities.RssConfiguration import RssConfiguration
from DIRAC.ResourceStatusSystem.Utilities import Utils
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getFTS3Servers
from DIRAC.ConfigurationSystem.Client.PathFinder import getServiceURL
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
ResourceManagementClient = getattr(Utils.voimport(
'DIRAC.ResourceStatusSystem.Client.ResourceManagementClient'), 'ResourceManagementClient')
class Synchronizer(object):
    '''
    Every time there is a successful write on the CS, Synchronizer().sync() is
    executed. It updates the database with the values on the CS.
    '''

    def __init__(self, rStatus=None, rManagement=None, defaultStatus="Unknown"):
        '''
        :param rStatus: optional injected ResourceStatusClient (a fresh one is
            created when None).
        :param rManagement: optional injected ResourceManagementClient (a fresh
            one is created when None).
        :param defaultStatus: status assigned to newly discovered elements.
        '''
        # Warm up local CS
        CSHelpers.warmUp()
        # Bug fix: previously an explicitly supplied client was ignored (the
        # attribute was only assigned when the argument was None, and left
        # unset otherwise, causing AttributeError on first use). Injected
        # clients are honoured now.
        self.rStatus = rStatus if rStatus is not None else ResourceStatusClient()
        self.rManagement = rManagement if rManagement is not None else ResourceManagementClient()
        self.defaultStatus = defaultStatus
        self.rssConfig = RssConfiguration()
        # Token owner defaults to the service account, overridden by the proxy
        # username when a valid proxy is available.
        self.tokenOwner = "rs_svc"
        result = getProxyInfo()
        if result['OK']:
            self.tokenOwner = result['Value']['username']

    def sync(self, _eventName, _params):
        '''
        Main synchronizer method. It synchronizes the three types of elements: Sites,
        Resources and Nodes. Each _syncX method returns a dictionary with the additions
        and deletions.

        examples:
          >>> s.sync( None, None )
              S_OK()

        :Parameters:
          **_eventName** - any
            this parameter is ignored, but needed by caller function.
          **_params** - any
            this parameter is ignored, but needed by caller function.

        :return: S_OK
        '''
        # Failures of the individual steps are logged but do not abort the
        # remaining steps (best effort, as before).
        syncSites = self._syncSites()
        if not syncSites['OK']:
            gLogger.error(syncSites['Message'])

        syncResources = self._syncResources()
        if not syncResources['OK']:
            gLogger.error(syncResources['Message'])

        syncNodes = self._syncNodes()
        if not syncNodes['OK']:
            gLogger.error(syncNodes['Message'])

        return S_OK()

    ## Protected methods #########################################################

    def _syncSites(self):
        '''
        Sync sites: compares CS with DB and does the necessary modifications.

        Sites are special-cased (single statusType 'all'), so they do not go
        through the generic __syncStatusElements helper.
        '''
        gLogger.info('-- Synchronizing sites --')

        # sites in CS
        res = CSHelpers.getSites()
        if not res['OK']:
            return res
        sitesCS = res['Value']
        gLogger.verbose('%s sites found in CS' % len(sitesCS))

        # sites in RSS
        result = self.rStatus.selectStatusElement('Site', 'Status',
                                                  meta={'columns': ['Name']})
        if not result['OK']:
            return result
        sitesDB = [siteDB[0] for siteDB in result['Value']]

        # Sites that are in DB but not (anymore) in CS
        toBeDeleted = list(set(sitesDB).difference(set(sitesCS)))
        gLogger.verbose('%s sites to be deleted' % len(toBeDeleted))

        # Delete sites
        for siteName in toBeDeleted:
            deleteQuery = self.rStatus._extermineStatusElement('Site', siteName)
            gLogger.verbose('Deleting site %s' % siteName)
            if not deleteQuery['OK']:
                return deleteQuery

        # Sites that are in CS but not (anymore) in DB
        toBeAdded = list(set(sitesCS).difference(set(sitesDB)))
        gLogger.verbose('%s site entries to be added' % len(toBeAdded))

        for site in toBeAdded:
            query = self.rStatus.addIfNotThereStatusElement('Site', 'Status',
                                                            name=site,
                                                            statusType='all',
                                                            status=self.defaultStatus,
                                                            elementType='Site',
                                                            tokenOwner=self.tokenOwner,
                                                            reason='Synchronized')
            if not query['OK']:
                return query

        return S_OK()

    def _syncResources(self):
        '''
        Sync resources: compares CS with DB and does the necessary modifications.
        ( StorageElements, FTS, FileCatalogs and ComputingElements )
        '''
        gLogger.info('-- Synchronizing Resources --')

        gLogger.verbose('-> StorageElements')
        ses = self.__syncStorageElements()
        if not ses['OK']:
            gLogger.error(ses['Message'])

        gLogger.verbose('-> FTS')
        fts = self.__syncFTS()
        if not fts['OK']:
            gLogger.error(fts['Message'])

        gLogger.verbose('-> FileCatalogs')
        fileCatalogs = self.__syncFileCatalogs()
        if not fileCatalogs['OK']:
            gLogger.error(fileCatalogs['Message'])

        gLogger.verbose('-> ComputingElements')
        computingElements = self.__syncComputingElements()
        if not computingElements['OK']:
            gLogger.error(computingElements['Message'])

        gLogger.verbose('-> removing resources that no longer exist in the CS')
        removingResources = self.__removeNonExistingResourcesFromRM()
        if not removingResources['OK']:
            gLogger.error(removingResources['Message'])

        # FIXME: VOMS

        return S_OK()

    def _syncNodes(self):
        '''
        Sync nodes: compares CS with DB and does the necessary modifications.
        ( Queues )
        '''
        gLogger.info('-- Synchronizing Nodes --')

        gLogger.verbose('-> Queues')
        queues = self.__syncQueues()
        if not queues['OK']:
            gLogger.error(queues['Message'])

        return S_OK()

    ## Private methods ###########################################################

    def __removeNonExistingResourcesFromRM(self):
        '''
        Remove resources from DowntimeCache table that no longer exist in the CS.
        '''
        if not getServiceURL("ResourceStatus/ResourceManagement"):
            gLogger.verbose(
                'ResourceManagement is not installed, skipping removal of non existing resources...')
            return S_OK()

        sesHosts = CSHelpers.getStorageElementsHosts()
        if not sesHosts['OK']:
            return sesHosts
        sesHosts = sesHosts['Value']

        resources = sesHosts

        # FTS servers and computing elements are added on a best-effort basis:
        # a failure to fetch them is silently ignored (as before).
        ftsServer = getFTS3Servers()
        if ftsServer['OK']:
            resources.extend(ftsServer['Value'])

        ce = CSHelpers.getComputingElements()
        if ce['OK']:
            resources.extend(ce['Value'])

        downtimes = self.rManagement.selectDowntimeCache()
        if not downtimes['OK']:
            return downtimes

        # Remove hosts that no longer exist in the CS
        for host in downtimes['Value']:
            gLogger.verbose('Checking if %s is still in the CS' % host[0])
            if host[0] not in resources:
                gLogger.verbose(
                    '%s is no longer in CS, removing entry...' % host[0])
                result = self.rManagement.deleteDowntimeCache(name=host[0])
                if not result['OK']:
                    return result

        return S_OK()

    def __syncStatusElements(self, element, elementType, namesCS, label):
        '''
        Generic CS vs. DB synchronization for one element type. Factors out the
        logic that was duplicated (with only label/type changes) across the
        StorageElement, FTS, FileCatalog, ComputingElement and Queue sync
        methods. Log messages are normalized to gLogger.verbose.

        :param element: element family in RSS ('Resource' or 'Node').
        :param elementType: RSS elementType ('StorageElement', 'FTS', ...).
        :param namesCS: list of element names currently present in the CS.
        :param label: human-readable plural label used in log messages.
        '''
        gLogger.verbose('%s %s found in CS' % (len(namesCS), label))

        namesDB = self.rStatus.selectStatusElement(element, 'Status',
                                                   elementType=elementType,
                                                   meta={'columns': ['Name']})
        if not namesDB['OK']:
            return namesDB
        namesDB = [row[0] for row in namesDB['Value']]

        # Elements that are in DB but not (anymore) in CS
        toBeDeleted = list(set(namesDB).difference(set(namesCS)))
        gLogger.verbose('%s %s to be deleted' % (len(toBeDeleted), label))

        for name in toBeDeleted:
            deleteQuery = self.rStatus._extermineStatusElement(element, name)
            gLogger.verbose('... %s' % name)
            if not deleteQuery['OK']:
                return deleteQuery

        statusTypes = self.rssConfig.getConfigStatusType(elementType)

        result = self.rStatus.selectStatusElement(element, 'Status',
                                                  elementType=elementType,
                                                  meta={'columns': ['Name', 'StatusType']})
        if not result['OK']:
            return result
        presentTuples = [(x[0], x[1]) for x in result['Value']]

        # For each ( name, statusType ) tuple not present in the DB, add it.
        wantedTuples = [(name, statusType)
                        for name in namesCS for statusType in statusTypes]
        toBeAdded = list(set(wantedTuples).difference(set(presentTuples)))
        gLogger.verbose('%s %s entries to be added' % (len(toBeAdded), label))

        for name, statusType in toBeAdded:
            query = self.rStatus.addIfNotThereStatusElement(element, 'Status',
                                                            name=name,
                                                            statusType=statusType,
                                                            status=self.defaultStatus,
                                                            elementType=elementType,
                                                            tokenOwner=self.tokenOwner,
                                                            reason='Synchronized')
            if not query['OK']:
                return query

        return S_OK()

    def __syncComputingElements(self):
        '''
        Sync ComputingElements: compares CS with DB and does the necessary modifications.
        '''
        cesCS = CSHelpers.getComputingElements()
        if not cesCS['OK']:
            return cesCS
        return self.__syncStatusElements('Resource', 'ComputingElement',
                                         cesCS['Value'], 'Computing elements')

    def __syncFileCatalogs(self):
        '''
        Sync FileCatalogs: compares CS with DB and does the necessary modifications.
        '''
        catalogsCS = CSHelpers.getFileCatalogs()
        if not catalogsCS['OK']:
            return catalogsCS
        return self.__syncStatusElements('Resource', 'Catalog',
                                         catalogsCS['Value'], 'File catalogs')

    def __syncFTS(self):
        '''
        Sync FTS: compares CS with DB and does the necessary modifications.
        '''
        ftsCS = CSHelpers.getFTS()
        if not ftsCS['OK']:
            return ftsCS
        return self.__syncStatusElements('Resource', 'FTS',
                                         ftsCS['Value'], 'FTS endpoints')

    def __syncStorageElements(self):
        '''
        Sync StorageElements: compares CS with DB and does the necessary modifications.
        '''
        sesCS = CSHelpers.getStorageElements()
        if not sesCS['OK']:
            return sesCS
        return self.__syncStatusElements('Resource', 'StorageElement',
                                         sesCS['Value'], 'storage elements')

    def __syncQueues(self):
        '''
        Sync Queues: compares CS with DB and does the necessary modifications.
        '''
        queuesCS = CSHelpers.getQueues()
        if not queuesCS['OK']:
            return queuesCS
        return self.__syncStatusElements('Node', 'Queue',
                                         queuesCS['Value'], 'Queues')
################################################################################
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
arrabito/DIRAC
|
ResourceStatusSystem/Utilities/Synchronizer.py
|
Python
|
gpl-3.0
| 22,548
|
[
"DIRAC"
] |
d569acba742a0f751d6524d7c922ddc3cabccb76d1349ef477f5951d5f2d2347
|
import matplotlib.pyplot as plt
import numpy as np
class TweetVisualize(object):
    """Matplotlib plotting helpers for tweet-activity time series.

    NOTE(review): this is Python 2 code (`xrange`, integer division in the
    tick-label arithmetic); a Python 3 port would need `range` and `//`.
    Both methods draw onto the current pyplot figure and return nothing.
    """

    def visualize_kde(self, est, min_time, max_time, scale_factor = 1.0):
        """Plot the density estimate `est` sampled over [min_time, max_time].

        `est` is called with an array of time points and must return a
        sequence of density values; presumably a KDE callable — TODO confirm.
        Times are assumed to be seconds (tick labels are rendered H:M:S).
        `scale_factor` multiplies every density value before plotting.
        """
        #kde = KernelDensity(kernel='gaussian', bandwidth = bandwidth)
        n_bins = (max_time - min_time) * 10  # 10 sample points per time unit
        x = np.linspace(min_time, max_time, num=n_bins)
        estimate = est(x)
        plt.plot(x, [e * scale_factor for e in estimate])
        # Show ~10 evenly spaced ticks, labelled as H:M:S.
        num_ticks = 10
        x_ticks = [x[i] for i in xrange(0, len(x), len(x)/num_ticks)]  # Python 2 int division
        x_tick_strs = [str(int(t/3600)) + ':' + str(int((t%3600)/60)) + ":" + str(int(t%60)) for t in x_ticks]
        plt.xticks(x_ticks, x_tick_strs, rotation=45, size='small')

    #Create a histogram of tweet counts based on the bins
    def visualize_frequency(self, query_result, x_bins, bin_size):
        """Draw a bar chart of tweet counts per time bin.

        `query_result['tweet_created']` is a mapping whose values are tweet
        creation times (seconds) — assumed schema, verify against caller.
        `x_bins` lists the left edge of each bin, each of width `bin_size`;
        times falling outside every bin are silently dropped.
        """
        def find_bin(t):
            # Linear scan: index of the half-open bin [b, b + bin_size)
            # containing t, or -1 when t falls outside all bins.
            for i in xrange(len(x_bins)):
                b = x_bins[i]
                if (t >= b and t < b + bin_size):
                    return i
            return -1
        y_bins = [0 for i in xrange(len(x_bins))]
        for key in query_result['tweet_created'].keys():
            time = query_result['tweet_created'][key]
            q_bin = find_bin(time)
            if (q_bin != -1):
                y_bins[q_bin] = y_bins[q_bin] + 1
        plt.bar(x_bins, y_bins, edgecolor = "none", color='r', width=bin_size)
        #show 10 ticks
        num_ticks = 10
        x_ticks = [x_bins[i] for i in xrange(0, len(x_bins), len(x_bins)/num_ticks)]
        x_tick_strs = [str(t/3600) + ':' + str((t%3600)/60) + ":" + str(t%60) for t in x_ticks]
        plt.xticks(x_ticks, x_tick_strs, rotation=45, size='small')
|
vrkrishn/FBHacks
|
src/reference/TweetVisualize.py
|
Python
|
mit
| 1,843
|
[
"Gaussian"
] |
2303b2769b13b2d6d07a9a21bf2b942c62c37910c4b93bbc1b90a65c11541a76
|
# Copyright 2017 Canonical Ltd.
# Licensed under the LGPLv3, see LICENCE file for details.
import json
import logging
import os
import tempfile
from datetime import datetime, timedelta
from unittest import TestCase
import macaroonbakery.bakery as bakery
import macaroonbakery.checkers as checkers
import macaroonbakery.httpbakery as httpbakery
import macaroonbakery.httpbakery.agent as agent
import requests.cookies
from httmock import HTTMock, response, urlmatch
from six.moves.urllib.parse import parse_qs, urlparse
log = logging.getLogger(__name__)
PRIVATE_KEY = 'CqoSgj06Zcgb4/S6RT4DpTjLAfKoznEY3JsShSjKJEU='
PUBLIC_KEY = 'YAhRSsth3a36mRYqQGQaLiS4QJax0p356nd+B8x7UQE='
class TestAgents(TestCase):
    """Tests for macaroonbakery's agent-based discharge interaction.

    The two end-to-end tests stand up a fake protected server, discharger and
    login endpoints with HTTMock and verify that an httpbakery.Client equipped
    with an AgentInteractor completes the discharge dance and reaches the
    protected resource.
    """

    def setUp(self):
        # Materialise the three module-level fixture strings (valid file,
        # bad-key file, missing-username file) as temporary files on disk.
        fd, filename = tempfile.mkstemp()
        with os.fdopen(fd, 'w') as f:
            f.write(agent_file)
        self.agent_filename = filename
        fd, filename = tempfile.mkstemp()
        with os.fdopen(fd, 'w') as f:
            f.write(bad_key_agent_file)
        self.bad_key_agent_filename = filename
        fd, filename = tempfile.mkstemp()
        with os.fdopen(fd, 'w') as f:
            f.write(no_username_agent_file)
        self.no_username_agent_filename = filename

    def tearDown(self):
        # Remove the temporary files created in setUp.
        os.remove(self.agent_filename)
        os.remove(self.bad_key_agent_filename)
        os.remove(self.no_username_agent_filename)

    def test_load_auth_info(self):
        # A well-formed agents file yields the key pair and all agent entries.
        auth_info = agent.load_auth_info(self.agent_filename)
        self.assertEqual(str(auth_info.key), PRIVATE_KEY)
        self.assertEqual(str(auth_info.key.public_key), PUBLIC_KEY)
        self.assertEqual(auth_info.agents, [
            agent.Agent(url='https://1.example.com/', username='user-1'),
            agent.Agent(url='https://2.example.com/discharger', username='user-2'),
            agent.Agent(url='http://0.3.2.1', username='test-user'),
        ])

    def test_invalid_agent_json(self):
        # Malformed JSON is rejected with AgentFileFormatError.
        with self.assertRaises(agent.AgentFileFormatError):
            agent.read_auth_info('}')

    def test_invalid_read_auth_info_arg(self):
        # Non-string/non-dict input is rejected as well.
        with self.assertRaises(agent.AgentFileFormatError):
            agent.read_auth_info(0)

    def test_load_auth_info_with_bad_key(self):
        # A key that does not decode to a valid private key is rejected.
        with self.assertRaises(agent.AgentFileFormatError):
            agent.load_auth_info(self.bad_key_agent_filename)

    def test_load_auth_info_with_no_username(self):
        # An agent entry without a username is rejected.
        with self.assertRaises(agent.AgentFileFormatError):
            agent.load_auth_info(self.no_username_agent_filename)

    def test_agent_login(self):
        """Full agent discharge flow against mocked /here, /discharge, /login."""
        discharge_key = bakery.generate_key()

        class _DischargerLocator(bakery.ThirdPartyLocator):
            def third_party_info(self, loc):
                if loc == 'http://0.3.2.1':
                    return bakery.ThirdPartyInfo(
                        public_key=discharge_key.public_key,
                        version=bakery.LATEST_VERSION,
                    )
        d = _DischargerLocator()
        server_key = bakery.generate_key()
        server_bakery = bakery.Bakery(key=server_key, locator=d)

        @urlmatch(path='.*/here')
        def server_get(url, request):
            # Protected endpoint: 200 'done' with a valid macaroon, otherwise
            # a 401 discharge-required response carrying a third-party caveat.
            ctx = checkers.AuthContext()
            test_ops = [bakery.Op(entity='test-op', action='read')]
            auth_checker = server_bakery.checker.auth(
                httpbakery.extract_macaroons(request.headers))
            try:
                auth_checker.allow(ctx, test_ops)
                resp = response(status_code=200,
                                content='done')
            except bakery.PermissionDenied:
                caveats = [
                    checkers.Caveat(location='http://0.3.2.1',
                                    condition='is-ok')
                ]
                m = server_bakery.oven.macaroon(
                    version=bakery.LATEST_VERSION,
                    expiry=datetime.utcnow() + timedelta(days=1),
                    caveats=caveats, ops=test_ops)
                content, headers = httpbakery.discharge_required_response(
                    m, '/',
                    'test',
                    'message')
                resp = response(status_code=401,
                                content=content,
                                headers=headers)
            return request.hooks['response'][0](resp)

        @urlmatch(path='.*/discharge')
        def discharge(url, request):
            # Without a token: demand the agent interaction method.
            # With a token: discharge the caveat unconditionally.
            qs = parse_qs(request.body)
            if qs.get('token64') is None:
                return response(
                    status_code=401,
                    content={
                        'Code': httpbakery.ERR_INTERACTION_REQUIRED,
                        'Message': 'interaction required',
                        'Info': {
                            'InteractionMethods': {
                                'agent': {'login-url': '/login'},
                            },
                        },
                    },
                    headers={'Content-Type': 'application/json'})
            else:
                qs = parse_qs(request.body)
                content = {q: qs[q][0] for q in qs}
                m = httpbakery.discharge(checkers.AuthContext(), content,
                                         discharge_key, None, alwaysOK3rd)
                return {
                    'status_code': 200,
                    'content': {
                        'Macaroon': m.to_dict()
                    }
                }

        auth_info = agent.load_auth_info(self.agent_filename)

        @urlmatch(path='.*/login')
        def login(url, request):
            # The agent must identify itself with its username and public key;
            # answer with a macaroon bearing a local third-party caveat.
            qs = parse_qs(urlparse(request.url).query)
            self.assertEqual(request.method, 'GET')
            self.assertEqual(
                qs, {'username': ['test-user'], 'public-key': [PUBLIC_KEY]})
            b = bakery.Bakery(key=discharge_key)
            m = b.oven.macaroon(
                version=bakery.LATEST_VERSION,
                expiry=datetime.utcnow() + timedelta(days=1),
                caveats=[bakery.local_third_party_caveat(
                    PUBLIC_KEY,
                    version=httpbakery.request_version(request.headers))],
                ops=[bakery.Op(entity='agent', action='login')])
            return {
                'status_code': 200,
                'content': {
                    'macaroon': m.to_dict()
                }
            }

        with HTTMock(server_get), \
                HTTMock(discharge), \
                HTTMock(login):
            client = httpbakery.Client(interaction_methods=[
                agent.AgentInteractor(auth_info),
            ])
            resp = requests.get(
                'http://0.1.2.3/here',
                cookies=client.cookies,
                auth=client.auth())
            self.assertEqual(resp.content, b'done')

    def test_agent_legacy(self):
        """Legacy (visit/wait) agent flow against mocked endpoints."""
        discharge_key = bakery.generate_key()

        class _DischargerLocator(bakery.ThirdPartyLocator):
            def third_party_info(self, loc):
                if loc == 'http://0.3.2.1':
                    return bakery.ThirdPartyInfo(
                        public_key=discharge_key.public_key,
                        version=bakery.LATEST_VERSION,
                    )
        d = _DischargerLocator()
        server_key = bakery.generate_key()
        server_bakery = bakery.Bakery(key=server_key, locator=d)

        @urlmatch(path='.*/here')
        def server_get(url, request):
            # Same protected endpoint as in test_agent_login.
            ctx = checkers.AuthContext()
            test_ops = [bakery.Op(entity='test-op', action='read')]
            auth_checker = server_bakery.checker.auth(
                httpbakery.extract_macaroons(request.headers))
            try:
                auth_checker.allow(ctx, test_ops)
                resp = response(status_code=200,
                                content='done')
            except bakery.PermissionDenied:
                caveats = [
                    checkers.Caveat(location='http://0.3.2.1',
                                    condition='is-ok')
                ]
                m = server_bakery.oven.macaroon(
                    version=bakery.LATEST_VERSION,
                    expiry=datetime.utcnow() + timedelta(days=1),
                    caveats=caveats, ops=test_ops)
                content, headers = httpbakery.discharge_required_response(
                    m, '/',
                    'test',
                    'message')
                resp = response(
                    status_code=401,
                    content=content,
                    headers=headers,
                )
            return request.hooks['response'][0](resp)

        class InfoStorage:
            # Shared cell: /discharge records the caveat info here, /wait
            # reads it back.
            info = None

        @urlmatch(path='.*/discharge')
        def discharge(url, request):
            # Legacy protocol: answer with visit/wait URLs instead of the
            # modern interaction-methods response.
            qs = parse_qs(request.body)
            if qs.get('caveat64') is not None:
                content = {q: qs[q][0] for q in qs}

                class InteractionRequiredError(Exception):
                    def __init__(self, error):
                        self.error = error

                class CheckerInError(bakery.ThirdPartyCaveatChecker):
                    def check_third_party_caveat(self, ctx, info):
                        InfoStorage.info = info
                        raise InteractionRequiredError(
                            httpbakery.Error(
                                code=httpbakery.ERR_INTERACTION_REQUIRED,
                                version=httpbakery.request_version(
                                    request.headers),
                                message='interaction required',
                                info=httpbakery.ErrorInfo(
                                    wait_url='http://0.3.2.1/wait?'
                                             'dischargeid=1',
                                    visit_url='http://0.3.2.1/visit?'
                                              'dischargeid=1'
                                ),
                            ),
                        )
                try:
                    httpbakery.discharge(
                        checkers.AuthContext(), content,
                        discharge_key, None, CheckerInError())
                except InteractionRequiredError as exc:
                    return response(
                        status_code=401,
                        content={
                            'Code': exc.error.code,
                            'Message': exc.error.message,
                            'Info': {
                                'WaitURL': exc.error.info.wait_url,
                                'VisitURL': exc.error.info.visit_url,
                            },
                        },
                        headers={'Content-Type': 'application/json'})

        key = bakery.generate_key()

        @urlmatch(path='.*/visit')
        def visit(url, request):
            # JSON-accepting clients are redirected to the agent-specific
            # visit endpoint.
            if request.headers.get('Accept') == 'application/json':
                return {
                    'status_code': 200,
                    'content': {
                        'agent': '/agent-visit',
                    }
                }
            raise Exception('unexpected call to visit without Accept header')

        @urlmatch(path='.*/agent-visit')
        def agent_visit(url, request):
            # First POST (no macaroon yet): reply with a discharge-required
            # response bearing a local third-party caveat for the agent's key.
            # Second POST (macaroon attached): confirm the agent login.
            if request.method != "POST":
                raise Exception('unexpected method')
            log.info('agent_visit url {}'.format(url))
            body = json.loads(request.body.decode('utf-8'))
            if body['username'] != 'test-user':
                raise Exception('unexpected username in body {!r}'.format(request.body))
            public_key = bakery.PublicKey.deserialize(body['public_key'])
            ms = httpbakery.extract_macaroons(request.headers)
            if len(ms) == 0:
                b = bakery.Bakery(key=discharge_key)
                m = b.oven.macaroon(
                    version=bakery.LATEST_VERSION,
                    expiry=datetime.utcnow() + timedelta(days=1),
                    caveats=[bakery.local_third_party_caveat(
                        public_key,
                        version=httpbakery.request_version(request.headers))],
                    ops=[bakery.Op(entity='agent', action='login')])
                content, headers = httpbakery.discharge_required_response(
                    m, '/',
                    'test',
                    'message')
                resp = response(status_code=401,
                                content=content,
                                headers=headers)
                return request.hooks['response'][0](resp)
            return {
                'status_code': 200,
                'content': {
                    'agent_login': True
                }
            }

        @urlmatch(path='.*/wait$')
        def wait(url, request):
            # Deliver the final discharge macaroon once /visit has run.
            class EmptyChecker(bakery.ThirdPartyCaveatChecker):
                def check_third_party_caveat(self, ctx, info):
                    return []
            if InfoStorage.info is None:
                self.fail('visit url has not been visited')
            m = bakery.discharge(
                checkers.AuthContext(),
                InfoStorage.info.id,
                InfoStorage.info.caveat,
                discharge_key,
                EmptyChecker(),
                _DischargerLocator(),
            )
            return {
                'status_code': 200,
                'content': {
                    'Macaroon': m.to_dict()
                }
            }

        with HTTMock(server_get), \
                HTTMock(discharge), \
                HTTMock(visit), \
                HTTMock(wait), \
                HTTMock(agent_visit):
            client = httpbakery.Client(interaction_methods=[
                agent.AgentInteractor(
                    agent.AuthInfo(
                        key=key,
                        agents=[agent.Agent(username='test-user',
                                            url=u'http://0.3.2.1')],
                    ),
                ),
            ])
            resp = requests.get(
                'http://0.1.2.3/here',
                cookies=client.cookies,
                auth=client.auth(),
            )
            self.assertEqual(resp.content, b'done')
agent_file = '''
{
"key": {
"public": "YAhRSsth3a36mRYqQGQaLiS4QJax0p356nd+B8x7UQE=",
"private": "CqoSgj06Zcgb4/S6RT4DpTjLAfKoznEY3JsShSjKJEU="
},
"agents": [{
"url": "https://1.example.com/",
"username": "user-1"
}, {
"url": "https://2.example.com/discharger",
"username": "user-2"
}, {
"url": "http://0.3.2.1",
"username": "test-user"
}]
}
'''
bad_key_agent_file = '''
{
"key": {
"public": "YAhRSsth3a36mRYqQGQaLiS4QJax0p356nd+B8x7UQE=",
"private": "CqoSgj06Zcgb4/S6RT4DpTjLAfKoznEY3JsShSjKJE=="
},
"agents": [{
"url": "https://1.example.com/",
"username": "user-1"
}, {
"url": "https://2.example.com/discharger",
"username": "user-2"
}]
}
'''
no_username_agent_file = '''
{
"key": {
"public": "YAhRSsth3a36mRYqQGQaLiS4QJax0p356nd+B8x7UQE=",
"private": "CqoSgj06Zcgb4/S6RT4DpTjLAfKoznEY3JsShSjKJEU="
},
"agents": [{
"url": "https://1.example.com/"
}, {
"url": "https://2.example.com/discharger",
"username": "user-2"
}]
}
'''
class ThirdPartyCaveatCheckerF(bakery.ThirdPartyCaveatChecker):
    """Adapter turning a plain callable into a third-party caveat checker.

    The callable receives the parsed (condition, argument) pair of the caveat
    and returns the extra caveats to attach to the discharge macaroon.
    """

    def __init__(self, check):
        # check: callable(cond, arg) -> list of caveats
        self._check = check

    def check_third_party_caveat(self, ctx, info):
        cond, arg = checkers.parse_caveat(info.condition)
        return self._check(cond, arg)


# A checker that approves every caveat without adding further caveats.
alwaysOK3rd = ThirdPartyCaveatCheckerF(lambda cond, arg: [])
|
go-macaroon-bakery/py-macaroon-bakery
|
macaroonbakery/tests/test_agent.py
|
Python
|
lgpl-3.0
| 15,700
|
[
"VisIt"
] |
e6c4c80bda3c07900bffe04f0a2f7f4707c1bc06a54e99251f13a256241148e1
|
"""Kernels"""
# Code
# Copyright (c) 2013-2016, Massachusetts Institute of Technology
# Copyright (c) 2016-2022, Alex Gorodetsky
#
# This file is part of GPEXP:
# Author: Alex Gorodetsky alex@alexgorodetsky
#
# GPEXP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# GPEXP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GPEXP. If not, see <http://www.gnu.org/licenses/>.
# Code
import numpy as np
import math
class Kernel(object):
    """Base class for covariance kernels.

    Stores the input dimension and a dict of hyperparameters; the default
    kernel evaluates to 0 everywhere. Subclasses override :meth:`evaluateF`.
    """

    nugget = 0.0
    hyperParam = dict({})

    def __init__(self, hyperParam, dimension, *argc):
        """Store the input dimension and the hyperparameter dict."""
        self.dimension = dimension
        self.hyperParam = hyperParam
        super(Kernel, self).__init__()

    def updateHyperParameters(self, hyperParamNew):
        """Replace the hyperparameters after validating that every key is known."""
        for name in hyperParamNew.keys():
            assert name in self.hyperParam.keys(), (name, " is not a valid hyperParameter")
        self.hyperParam = hyperParamNew

    def evaluate(self, x1, x2):
        """Evaluate the kernel on two point sets of shape (n, dimension).

        When the sets have different sizes, the smaller one is tiled to match
        before delegating to :meth:`evaluateF`.
        """
        assert len(x2.shape) > 1 and len(x1.shape) > 1, "Must supply nd arrays to evaluation function"
        n1, n2 = x1.shape[0], x2.shape[0]
        assert x1.shape[1] == self.dimension and x2.shape[1] == self.dimension, \
            (" Incorrect dimension of input points fed to kernel ", x1.shape, x2.shape)
        if n1 > n2:
            return self.evaluateF(x1, np.tile(x2, (n1, 1)))
        if n1 < n2:
            return self.evaluateF(np.tile(x1, (n2, 1)), x2)
        return self.evaluateF(x1, x2)

    def evaluateF(self, x1, x2):
        """Default kernel: identically zero."""
        return 0
class KernelIsoMatern(Kernel):
    """ Isotropic Matern kernel. Only the nu = 3/2 case is implemented. """

    def __init__(self, rho, signalSize, dimension, nu=3.0/2.0):
        """
        :param rho: length scale of the kernel.
        :param signalSize: signal variance (multiplicative factor).
        :param dimension: input dimension.
        :param nu: smoothness parameter; note nu is not treated as hyperparameter.
        """
        hyperParam = dict({'rho': rho, 'signalSize': signalSize})
        self.nu = nu
        super(KernelIsoMatern, self).__init__(hyperParam, dimension)

    def evaluateF(self, x1, x2):
        """ Private evaluate method in which x1 and x2 are the same shape.

        Returns signalSize * (1 + sqrt(3) d / rho) * exp(-sqrt(3) d / rho)
        where d is the row-wise Euclidean distance (the nu = 3/2 Matern form).

        :raises NotImplementedError: when nu != 3/2.
        """
        assert x1.shape == x2.shape, "__evaluate() received non-equal shaped point sets"
        if np.abs(1.5 - self.nu) < 1e-10:
            #d = np.linalg.norm(x1-x2)#np.sqrt(np.sum((x1-x2)**2.0,axis=1))
            d = np.sqrt(np.sum((x1 - x2)**2.0, axis=1))
            term = np.sqrt(3) * d / self.hyperParam['rho']
            return self.hyperParam['signalSize'] * (1.0 + term) * np.exp(-term)
        # Bug fix: previously `out` was only assigned inside the nu == 3/2
        # branch, so any other nu raised a confusing NameError on return.
        raise NotImplementedError("KernelIsoMatern only implements nu = 3/2, got nu=%s" % self.nu)

    def derivativeWrtHypParams(self, x1, x2):
        """ Derivative w.r.t. hyperparameters (not implemented for this kernel). """
        # Assume that derivative is taken at current hyperparameters
        assert x1.shape == x2.shape, "__evaluate() received non-equal shaped point sets"
        raise AttributeError("derivativeWrtHypParams not implemented for KernelIsoMatern")
class KernelSquaredExponential(Kernel):
    """Anisotropic squared-exponential (RBF) kernel.

    k(x, x') = signalSize * exp(-0.5 * sum_i (x_i - x'_i)^2 / cl_i^2)

    hyperParam keys: 'cl0' .. 'cl<d-1>' (per-dimension correlation
    lengths) and 'signalSize' (amplitude).
    """
    def __init__(self, correlationLength, signalSize, dimension):
        """correlationLength: sequence of length 1 (isotropic, tiled to
        every dimension) or of length `dimension`."""
        hyperParam = dict({})
        if len(correlationLength) == 1:
            correlationLength = np.tile( correlationLength, (dimension))
        for ii in range(len(correlationLength)):
            hyperParam['cl'+str(ii)] = correlationLength[ii]
        hyperParam['signalSize'] = signalSize
        super(KernelSquaredExponential, self).__init__(hyperParam, dimension)

    def _corrLengths(self):
        """Collect the per-dimension correlation lengths into one array."""
        cl = np.zeros((self.dimension))
        for ii in range(self.dimension):
            cl[ii] = self.hyperParam['cl'+str(ii)]
        return cl

    def evaluateF(self, x1, x2):
        """Evaluate for equally shaped point sets; returns a 1-d array."""
        assert x1.shape == x2.shape, "__evaluate() received non-equal shaped point sets"
        cl = self._corrLengths()
        out = self.hyperParam['signalSize']* \
            np.exp (-0.5 * np.sum((x1-x2)**2.0*np.tile(cl**-2.0, (x1.shape[0],1)),axis=1) )
        return out

    def derivativeWrtHypParams(self, x1, x2):
        """Derivative of the kernel with respect to each hyperparameter.

        Returns a dict mapping hyperparameter name -> 1-d array of
        per-point derivatives, evaluated at the current hyperparameters.
        """
        #Assume that derivative is taken at current hyperparameters
        assert x1.shape == x2.shape, "__evaluate() received non-equal shaped point sets"
        cl = self._corrLengths()
        out = {}
        evals = self.evaluateF(x1, x2)
        for key in self.hyperParam.keys():
            if key == 'signalSize':
                # d/ds [s * E(x1,x2)] = E(x1,x2)
                out[key] = np.exp (-0.5 * np.sum((x1-x2)**2.0*np.tile(cl**-2.0, (x1.shape[0],1)),axis=1) )
            else:
                # BUG FIX: array indices must be integers; the original
                # used float(key[2:]) which fails as a numpy index.
                direction = int(key[2:])  # which dimension this 'cl<i>' scales
                out[key] = evals*(x1[:,direction]-x2[:,direction])**2.0 \
                    / cl[direction]**3.0
        return out

    def derivative(self, x1, x2, version=0):
        """ Squared Exponential
            input:
                point1 : ndarray
                point2 : 1xdimension
            output:
                if version == 0
                    evaluation : float or ndarray
                        derivative of Gaussian function around point2
                        out[jj, ii] = dK(point1[jj,:], point2) / d point1[jj,ii]
                if version == 1:
                    evaluation :float or ndarray
                        out[jj, ii] = dK(point2, point1[jj,:])/ point2[ii]
            This function defines the Gaussian kernel Derivative
            Note:
            If we define K(x_1) = K(x_1, x_2)
            then this function computes dK(x_1)/dx_1 which is vector valued (size of dimension
        """
        assert len(x2.shape) > 1 and len(x1.shape) > 1, "Must supply nd arrays to evaluation function"
        assert x2.shape[0] == 1 and x2.shape[1] == self.dimension, "x2 not in correct shape"
        assert x1.shape[0] > 0 and x1.shape[1] == self.dimension, "x1 not in correct shape"
        cl = self._corrLengths()
        if version == 0 or version == 1:
            nPointsx1 = x1.shape[0]
            rEvals = self.evaluate(x1,x2)
            # NOTE(review): rEvals already carries signalSize, so the factor
            # below appears to apply it twice -- confirm against the intended
            # analytic derivative before changing.
            out = -self.hyperParam['signalSize']*0.5*2 * (x1 - np.tile(x2, (nPointsx1,1)))/ \
                np.tile(cl**2.0, (nPointsx1,1))* \
                np.tile(np.reshape(rEvals, (x1.shape[0],1)), ((1,self.dimension)))
            return out
        # BUG FIX: any other `version` previously left `out` unbound and
        # crashed with UnboundLocalError; fail loudly instead.
        raise ValueError("version must be 0 or 1, got %r" % (version,))
class KernelMehlerND(Kernel):
    """Tensor-product Mehler kernel: the product of per-dimension
    1-d Mehler kernels, each with its own t hyperparameter (keyed by
    the integer dimension index)."""
    def __init__(self, tIn, dimension):
        hyperParam = dict({})
        self.oneDKern = []
        for dim in range(dimension):
            hyperParam[dim] = tIn[dim]
            self.oneDKern.append(KernelMehler1D(tIn[dim], 1))
        super(KernelMehlerND, self).__init__(hyperParam, dimension)

    def updateHyperParameters(self, params):
        """Update every t value and push it down to each 1-d kernel."""
        for keyName in self.hyperParam.keys():
            self.hyperParam[keyName] = params[keyName]
        for dim in range(self.dimension):
            self.oneDKern[dim].updateHyperParameters(dict({'t': self.hyperParam[dim]}))

    def evaluateF(self, x1, x2):
        """
        Parameter
        --------
        x1 : 1darray
            n x dimension
        x2 : 1darray
            n x dimension
        Returns
        -------
        evaluation : float or 1darray
            product over dimensions of the 1-d Mehler kernel values
        Notes
        -----
        """
        assert x1.shape == x2.shape, "__evaluate() received non-equal shaped point sets"
        nPoints = x1.shape[0]
        vals = np.ones((x1.shape[0]))
        # Multiply the per-dimension 1-d kernel evaluations together.
        for dim, kern in enumerate(self.oneDKern):
            col1 = x1[:, dim].reshape((nPoints, 1))
            col2 = x2[:, dim].reshape((nPoints, 1))
            vals = vals * kern.evaluate(col1, col2)
        return vals

    def derivative(self, x1, x2):
        """ Derivative of Mehler 2D kernel
            input:
                point1 : ndarray
                point2 : 1xdimension
            output:
                evaluation : float or ndarray
                    derivative of Gaussian function around point2
                    out[jj, ii] = dK(point1[jj,:], point2) / d point1[jj,ii]
            This function defines the Mehler Kernel Derivative
            Note:
            If we define K(x_1) = K(x_1, x_2)
            then this function computes dK(x_1)/dx_1 which is vector valued (size of dimension
        """
        raise AttributeError("derivative of KernelMehlerND not yet implemented")
class KernelMehler1D(Kernel):
    """One-dimensional Mehler (Hermite) kernel with hyperparameter 't'.

    Requires |t| < 1; the prefactor (1 - t^2)^(-1/2) diverges as |t| -> 1.
    """
    def __init__(self, tIn, dimension):
        # The Mehler kernel is defined only for scalar inputs.
        assert dimension==1, "Mehler Hermite Kernel is only one dimensional"
        hyperParam = dict({})
        hyperParam['t'] = tIn
        super(KernelMehler1D, self).__init__(hyperParam, dimension)
    def evaluateF(self, x1, x2):
        """
        Parameter
        --------
        x1 : 1darray
            n x dimension
        x2 : 1darray
            n x dimension
        Returns
        -------
        evaluation : float or 1darray
            Mehler kernel values, reshaped to a flat length-n array.
        Notes
        -----
        Aborts the whole process if the first computed value is NaN
        (e.g. when |t| >= 1 or inputs overflow the exponential).
        """
        assert x1.shape[1]== 1 and x2.shape[1]==1, \
            "Hermite1d kernel only accepts one dimensional points"
        assert x1.shape == x2.shape, "__evaluate() received non-equal shaped point sets"
        # Mehler formula: (1-t^2)^(-1/2) * exp(-(t^2 x1^2 - 2 t x1 x2 + t^2 x2^2)
        #                                      / (2 (1-t^2)))
        out = (1.0 - self.hyperParam['t']**2.0)**(-1.0/2.0) * \
            np.exp( - ( x1**2.0*self.hyperParam['t']**2.0 -
            2.0 * self.hyperParam['t'] * x1 * x2 + x2**2.0 * self.hyperParam['t']**2.0) \
            / (2.0 * (1.0 - self.hyperParam['t']**2.0)))
        #out = (2.0*np.pi)**-1.0 * out
        # NOTE(review): only out[0,0] is checked for NaN, and exit() (the
        # site-module helper) is used rather than sys.exit or an exception --
        # consider raising instead; confirm no caller relies on this abort.
        if math.isnan(out[0,0]):
            print("xs ", x1[0,:], x2[0,:])
            print("t", self.hyperParam['t'])
            print('NAN in kernel hermi1d exiting')
            exit()
        return np.reshape(out, (x1.shape[0]))
    def derivative(self, x1, x2):
        """ Derivative of Mehler 1D kernel
            input:
                point1 : ndarray
                point2 : 1xdimension
            output:
                evaluation : float or ndarray
                    derivative of Gaussian function around point2
                    out[jj, ii] = dK(point1[jj,:], point2) / d point1[jj,ii]
            This function defines the Mehler Kernel Derivative
            Note:
            If we define K(x_1) = K(x_1, x_2)
            then this function computes dK(x_1)/dx_1 which is vector valued (size of dimension
        """
        assert len(x2.shape) > 1 and len(x1.shape) > 1, "Must supply nd arrays to evaluation function"
        assert x2.shape[0] == 1 and x2.shape[1] == self.dimension, "x2 not in correct shape"
        assert x1.shape[0] > 0 and x1.shape[1] == self.dimension, "x1 not in correct shape"
        nPointsx1 = x1.shape[0]
        rEvals = self.evaluate(x1,x2)
        # Chain rule on the exponent of the Mehler formula, scaled by the
        # kernel values themselves (broadcast over the x2 tile).
        out = -0.5 * ( 2.0* x1 * self.hyperParam['t']**2.0 -\
              2.0*self.hyperParam['t']*np.tile(x2, (nPointsx1,1)))/ \
              (1.0-self.hyperParam['t']**2.0) *\
              np.reshape(rEvals, (nPointsx1,1))
        return out
|
goroda/GPEXP
|
gpExp/kernels.py
|
Python
|
gpl-2.0
| 11,780
|
[
"Gaussian"
] |
6cefbf80177b0030901f0c3cc04f03b5a65e344904732a0da3cb9bda8af47692
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2022 Stanford University and the Authors
#
# Authors: Peter Eastman
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from mdtraj.formats import PDBxTrajectoryFile
from mdtraj.testing import eq
from mdtraj import load
import math
import tempfile
import pytest
# Probe for OpenMM once at import time so the whole module can be
# skipped below when it is not installed.
try:
    import openmm
    HAVE_OPENMM = True
except ImportError:
    HAVE_OPENMM = False
# special pytest global to mark all tests in this module
pytestmark = pytest.mark.skipif(not HAVE_OPENMM, reason='test_pdbx.py needs OpenMM.')
def test_convert(get_fn):
    """Round-trip two PDB files through PDBx/mmCIF and verify the result
    is identical, including a strided/atom-subset reload."""
    for filename in ['2EQQ.pdb', '4OH9.pdb']:
        # Load the reference PDB file.
        reference = load(get_fn(filename))
        with tempfile.NamedTemporaryFile(suffix='.pdbx', mode='w', delete=False) as file:
            # Write every frame out in PDBx/mmCIF format.
            writer = PDBxTrajectoryFile(file.name, mode='w')
            for frame in range(reference.n_frames):
                writer.write(reference.xyz[frame], reference.topology,
                             reference.unitcell_lengths, reference.unitcell_angles)
            writer.close()
            # Read it back and check the round trip preserved everything.
            recovered = load(file.name)
            assert eq(reference.n_frames, recovered.n_frames)
            assert eq(reference.n_atoms, recovered.n_atoms)
            assert eq(reference.xyz, recovered.xyz)
            assert eq(reference.unitcell_lengths, recovered.unitcell_lengths)
            assert eq(reference.unitcell_angles, recovered.unitcell_angles)
            for left, right in zip(reference.topology.atoms, recovered.topology.atoms):
                assert eq(left, right)
            # Also exercise partial loading: an atom subset with a stride.
            subset = load(file.name, atom_indices=range(10, 20), stride=2)
            assert eq(subset.n_frames, math.ceil(reference.n_frames/2))
            assert eq(subset.n_atoms, 10)
            assert eq(reference.xyz[::2, 10:20], subset.xyz)
            expected_atoms = list(reference.topology.atoms)[10:20]
            for left, right in zip(expected_atoms, subset.topology.atoms):
                assert eq(left.name, right.name)
|
mdtraj/mdtraj
|
tests/test_pdbx.py
|
Python
|
lgpl-2.1
| 2,869
|
[
"MDTraj",
"OpenMM"
] |
21eda4794db6efdaeaeb652bbbadb0a916dce9d8159c5e432a62bcbeaac081b6
|
from mpi4py import MPI
import pyCore
import sys, getopt
def main(argv):
    """Parse -g/--model and -m/--mesh options, uniformly refine the mesh
    once, and write 'before'/'after' ASCII VTK files.

    argv : command-line argument list, excluding the program name.
    """
    model = ''
    mesh = ''
    try:
        opts, args = getopt.getopt(argv,"hg:m:",["model=","mesh="])
    except getopt.GetoptError:
        print('test_pytCore.py -g <model> -m <mesh>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('test_pytCore.py -g <model> -m <mesh>')
            sys.exit()
        elif opt in ("-g", "--model"):
            model = arg
        elif opt in ("-m", "--mesh"):
            mesh = arg
    # NOTE(review): the stray '"' in these messages is never closed; it
    # looks like a typo for 'Model file is "%s"' -- confirm before changing
    # the printed output.
    print('Model file is "', model)
    print('Mesh file is "', mesh)
    # PCU initialization
    pyCore.PCU_Comm_Init()
    # gmi initialization
    pyCore.gmi_register_mesh()
    # load the mesh and model and write the initial mesh to vtk
    # (rebinds `mesh` from the filename string to the loaded mesh object)
    mesh = pyCore.loadMdsMesh(model, mesh)
    pyCore.writeASCIIVtkFiles('before', mesh);
    # setup uniform refiner and call mesh adapt
    ma_input = pyCore.configureUniformRefine(mesh, 2);
    pyCore.adapt(ma_input);
    # write the adapted mesh to vtk
    pyCore.writeASCIIVtkFiles('after', mesh);
    # PCU finalization (original comment said "gmi finalization";
    # this call frees the PCU communicator)
    pyCore.PCU_Comm_Free()
if __name__ == "__main__":
    main(sys.argv[1:])
|
SCOREC/core
|
python_wrappers/test_pyCore.py
|
Python
|
bsd-3-clause
| 1,149
|
[
"VTK"
] |
9f8b396e5cdf7f5d87a168efedfd4f0b8001a1b83100d611098e617993805169
|
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2008, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
################################
# These variables are overwritten by Zenoss when the ZenPack is exported
# or saved. Do not modify them directly here.
# (Machine-managed metadata: edit through the Zenoss ZenPack UI instead.)
NAME = 'ZenPacks.AndreaConsadori.Colubris'
VERSION = '2.0'
AUTHOR = 'Andrea Consadori'
LICENSE = ''
NAMESPACE_PACKAGES = ['ZenPacks', 'ZenPacks.AndreaConsadori']
PACKAGES = ['ZenPacks', 'ZenPacks.AndreaConsadori', 'ZenPacks.AndreaConsadori.Colubris']
INSTALL_REQUIRES = []
COMPAT_ZENOSS_VERS = '>=2.2'
PREV_ZENPACK_NAME = ''
# STOP_REPLACEMENTS
################################
# Zenoss will not overwrite any changes you make below here.
from setuptools import setup, find_packages
setup(
    # This ZenPack metadata should usually be edited with the Zenoss
    # ZenPack edit page. Whenever the edit page is submitted it will
    # overwrite the values below (the ones it knows about) with new values.
    name = NAME,
    version = VERSION,
    author = AUTHOR,
    license = LICENSE,
    # This is the version spec which indicates what versions of Zenoss
    # this ZenPack is compatible with
    compatZenossVers = COMPAT_ZENOSS_VERS,
    # previousZenPackName is a facility for telling Zenoss that the name
    # of this ZenPack has changed. If no ZenPack with the current name is
    # installed then a zenpack of this name if installed will be upgraded.
    prevZenPackName = PREV_ZENPACK_NAME,
    # Indicate to setuptools which namespace packages the zenpack
    # participates in
    namespace_packages = NAMESPACE_PACKAGES,
    # Tell setuptools what packages this zenpack provides.
    packages = find_packages(),
    # Tell setuptools to figure out for itself which files to include
    # in the binary egg when it is built.
    include_package_data = True,
    # Tell setuptools what non-python files should also be included
    # with the binary egg.
    # BUG FIX: the original dict literal listed the '' key twice; Python
    # keeps only the last duplicate, so the '*.txt' pattern was silently
    # discarded. The two lists are merged under a single '' key here.
    package_data = {
        '': ['*.txt', '../COPYRIGHT.txt', '../LICENSE.txt'],
        NAME: ['objects/*','skins/*/*','services/*', 'reports/*/*',
                'modeler/*/*', 'daemons/*', 'lib/*', 'libexec/*'],
        },
    # Indicate dependencies on other python modules or ZenPacks. This line
    # is modified by zenoss when the ZenPack edit page is submitted. Zenoss
    # tries to put add/delete the names it manages at the beginning of this
    # list, so any manual additions should be added to the end. Things will
    # go poorly if this line is broken into multiple lines or modified to
    # dramatically.
    install_requires = INSTALL_REQUIRES,
    # Every ZenPack egg must define exactly one zenoss.zenpacks entry point
    # of this form.
    entry_points = {
        'zenoss.zenpacks': '%s = %s' % (NAME, NAME),
    },
    # All ZenPack eggs must be installed in unzipped form.
    zip_safe = False,
)
|
anksp21/Community-Zenpacks
|
ZenPacks.AndreaConsadori.Colubris/setup.py
|
Python
|
gpl-2.0
| 3,300
|
[
"VisIt"
] |
2a6cf42e4ab7cbdbd7bfaadc9785513a0c0524286b8358c9d2957f6da73fe587
|
#Author: Kevin Boehme
#Date: 12-20-2014
#This program is part of a TnSeq analysis pipeline, designed to take raw fastq or fasta reads, and produce tabulated data on hop count occurrence.
import sys
sys.dont_write_bytecode = True
import ConfigParser
import logging
import gzip
import os
import glob
import subprocess
import re
from objects import *
from time import time
from datetime import datetime
class hops_pipeline(object):
    """TnSeq hop-count pipeline.

    Trims transposon sequences from raw reads, maps the trimmed reads with
    Bowtie2, and tabulates hop counts per gene / intergenic region.
    Configure with read_config(), then execute with run_pipeline().
    """
    def __init__(self):
        """Set every pipeline setting to its default; real values are
        filled in later by read_config()."""
        ########### From config ###########
        #Inputs
        self.input_files = []   # glob-expanded read files (fasta/fastq, optionally .gz)
        self.ref = ""           # Bowtie2 reference index prefix
        self.ptt = []           # glob-expanded PTT gene-annotation files
        self.out = ""           # output filename prefix
        #Parameters
        self.transposon = ""    # transposon sequence to trim ("" disables the check)
        self.mismatches = 0     # allowed mismatches when matching the transposon
        self.minbaseoffset = 0  # first offset at which to look for the transposon
        self.maxbaseoffset = 5  # last offset at which to look for the transposon
        self.gene_trim = 0
        self.read_length = 0    # length reads are truncated to after trimming
        self.minimum_hop_count = 0  # hop sites below this total are discarded
        #bools
        self.debug = False
        self.normalize = True
        self.delete_intermediate_files = True
        self.check_transposon = True
        self.reverse_complement_reads = False
        self.igv_normalize = False
        self.negateIGV = False
        ########### End From Config ###########
        #Intermediate files for output
        self.int_prefix = []
        self.int_trimmed = [] # list of files for the intermediate trimmed read files.
        self.int_sam = [] # list of sam output files.
        self.tabulated_filename = ""
        self.gene_tabulated_filename = ""
        self.intergenic_filename = ""
        self.igv_filenames = {} # key: ref_name | value: file
        self.num_conditions = 0
        #information variables (per-input-file counters)
        self.removed_notn = 0
        self.removed_tooshort = 0
        self.kept_reads = 0
        self.original_read_count = 0
        self.starttime = time()
        # Variables for tabulating hop hits.
        self.chromosomes = {}
        self.sam_file_contents = {}
        self.normalization_coefficients = []
############ Read Config File and Run Pipeline ###############################
    def read_config(self, config_path):
        """Parse the INI-style config file and populate all settings.

        Reads the [input], [parameters] and [options] sections, validating
        every value and exiting with a message on any problem; then derives
        the output/intermediate paths, creates the output directories, sets
        up logging, and opens one trimmed-FASTA handle per input file.

        Parameters
        ----------
        config_path : str
            Path to the configuration file; its directory becomes the
            working/output directory of the run.
        """
        cp = ConfigParser.RawConfigParser()
        try:
            cp.read(config_path)
        except:
            sys.exit("Error reading config file.")
        # input paths
        self.input_files = glob.glob(cp.get('input', 'Reads'))
        self.ref = cp.get('input', 'BowtieReference')
        self.ptt = glob.glob(cp.get('input', 'Ptt'))
        self.out = cp.get('input', 'Out')
        if len(self.input_files) == 0:
            sys.exit('Error with input Reads parameter.')
        elif self.ref == None:
            sys.exit('Error with BowtieReference parameter.')
        elif len(self.ptt) == 0:
            sys.exit('Error with Ptt parameter.')
        elif self.out == None:
            sys.exit("Error with Out parameter.")
        #Parameters
        try:
            self.minbaseoffset = int(cp.get('parameters', 'MinBaseOffset'))
        except:
            sys.exit('Error with MinBaseOffset parameter')
        try:
            self.maxbaseoffset = int(cp.get('parameters', 'MaxBaseOffset'))
        except:
            sys.exit('Error with MaxBaseOffset parameter')
        try:
            self.transposon = cp.get('parameters', 'Transposon')
        except:
            sys.exit('Error with Transposon parameter.')
        if self.transposon == "":
            print "Transposon parameter was empty. This means no check will be made for a transposon sequence and all reads will move to the mapping stage."
            self.check_transposon = False
        elif not (re.match('^[ACGTacgt]+$',self.transposon)):
            sys.exit('Error with Transposon parameter (Make sure it only contains [ATCG]).')
        try:
            self.mismatches = int ( cp.get('parameters', 'Mismatches') )
        except:
            sys.exit('Error with Mismatches parameter (Not an integer).')
        if self.mismatches < 0:
            sys.exit('Mismatches parameter is negative.')
        elif self.check_transposon and self.mismatches >= len(self.transposon):
            sys.exit("Mismatches parameter is same length or greater than transposon sequence (Note if you dont want to check for a transposon sequence, leave it blank in the config file.")
        try:
            self.gene_trim = int ( cp.get('parameters', 'GeneTrim') )
        except:
            sys.exit('Error with GeneTrim parameter (Not an integer).')
        if self.gene_trim < 0:
            sys.exit('GeneTrim parameter is negative.')
        elif self.gene_trim > 49:
            sys.exit('Error with GeneTrim parameter (Must be 49 or smaller).')
        try:
            self.read_length = int ( cp.get('parameters', 'ReadLength') )
        except:
            sys.exit('Error with ReadLength parameter (Not an integer).')
        if self.read_length < 0:
            sys.exit('ReadLength parameter is negative.')
        try:
            self.minimum_hop_count = int ( cp.get('parameters', 'MinimumHopCount'))
        except:
            sys.exit('Error with MinimumHopCount parameter (Not an integer).')
        if self.minimum_hop_count < 0:
            sys.exit('MinimumHopCount parameter is negative.')
        #Options
        try:
            self.debug = cp.getboolean('options', 'Debug')
        except:
            sys.exit('Error with Debug parameter (Not True/False).')
        try:
            self.normalize = cp.get('options','Normalize')
            # NOTE(review): the inner sys.exit raises SystemExit, which the
            # bare `except:` below catches -- so the more specific message is
            # swallowed and replaced by the generic one. Confirm and narrow.
            if self.normalize != "Intergenic" and self.normalize != "Total":
                sys.exit('Normalize parameter not one of the following [Intergenic, Total].')
        except:
            sys.exit('Error with Normalize parameter.')
        try:
            self.delete_intermediate_files = cp.getboolean('options','DeleteIntermediateFiles')
        except:
            sys.exit('Error with DeleteIntermediateFiles parameter (Not True/False)')
        try:
            self.reverse_complement_reads = cp.getboolean('options','ReverseComplementReads')
        except:
            sys.exit('Error with ReverseComplementReads parameter')
        try:
            self.igv_normalize = cp.getboolean('options','IGVNormalize')
        except:
            sys.exit('Error with IGVNormalize parameter')
        try:
            self.negateIGV = cp.getboolean('options','IGVNegateNegStrand')
        except:
            # NOTE(review): message says IGVNormalize but this block parses
            # IGVNegateNegStrand -- likely a copy-paste slip in the message.
            sys.exit('Error with IGVNormalize parameter')
        # Generate other variables
        self.num_conditions = len(self.input_files)
        self.output_directory = os.path.dirname(config_path)
        if self.output_directory == "":
            self.output_directory = "./"
        else:
            self.output_directory += "/"
        self.tabulated_filename = self.output_directory + "output_files/" + self.out + "-HOPS.txt"
        self.gene_tabulated_filename = self.output_directory + "output_files/" + self.out + "-GENE.txt"
        self.intergenic_filename = self.output_directory + "output_files/" + self.out + "-INTERGENIC.txt"
        if not os.path.exists( self.output_directory + "output_files/"):
            subprocess.check_output(["mkdir", self.output_directory + "output_files"])
        self.set_up_logger(self.output_directory + "output_files/" + self.out + ".log")
        # Echo the full config file into the log for provenance.
        with open(config_path,'r') as f:
            logging.info( bcolors.HEADER + "\n\n=============== Config File Settings ===============\n\n"
                + bcolors.ENDC + ''.join(f.readlines()) + bcolors.HEADER
                + "\n--------------------------------------\n\n" + bcolors.ENDC)
        if not os.path.exists( self.output_directory + "intermediate_files"):
            subprocess.check_output(["mkdir", self.output_directory + "intermediate_files"])
        for f in self.input_files:
            file_prefix = os.path.basename(f).split('.')[0]
            self.int_prefix.append(file_prefix)
            #create both trimmed and sam intermediate names for this particular input file.
            trimmed = ""
            fi = None
            trimmed = self.output_directory + "intermediate_files/" + file_prefix + "-trimmed.fasta"
            fi = open(trimmed,'w')
            self.int_trimmed.append(fi)
            self.int_sam.append(self.output_directory + "intermediate_files/" + file_prefix + ".sam")
    def run_pipeline(self):
        """Execute the three pipeline stages in order (trim, map,
        tabulate), then log the total runtime. read_config() must have
        been called first."""
        self.process_reads()
        self.call_bowtie2()
        self.process_sam()
        self.print_time_output("Total run time,", self.starttime)
############ Useful Functions ###############################
    def set_up_logger(self, log_file_name):
        """Configure the root logger to write both to *log_file_name*
        (truncating any previous run) and to stdout, then log a banner."""
        logging.basicConfig( filename=log_file_name , filemode='w', level=logging.INFO,format='%(message)s' )
        # Mirror every log record to the console as well.
        logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
        logging.info(bcolors.OKGREEN + "=========================================")
        logging.info(" Tn-Seq Pipeline")
        logging.info("The MIT License (MIT) Copyright (c) 2014 Kevin")
        logging.info(" Date: "+ str(datetime.now()))
        logging.info(" Author: Kevin Boehme")
        logging.info(" Email: kevinlboehme@gmail.com")
        logging.info("=========================================" + bcolors.ENDC)
    def debugger(self, *text):
        """Write the given values space-separated to stdout with a
        [DEBUG] prefix, only when debug mode is enabled."""
        if self.debug:
            sys.stdout.write("[DEBUG] ")
            for i in text:
                sys.stdout.write(bcolors.OKBLUE + str(i) + " " + bcolors.ENDC)
            # Bare Python 2 print: emits the trailing newline.
            print
def print_time_output(self, command, start):
time_to_run = time()-start
if time_to_run > 60:
time_to_run = time_to_run/60
logging.info(command + " ran in " + "%.2f" % (time_to_run) + " minutes." + "\n")
else:
logging.info(command + " ran in " + "%.2f" % (time_to_run) +" seconds." + "\n")
def fuzzy_match_beginning(self, pattern, genome, mismatches):
for i in range(self.minbaseoffset, self.maxbaseoffset+1):
chunk = genome[i : i + len(pattern)]
# now compare chunk with pattern to see if they match with at least mismatches.
if(self.compareChunks(pattern, chunk, mismatches)):
return i+len(pattern)
return -1
def compareChunks(self, pattern, chunk, mismatches):
misses = 0
for i in range(len(pattern)):
if(pattern[i] != chunk[i]):
misses = misses + 1
if(misses > mismatches):
return False
return True
def reverse_complement(self, sequence):
reverse_complement = []
for index in range(len(sequence) - 1, -1 , -1):
if(sequence[index] == "A"):
reverse_complement.append("T")
if(sequence[index] == "T"):
reverse_complement.append("A")
if(sequence[index] == "C"):
reverse_complement.append("G")
if(sequence[index] == "G"):
reverse_complement.append("C")
return ''.join(reverse_complement)
############ Process Reads ###############################
def print_summary_stats(self, start_process_reads_time, out_file_num):
prob = float(self.removed_notn)/float(self.original_read_count)
#Done processing file, print out stats.
self.print_time_output("Processing " + str(self.original_read_count) + " reads ", start_process_reads_time)
logging.info("Removed " + str(self.removed_notn)
+ " reads with no detected transposon sequence ("
+ str(self.removed_notn) + "/"+ str(self.original_read_count)
+ ") = " + "%.2f%%" % (prob * 100) + ".")
logging.info("Removed " + str(self.removed_tooshort)
+ " reads that were too short (Less than " + str(self.read_length) + " bp after transposon trimming).")
logging.info("")
logging.info("Kept " + str(self.kept_reads) + " reads written to " + self.int_trimmed[out_file_num].name+"\n")
logging.info(bcolors.WARNING + " ---------------\n" + bcolors.ENDC)
    def process_reads(self):
        """Stage 1: trim every input file.

        For each input file, reset the per-file counters, detect the
        format from the filename (fasta vs fastq, optionally gzipped),
        stream the reads through process_read(), and log summary stats.
        All trimmed-output handles are closed at the end.
        """
        logging.info( bcolors.HEADER + "\n\n=============== Read pre-processing ===============\n" + bcolors.ENDC)
        for out_file_num in range(self.num_conditions):
            start_process_reads_time = time()
            filepath = self.input_files[out_file_num]
            # Per-file counters (also read later by print_summary_stats).
            self.removed_notn = 0
            self.removed_tooshort = 0
            self.kept_reads = 0
            self.original_read_count = 0
            isfastq = None
            # Format detection is by substring of the file name, not content.
            if filepath.find("fasta") != -1:
                isfastq = False
            elif filepath.find("fastq") != -1:
                isfastq = True
            else:
                logging.error("Didn't find the string fasta or fastq in input files. Please make sure your data has a fasta or fastq file extension (Could also be 2ped).")
                sys.exit('')
            logging.info( "Input fastq = " + filepath + "." )
            f = None
            # Transparently support gzip-compressed inputs.
            if (filepath[-3:] == ".gz"):
                f = gzip.open(filepath)
            else:
                f = open(filepath)
            if isfastq:
                self.read_fastq(f, out_file_num)
            else:
                self.read_fasta(f, out_file_num)
            self.print_summary_stats(start_process_reads_time, out_file_num)
        for f in self.int_trimmed:
            f.close()
        logging.info( bcolors.HEADER + "--------------------------------------\n\n" + bcolors.ENDC)
def read_fastq(self, f, out_file_num):
t0 = time()
while True:
if self.original_read_count != 0 and self.original_read_count % 1000000 == 0:
sys.stdout.write('\rProcessed ' + str(self.original_read_count) + ' reads.')
sys.stdout.flush()
# logging.info('Processed ' + str(self.original_read_count) + ' reads.')
name = f.readline().strip()[1:] #Strip the @ sign infront of fastq names.
seq = f.readline().strip().upper()
plus = f.readline().strip()
score = f.readline().strip()
if not name or not seq or not plus or not score:
break #We are done, lets break out of the loop.
#we have all the contents of one read, now lets look for the transposon.
self.process_read(name, seq, out_file_num)
self.original_read_count += 1
else: #Its a fasta file.
while True:
if self.original_read_count != 0 and self.original_read_count % 1000000 == 0:
sys.stdout.write('\rProcessed ' + str(self.original_read_count) + ' reads.')
# logging.info('Processed ' + str(self.original_read_count) + ' reads.')
name = f.readline().strip()[1:] #Strip the @ sign infront of fastq names.
seq = f.readline().strip().upper()
if not name or not seq: break #We are done, lets break out of the loop.
#we have all the contents of one read, now lets look for the transposon.
self.process_read(name, seq, out_file_num)
self.original_read_count += 1
def read_fasta(self, f, out_file_num):
t0 = time()
while True:
if self.original_read_count != 0 and self.original_read_count % 1000000 == 0:
sys.stdout.write('\rProcessed ' + str(self.original_read_count) + ' reads.')
# logging.info('Processed ' + str(self.original_read_count) + ' reads.')
name = f.readline().strip()[1:] #Strip the @ sign infront of fastq names.
seq = f.readline().strip().upper()
if not name or not seq: break #We are done, lets break out of the loop.
self.process_read(name, seq, out_file_num)
self.original_read_count += 1
def process_read(self, name, seq, out_file_num):
if self.reverse_complement_reads:
seq = self.reverse_complement(seq)
tn_trimmed_seq = ""
if self.check_transposon:
tn_trimmed_seq = self.remove_transposon(name, seq)
else:
tn_trimmed_seq = seq
if tn_trimmed_seq: # True if sequence had a transposon. False otherwise.
prepped_seq = self.qc_genomic_region(tn_trimmed_seq)
if prepped_seq:
#write to fasta file.
self.kept_reads += 1
self.write_to_output(name, prepped_seq, self.int_trimmed[out_file_num])
else:
self.removed_tooshort += 1
pass
else: #no match.
self.removed_notn += 1
pass
def write_to_output(self, name, seq, f):
f.write(">"+name+"\n")
f.write(seq+"\n")
def remove_transposon(self, name, seq):
num = self.fuzzy_match_beginning( self.transposon, seq, self.mismatches)
#capture only the bacterial genome region.
tn_trimmed_seq = seq[num:]
if num != -1:
return tn_trimmed_seq
else:
return None
def qc_genomic_region(self, seq):
if len(seq) < self.read_length:
return None
else:
return seq[0:self.read_length]
############ Bowtie2 ###############################
def call_bowtie2(self):
logging.info( bcolors.HEADER + "\n\n=============== Bowtie2 Mapping ===============\n" + bcolors.ENDC)
for out_file_num in range(self.num_conditions):
start_time = time()
#bowtie_command = ["bowtie2", "-x", self.ref,"--phred33",
# "-f",self.int_trimmed[out_file_num].name, "-D" , "25" , "-R" ,"3", "-L" , "10" , "-i" , "S,1,0.50" ,
# "-S",self.int_sam[out_file_num], "--no-hd"]
bowtie_command = ["bowtie2", "-x", self.ref,"--phred33",
"-f",self.int_trimmed[out_file_num].name,
"-S",self.int_sam[out_file_num]]
logging.info("Bowtie Command Used: " + ' '.join(bowtie_command)+"\n\n")
logging.info("Writing output to = " + self.int_sam[out_file_num]+"\n")
try:
logging.info(subprocess.check_output(bowtie_command,stderr=subprocess.STDOUT))#,shell=True))
except:
logging.error("Bowtie2 doesn't seem to be installed. Make sure it can be run with the command: bowtie2")
sys.exit('Exiting')
self.print_time_output("Bowtie2",start_time)
if self.delete_intermediate_files:
logging.info("Deleted trimmed Fasta file.")
subprocess.check_output(["rm",self.int_trimmed[out_file_num].name])
else:
logging.info("Zipping up trimmed file.\n")
subprocess.check_output(["gzip","-f", self.int_trimmed[out_file_num].name])
logging.info(bcolors.WARNING + " ---------------\n" + bcolors.ENDC)
logging.info( bcolors.HEADER + "--------------------------------------\n\n" + bcolors.ENDC)
return True
############ Tabulate Sams ###############################
    def process_sam(self):
        """Stage 3: tabulate the mapped hops.

        Parses PTT gene annotations, inserts intergenic regions, reads
        the SAM files into per-replicon hop tables, tallies per-gene
        hits, computes normalization coefficients, prepares IGV tracks,
        and writes the output files.
        """
        start_time = time()
        logging.info( bcolors.HEADER + "\n\n=============== Process SAM File ===============\n" + bcolors.ENDC)
        self.prepare_gene_info()
        self.add_intergenic_regions_and_order_column()
        self.read_sam_file()
        self.tabulate_gene_hits()
        self.get_normalized_coefficients()
        self.prepare_igv_files()
        self.write_output()
        logging.info(bcolors.WARNING + " ---------------\n" + bcolors.ENDC)
        self.print_time_output("Done processing SAM files,", start_time)
        logging.info( bcolors.HEADER + "--------------------------------------\n\n" + bcolors.ENDC)
    def prepare_gene_info(self):
        """Parse each PTT annotation file into a Chromsome object with a
        sorted list of Gene objects, keyed by the file's base name in
        self.chromosomes."""
        self.debugger("On function: prepare_gene_info")
        start_time = time()
        for i,file_name in enumerate(self.ptt):
            # Replicon name is the PTT file name without its extension.
            name = os.path.basename(file_name).split('.')[0]
            with open(file_name, 'r') as f:
                new_chrom = Chromsome(name)
                # PTT layout:
                # Location Strand Length PID Gene Synonym Code COG Product
                title = f.readline() # Title
                num_prot = f.readline() # number of proteins
                f.readline() # header
                new_chrom.fill_info(title, num_prot)
                self.chromosomes[name] = new_chrom
                our_genes = []
                # Remaining lines are one whitespace-separated entry per gene.
                for line in f:
                    ptt_entry = line.split()
                    new_gene = Gene()
                    new_gene.create_from_ptt_entry(ptt_entry,self.gene_trim, self.num_conditions)
                    our_genes.append(new_gene)
                self.chromosomes[name].set_gene_list(sorted(our_genes))
def add_intergenic_regions_and_order_column(self):
    """Assign each gene an ordered locus code (prefix + zero-padded index)
    and insert synthetic intergenic-region Gene objects covering the gap
    before the first gene, the gaps between consecutive genes, and the gap
    after the last gene of every replicon.  Each replicon's gene_list is
    re-sorted afterwards so the new regions sit in coordinate order.
    """
    self.debugger("On function: add_intergenic_regions_and_order_column")
    for j, (ref_name, chrom) in enumerate(self.chromosomes.iteritems()):
        self.debugger("On replicon = " + ref_name)
        example_gene = chrom.gene_list[0]
        self.debugger("Example gene from this replicon = " + str(example_gene))
        gene_name = example_gene.synonym
        # Locus prefix = the alphabetic part of the first gene's synonym.
        prefix = "".join(re.findall("[a-zA-Z]+", gene_name))
        self.debugger("Prefix = " + prefix)
        num_genes = len(chrom.gene_list)
        self.debugger("Number of genes on replicon = " + str(num_genes))
        zfill_digits = len(str(num_genes))
        self.debugger("Digits to use in zfill = " + str(zfill_digits))
        intergenic_genes = set()
        for i, cur_gene in enumerate(chrom.gene_list, start=1):
            # i is 1-based, so chrom.gene_list[i] is the *next* gene.
            ordered_name = prefix + str(i).zfill(zfill_digits)
            cur_gene.order_code = ordered_name
            # Now lets add intergenic regions.
            new_gene = Gene()
            if i == 1: # On the first gene.
                if cur_gene.start > 1: # We have some room to capture.
                    new_gene.create_intergenic_region(1, cur_gene.start - 1, "int_BEG-"+cur_gene.synonym, self.num_conditions)
                    intergenic_genes.add(new_gene)
                    # BUG FIX: the original reused the same Gene object for the
                    # leading gap and the first inter-gene gap; the second
                    # create_intergenic_region() call overwrote the first, and
                    # adding the same object to the set twice kept only one
                    # region.  Allocate a fresh Gene for the second region.
                    new_gene = Gene()
                # BUG FIX: guard the next-gene lookup so a replicon holding a
                # single gene no longer raises IndexError here.
                if i < num_genes and cur_gene.end + 1 < chrom.gene_list[i].start: # We have some room to capture.
                    new_gene.create_intergenic_region(cur_gene.end + 1,
                            chrom.gene_list[i].start - 1,
                            "int_" + cur_gene.synonym + "-" + chrom.gene_list[i].synonym, self.num_conditions)
                    intergenic_genes.add(new_gene)
            elif i == chrom.num_proteins: # On the last gene.
                if cur_gene.end < chrom.end: # We have some room to capture.
                    new_gene.create_intergenic_region(cur_gene.end + 1, chrom.end, "int_"+cur_gene.synonym+"-END", self.num_conditions)
                    intergenic_genes.add(new_gene)
            else: # Make sure the end of the current gene and the beginning of the next gene have a space.
                if cur_gene.end + 1 < chrom.gene_list[i].start: # We have some room to capture.
                    new_gene.create_intergenic_region(cur_gene.end + 1,
                            chrom.gene_list[i].start - 1,
                            "int_" + cur_gene.synonym + "-" + chrom.gene_list[i].synonym, self.num_conditions )
                    intergenic_genes.add(new_gene)
        # Now lets smash those intergenic regions with the current genes.
        self.chromosomes[ref_name].gene_list = sorted(self.chromosomes[ref_name].gene_list + list(intergenic_genes))
    logging.info(bcolors.WARNING + " ---------------\n" + bcolors.ENDC)
def read_sam_file(self):
    """Parse each intermediate SAM alignment file into per-replicon
    {position: HopSite} maps, filter on the minimum hop count, then
    re-key each map as {0..n-1: HopSite} in ascending position order.

    Only primary single-end alignments are counted (FLAG "0" = forward
    strand, "16" = reverse strand); FLAG "4" (unmapped) is skipped.
    Each SAM file is deleted or gzipped after parsing, depending on
    self.delete_intermediate_files.
    """
    self.debugger("On function: read_sam_file")
    # One position map per replicon.
    for i in self.chromosomes:
        self.sam_file_contents[str(i)] = {}
    for i,sam_file in enumerate(self.int_sam):
        start_time = time()
        treatment = self.int_prefix[i]
        logging.info("Reading file = " + sam_file +".")
        with open(sam_file) as f:
            #num_lines = float(sum(1 for line in f))
            #f.seek(0)
            for j,line in enumerate(f):
                #self.update_progress(float(j)/num_lines)
                if line[0] == "@": #Pass the headers
                    pass
                else:
                    sam_entry = line.split()
                    code = sam_entry[1]
                    pos = int ( sam_entry[3] )
                    if code == "4": # unmapped
                        pass
                    elif code == "0" or code == "16":
                        # NOTE(review): assumes NCBI-style reference names
                        # ("gi|...|ref|ACC.N|"); the accession minus its
                        # 2-char version suffix becomes the replicon key --
                        # TODO confirm against the bowtie index used.
                        ref_name = sam_entry[2].split('|')[3][:-2]
                        strand = '+'
                        if code == "16":
                            strand = '-'
                        hop_exists = False
                        if pos in self.sam_file_contents[ref_name]:
                            # Insertion site already seen: bump condition i.
                            self.sam_file_contents[ref_name][pos].increment_hop_count(i)
                        else:
                            new_hop = HopSite(pos, self.negateIGV, strand, self.num_conditions)
                            new_hop.increment_hop_count(i)
                            self.sam_file_contents[ref_name][pos] = new_hop
        logging.info("")
        self.print_time_output("Reading file", start_time)
        if self.delete_intermediate_files:
            logging.info("Deleting SAM file.")
            subprocess.check_output(["rm",sam_file])
        else:
            logging.info("Zipping up SAM file.")
            subprocess.check_output(["gzip", "-f", sam_file])
    self.filter_on_min_hops()
    # Re-key each replicon's hops by sorted ordinal so tabulate_gene_hits
    # can walk them with a simple integer cursor.
    for ref, value in self.sam_file_contents.iteritems():
        temp_dict = {}
        for i,item in enumerate(sorted(value.iteritems())):
            temp_dict[i] = item[1]
        self.sam_file_contents[ref] = temp_dict
    logging.info(bcolors.WARNING + " ---------------\n" + bcolors.ENDC)
def filter_on_min_hops(self):
    """Drop hop sites whose summed read count across all conditions is
    below self.minimum_hop_count.

    Mutates self.sam_file_contents in place.
    """
    for ref, positions in self.sam_file_contents.items():
        # Snapshot with list() so entries can be deleted while iterating.
        # (The original relied on Python 2's items() returning a list;
        # list(...) keeps the deletion safe on Python 3 as well.)
        for index, hop in list(positions.items()):
            if hop.total_hops() < self.minimum_hop_count:
                del positions[index]
def tabulate_gene_hits(self):
    """Walk each replicon's position-sorted hop sites once, appending to
    every gene the hops whose position falls inside its truncated
    coordinates [start_trunc, end_trunc].

    Assumes read_sam_file() already re-keyed self.sam_file_contents[ref]
    as {0..n-1: HopSite} in ascending position order.
    """
    self.debugger("On function: tabulate_gene_hits")
    logging.info("Begin tabulating gene hits...\n")
    for ref, pos in self.sam_file_contents.iteritems():
        if len(pos) == 0:
            logging.info("Reference " + ref + " has no reads which mapped to it.")
        else:
            start_time = time()
            it = 0  # integer cursor into this replicon's ordered hop dict
            logging.info("Working on reference = " + ref)
            chrom = self.chromosomes[ref]
            num_genes = float(len(chrom.gene_list))
            for i,gene in enumerate(chrom.gene_list,start=1):
                self.update_progress(float(i)/num_genes)
                beg = gene.start_trunc
                end = gene.end_trunc
                curr_hop = self.update_current_hop(it, self.sam_file_contents[ref])
                # Advance the cursor past hops upstream of this gene.
                # NOTE(review): if curr_hop were None here, .position would
                # raise AttributeError -- presumably the cursor rewind below
                # keeps it valid; TODO confirm.
                while curr_hop.position < beg:
                    it += 1
                    curr_hop = self.update_current_hop(it, self.sam_file_contents[ref])
                    if not curr_hop:
                        break
                if not curr_hop:
                    break
                # Collect every hop inside the gene's truncated span.
                while beg <= curr_hop.position <= end:
                    gene.hop_list.append(curr_hop)
                    it += 1
                    curr_hop = self.update_current_hop(it, self.sam_file_contents[ref])
                    if not curr_hop: # We ran out of hop hits, so we are done.
                        break
                if not curr_hop:
                    break
                else:
                    # Rewind the cursor so hops shared with overlapping or
                    # nearby features (e.g. intergenic regions) are not
                    # skipped when scanning the next gene.
                    it -= 50
                    if it < 0:
                        it = 0
            self.update_progress(1)
            self.print_time_output(" Done tabulating gene hits,",start_time)
    logging.info(bcolors.WARNING + " ---------------\n" + bcolors.ENDC)
def get_normalized_coefficients(self):
    """Compute one normalization coefficient per condition.

    Depending on self.normalize, hop counts are summed over all genes
    ("Total") or only over intergenic regions ("Intergenic"); every
    condition is then scaled down to the smallest total
    (coefficient = min_total / condition_total).  If any condition has
    zero hops, normalization is skipped and all coefficients are 1.
    """
    self.debugger("On function: get_normalized_coefficients")
    logging.info("\nBegin Normalization Steps.\n")
    total_counted_hops = [0] * self.num_conditions
    if self.normalize == "Total":
        for ref_name,chrom in self.chromosomes.iteritems():
            for gene in chrom.gene_list:
                # Element-wise accumulation of per-condition totals.
                total_counted_hops = [x + y for x, y in zip(total_counted_hops, gene.hop_totals())]
        logging.info("Total hops observed is: " + str(sum(total_counted_hops)))
        for i,total in enumerate(total_counted_hops):
            logging.info(self.int_prefix[i] + " has " + str(total) + " [total] hops observed.")
    elif self.normalize == "Intergenic":
        for ref_name,chrom in self.chromosomes.iteritems():
            for gene in chrom.gene_list:
                if gene.is_intergenic:
                    total_counted_hops = [x + y for x, y in zip(total_counted_hops, gene.hop_totals())]
        logging.info("Total [intergenic] hops observed is: " + str(sum(total_counted_hops)))
        for i,total in enumerate(total_counted_hops):
            logging.info(self.int_prefix[i] + " has " + str(total) + " [intergenic] hops observed.")
    else:
        logging.error("Error with normalization.")
        sys.exit()
    minimum = min(total_counted_hops)
    self.debugger("min = ",minimum)
    if minimum <= 0:
        # A condition with no hops would make the ratio meaningless;
        # fall back to identity coefficients instead of dividing by zero.
        logging.error("Normalization couldn't be completed. It appears a condition has no hop hits.")
        self.normalization_coefficients = [1] * self.num_conditions
        return
        #sys.exit('Exiting')
    for i,totals in enumerate(total_counted_hops):
        self.normalization_coefficients.append(float(minimum)/float(totals))
    logging.info('Normalization coefficients used:')
    for i,condition in enumerate(self.int_prefix):
        logging.info(condition + " multiplied by " + str(self.normalization_coefficients[i]))
def write_output(self):
    """Write the final result tables: per-gene hop counts (raw and
    normalized), the gene description table, the intergenic-region table,
    and one IGV track line per feature that received hops.  Optionally
    removes the (already emptied) intermediate-files directory first.
    """
    self.debugger("On function: write_output")
    logging.info("Begin calculating gene totals and writing to output...")
    start_time = time()
    if self.delete_intermediate_files:
        # NOTE(review): rmdir only succeeds on an empty directory --
        # presumably the SAM files were removed in read_sam_file().
        logging.info("Deleting Intermediate Folder.")
        subprocess.check_output(["rmdir",self.output_directory + "intermediate_files/"])
    with open(self.tabulated_filename, 'w') as hf, open(self.gene_tabulated_filename, 'w') as gf, open(self.intergenic_filename, 'w') as intf:
        # All three tables share the same header layout.
        hops_header = ["Num","GeneID"]
        hops_header.extend(self.int_prefix)
        hops_header.extend([s + "(Normalized)" for s in self.int_prefix])
        hops_header.extend(["Start","Stop","Order","Strand","Length","PID","Gene","Function"])
        hf.write("\t".join(hops_header)+"\n")
        gf.write("\t".join(hops_header)+"\n")
        intf.write("\t".join(hops_header)+"\n")
        count = 1  # running row number for real (non-intergenic) genes
        for ref_name, chrom in self.chromosomes.iteritems():
            for gene in chrom.gene_list:
                if len(gene.hop_list) > 0: # If the gene even has hops in it to write.
                    self.igv_filenames[ref_name].write(gene.write_igv(ref_name, self.igv_normalize, self.normalization_coefficients) + "\n")
                else:
                    pass
                if not gene.is_intergenic:
                    hf.write(gene.write_hops(count,self.normalization_coefficients)+"\n")
                    gf.write(gene.write_gene()+"\n")
                    count += 1
                else: # We can write the intergenic stuff to a file as well for fun.
                    intf.write(gene.write_hops(count,self.normalization_coefficients)+"\n")
    self.print_time_output("Done calculating totals and writing to output,", start_time)
def update_progress(self,progress):
    """Render an in-place console progress bar; *progress* is 0.0..1.0."""
    progress = int(round(progress * 100.0))
    # NOTE: '#'*(progress/5) relies on Python 2 integer division
    # (would need progress // 5 on Python 3).
    sys.stdout.write('\r[{0}] {1}%'.format('#'*(progress/5), progress))
    sys.stdout.flush()
    if progress == 100:
        sys.stdout.write(' ')
def update_current_hop(self, it, sam_file_contents):
    """Return the hop stored at cursor index *it*, or None once the
    cursor has moved past the last entry.

    *sam_file_contents* is the {0..n-1: HopSite} map built by
    read_sam_file(); a None return signals "no more hops".
    """
    last_index = len(sam_file_contents) - 1
    return None if it > last_index else sam_file_contents[it]
def prepare_igv_files(self):
    """Create output_files/IGV/ and open one headered .igv track file per
    replicon.

    The open handles are kept in self.igv_filenames for write_output() to
    use; they are not closed here.
    """
    igv_path = self.output_directory + "output_files/IGV/"
    if not os.path.exists( self.output_directory + "output_files/IGV/"):
        subprocess.check_output(["mkdir", igv_path])
    for ref_name in self.chromosomes.keys():
        filename = igv_path + ref_name + ".igv"
        self.igv_filenames[ref_name] = open(filename, 'w+')
        #Now lets give them a header.
        self.igv_filenames[ref_name].write("#Transpon"+ "\n")
        # Fixed columns, then one data column per condition.
        header = ["Chromosome", "Start", "End", "Feature"]
        for i in self.int_prefix:
            header.append(i)
        self.igv_filenames[ref_name].write('\t'.join(header) + "\n")
################################
############# Main #############
################################
def main():
    """Command-line entry point: read the config file given as argv[1]
    and run the full pipeline."""
    #our main object
    hp = hops_pipeline()
    config = ""
    try:
        config = sys.argv[1]
    except:
        # NOTE(review): a bare except also swallows unrelated errors; only
        # IndexError (missing argument) is expected here.
        sys.exit("\nUSAGE: python TnSeq-Pipeline.py pathtoconfig.config\n")
    hp.read_config(config)
    hp.run_pipeline()
# Standard script entry point.
if __name__ == "__main__":
    main()
|
KBoehme/TnSeq-Pipeline
|
TnSeq-Pipeline.py
|
Python
|
mit
| 28,211
|
[
"Bowtie"
] |
4ffc3269e423218414aba5eed36b7f5c6f489e51d384ab55dde8368913f873fc
|
# coding: utf-8
import numpy as np
from math import ceil
from .. import img_as_float
from ..restoration._denoise_cy import _denoise_bilateral, _denoise_tv_bregman
from .._shared.utils import _mode_deprecations, skimage_deprecation, warn
import warnings
def denoise_bilateral(image, win_size=None, sigma_color=None, sigma_spatial=1,
                      bins=10000, mode='constant', cval=0, multichannel=True, sigma_range=None):
    """Denoise image using bilateral filter.

    This is an edge-preserving and noise reducing denoising filter. It averages
    pixels based on their spatial closeness and radiometric similarity.

    Spatial closeness is measured by the Gaussian function of the Euclidean
    distance between two pixels and a certain standard deviation
    (`sigma_spatial`).

    Radiometric similarity is measured by the Gaussian function of the
    Euclidean distance between two color values and a certain standard
    deviation (`sigma_color`).

    Parameters
    ----------
    image : ndarray, shape (M, N[, 3])
        Input image, 2D grayscale or RGB.
    win_size : int
        Window size for filtering.
        If win_size is not specified, it is calculated as
        ``max(5, 2*ceil(3*sigma_spatial)+1)``.
    sigma_color : float
        Standard deviation for grayvalue/color distance (radiometric
        similarity). A larger value results in averaging of pixels with larger
        radiometric differences. Note, that the image will be converted using
        the `img_as_float` function and thus the standard deviation is in
        respect to the range ``[0, 1]``. If the value is ``None`` the standard
        deviation of the ``image`` will be used.
    sigma_spatial : float
        Standard deviation for range distance. A larger value results in
        averaging of pixels with larger spatial differences.
    bins : int
        Number of discrete values for Gaussian weights of color filtering.
        A larger value results in improved accuracy.
    mode : {'constant', 'edge', 'symmetric', 'reflect', 'wrap'}
        How to handle values outside the image borders. See
        `numpy.pad` for detail.
    cval : float
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.
    multichannel : bool
        Whether the last axis of the image is to be interpreted as multiple
        channels or another spatial dimension.
    sigma_range : float
        Deprecated alias for `sigma_color`; will be removed in v0.14.

    Returns
    -------
    denoised : ndarray
        Denoised image.

    References
    ----------
    .. [1] http://users.soe.ucsc.edu/~manduchi/Papers/ICCV98.pdf

    Examples
    --------
    >>> from skimage import data, img_as_float
    >>> astro = img_as_float(data.astronaut())
    >>> astro = astro[220:300, 220:320]
    >>> noisy = astro + 0.6 * astro.std() * np.random.random(astro.shape)
    >>> noisy = np.clip(noisy, 0, 1)
    >>> denoised = denoise_bilateral(noisy, sigma_color=0.05, sigma_spatial=15)
    """
    # Validate dimensionality up front: the Cython kernel only handles 2-D
    # grayscale or 2-D multichannel input.
    if multichannel:
        if image.ndim != 3:
            if image.ndim == 2:
                raise ValueError("Use ``multichannel=False`` for 2D grayscale "
                                 "images. The last axis of the input image "
                                 "must be multiple color channels not another "
                                 "spatial dimension.")
            else:
                raise ValueError("Bilateral filter is only implemented for "
                                 "2D grayscale images (image.ndim == 2) and "
                                 "2D multichannel (image.ndim == 3) images, "
                                 "but the input image has {0} dimensions. "
                                 "".format(image.ndim))
        elif image.shape[2] not in (3, 4):
            # Unusual channel counts are allowed but warned about.
            if image.shape[2] > 4:
                warnings.warn("The last axis of the input image is interpreted "
                              "as channels. Input image with shape {0} has {1} "
                              "channels in last axis. ``denoise_bilateral`` is "
                              "implemented for 2D grayscale and color images "
                              "only.".format(image.shape, image.shape[2]))
            else:
                msg = "Input image must be grayscale, RGB, or RGBA; but has shape {0}."
                warnings.warn(msg.format(image.shape))
    else:
        if image.ndim > 2:
            raise ValueError("Bilateral filter is not implemented for "
                             "grayscale images of 3 or more dimensions, "
                             "but input image has {0} dimension. Use "
                             "``multichannel=True`` for 2-D RGB "
                             "images.".format(image.shape))

    if sigma_range is not None:
        warn('`sigma_range` has been deprecated in favor of '
             '`sigma_color`. The `sigma_range` keyword argument '
             'will be removed in v0.14', skimage_deprecation)
        #If sigma_range is provided, assign it to sigma_color
        sigma_color = sigma_range

    if win_size is None:
        # Default window covers +/- 3 standard deviations of the spatial
        # Gaussian and is at least 5 pixels wide.
        win_size = max(5, 2*int(ceil(3*sigma_spatial))+1)

    mode = _mode_deprecations(mode)
    return _denoise_bilateral(image, win_size, sigma_color, sigma_spatial,
                              bins, mode, cval)
def denoise_tv_bregman(image, weight, max_iter=100, eps=1e-3, isotropic=True):
    """Perform total-variation denoising using split-Bregman optimization.

    Total-variation denoising (also known as total-variation regularization)
    tries to find an image with less total-variation under the constraint
    of being similar to the input image, which is controlled by the
    regularization parameter.

    Parameters
    ----------
    image : ndarray
        Input data to be denoised (converted using ``img_as_float``).
    weight : float
        Denoising weight. The smaller the `weight`, the more denoising (at
        the expense of less similarity to the `input`). The regularization
        parameter `lambda` is chosen as `2 * weight`.
    max_iter : int, optional
        Maximal number of iterations used for the optimization.
    eps : float, optional
        Relative difference of the value of the cost function that determines
        the stop criterion. The algorithm stops when::

            SUM((u(n) - u(n-1))**2) < eps

    isotropic : boolean, optional
        Switch between isotropic and anisotropic TV denoising.

    Returns
    -------
    u : ndarray
        Denoised image.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Total_variation_denoising
    .. [2] Tom Goldstein and Stanley Osher, "The Split Bregman Method For L1
           Regularized Problems",
           ftp://ftp.math.ucla.edu/pub/camreport/cam08-29.pdf
    .. [3] Pascal Getreuer, "Rudin-Osher-Fatemi Total Variation Denoising
           using Split Bregman" in Image Processing On Line on 2012-05-19,
           http://www.ipol.im/pub/art/2012/g-tvd/article_lr.pdf
    .. [4] http://www.math.ucsb.edu/~cgarcia/UGProjects/BregmanAlgorithms_JacquelineBush.pdf
    """
    # The optimization itself is implemented in the compiled Cython kernel.
    return _denoise_tv_bregman(image, weight, max_iter, eps, isotropic)
def _denoise_tv_chambolle_nd(im, weight=0.1, eps=2.e-4, n_iter_max=200):
"""Perform total-variation denoising on n-dimensional images.
Parameters
----------
im : ndarray
n-D input data to be denoised.
weight : float, optional
Denoising weight. The greater `weight`, the more denoising (at
the expense of fidelity to `input`).
eps : float, optional
Relative difference of the value of the cost function that determines
the stop criterion. The algorithm stops when:
(E_(n-1) - E_n) < eps * E_0
n_iter_max : int, optional
Maximal number of iterations used for the optimization.
Returns
-------
out : ndarray
Denoised array of floats.
Notes
-----
Rudin, Osher and Fatemi algorithm.
"""
ndim = im.ndim
p = np.zeros((im.ndim, ) + im.shape, dtype=im.dtype)
g = np.zeros_like(p)
d = np.zeros_like(im)
i = 0
while i < n_iter_max:
if i > 0:
# d will be the (negative) divergence of p
d = -p.sum(0)
slices_d = [slice(None), ] * ndim
slices_p = [slice(None), ] * (ndim + 1)
for ax in range(ndim):
slices_d[ax] = slice(1, None)
slices_p[ax+1] = slice(0, -1)
slices_p[0] = ax
d[slices_d] += p[slices_p]
slices_d[ax] = slice(None)
slices_p[ax+1] = slice(None)
out = im + d
else:
out = im
E = (d ** 2).sum()
# g stores the gradients of out along each axis
# e.g. g[0] is the first order finite difference along axis 0
slices_g = [slice(None), ] * (ndim + 1)
for ax in range(ndim):
slices_g[ax+1] = slice(0, -1)
slices_g[0] = ax
g[slices_g] = np.diff(out, axis=ax)
slices_g[ax+1] = slice(None)
norm = np.sqrt((g ** 2).sum(axis=0))[np.newaxis, ...]
E += weight * norm.sum()
tau = 1. / (2.*ndim)
norm *= tau / weight
norm += 1.
p -= tau * g
p /= norm
E /= float(im.size)
if i == 0:
E_init = E
E_previous = E
else:
if np.abs(E_previous - E) < eps * E_init:
break
else:
E_previous = E
i += 1
return out
def denoise_tv_chambolle(im, weight=0.1, eps=2.e-4, n_iter_max=200,
                         multichannel=False):
    """Perform total-variation denoising on n-dimensional images.

    Minimizes the total variation of the image (roughly, the integral of
    the norm of the image gradient) while remaining close to the input,
    using the algorithm of Chambolle [1]_ for the Rudin-Osher-Fatemi
    model.  The result tends towards piecewise-constant, "cartoon-like"
    images.

    Parameters
    ----------
    im : ndarray of ints, uints or floats
        Input data to be denoised; non-float input is first converted
        with ``img_as_float``.
    weight : float, optional
        Denoising weight. The greater `weight`, the more denoising (at
        the expense of fidelity to `input`).
    eps : float, optional
        Stop criterion: iteration halts when
        ``(E_(n-1) - E_n) < eps * E_0`` for the cost function E.
    n_iter_max : int, optional
        Maximal number of iterations used for the optimization.
    multichannel : bool, optional
        Apply total-variation denoising separately for each channel of
        the last axis. Set this for color images, otherwise the channel
        dimension is smoothed as if it were spatial.

    Returns
    -------
    out : ndarray
        Denoised image.

    References
    ----------
    .. [1] A. Chambolle, An algorithm for total variation minimization and
           applications, Journal of Mathematical Imaging and Vision,
           Springer, 2004, 20, 89-97.

    Examples
    --------
    >>> from skimage import color, data
    >>> img = color.rgb2gray(data.astronaut())[:50, :50]
    >>> img += 0.5 * img.std() * np.random.randn(*img.shape)
    >>> denoised_img = denoise_tv_chambolle(img, weight=60)
    """
    if im.dtype.kind != 'f':
        im = img_as_float(im)
    if not multichannel:
        return _denoise_tv_chambolle_nd(im, weight, eps, n_iter_max)
    # Denoise each channel of the trailing axis independently.
    out = np.zeros_like(im)
    for channel in range(im.shape[-1]):
        out[..., channel] = _denoise_tv_chambolle_nd(im[..., channel],
                                                     weight, eps, n_iter_max)
    return out
|
pratapvardhan/scikit-image
|
skimage/restoration/_denoise.py
|
Python
|
bsd-3-clause
| 12,531
|
[
"Gaussian"
] |
ed1b7dc1012013744198df3c16b62d2b1cacb8aa8b5b72bc4f45f870330edeb5
|
import argparse
import datetime
import shelve
import sys
from pheme.util.config import Config
class TermCache(object):
    """Persistent cache of terms and their anonymized values.

    Wraps a writeback shelve keyed by the string form of each term.
    Lookups of missing keys return None rather than raising KeyError,
    and every store is synced to disk immediately.
    """

    def __init__(self):
        # The shelf file location comes from the [anonymize] section of
        # the PHEME configuration.
        cachefile = Config().get('anonymize', 'cachefile')
        self.shelf = shelve.open(cachefile, writeback=True)

    def _convert_key(self, key):
        """shelve keys must be strings; coerce anything else."""
        return key if isinstance(key, str) else str(key)

    def __contains__(self, key):
        return self._convert_key(key) in self.shelf

    def __getitem__(self, key):
        # Missing terms yield None instead of raising.
        return self.shelf.get(self._convert_key(key))

    def __setitem__(self, key, value):
        self.shelf[self._convert_key(key)] = value
        self.shelf.sync()

    def __delitem__(self, key):
        del self.shelf[self._convert_key(key)]
# Module-level singleton: every helper below shares this one cache, so all
# entry points in a process see (and persist) the same shelf.
tc = TermCache() # module level singleton
def lookup_term(term):
    """lookup term - return if found, None otherwise"""
    # TermCache stringifies the key and returns None on a miss.
    return tc[term]
def store_term(term, value):
    """set term to value in cache"""
    # Write-through: TermCache.__setitem__ syncs the shelf to disk.
    tc[term] = value
def delete_term(term):
    """delete term from cache"""
    # Raises KeyError if the (stringified) term is absent.
    del tc[term]
def lookup_term_ep():
    """entry point to lookup arbitrary term from persistent cache

    Special term shapes:
    - five comma-separated ints (Y,m,d,H,M) are treated as a datetime and
      looked up / reported in datetime form;
    - a '^^^'-delimited pair (id^^^org) has each half looked up separately.
    Exits with status 1 if the term is not cached.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("term", help="lookup 'term' in termcache")
    args = parser.parse_args()
    if args.term.count(',') == 4:
        # make it easy to convert datetime references
        term = datetime.datetime(*(int(x) for x in args.term.split(',')))
        result = lookup_term(term.strftime('%Y%m%d%H%M'))
        # NOTE(review): assumes the cached value is a '%Y%m%d%H%M%S'
        # timestamp string; a miss (None) would raise here -- TODO confirm.
        dt = datetime.datetime.strptime(result, '%Y%m%d%H%M%S')
        result = dt.strftime('%Y,%m,%d,%H,%M,%S')
    elif args.term.count('^') == 3:
        # lookup constituent visit / patient id parts
        id = lookup_term(args.term[:args.term.index('^^^')])
        org = lookup_term(args.term[args.term.index('^^^')+3:])
        result = id + '^^^' + org
    else:
        result = lookup_term(args.term)
    if result is not None:
        #print "%s:%s" % (args.term, result)
        print result
        return
    else:
        print >> sys.stderr, "Not Found: '%s'" % args.term
        sys.exit(1)
def store_term_ep():
    """entry point to store arbitrary term from persistent cache

    Refuses to clobber an existing entry unless --overwrite is given.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("term", help="the 'term' to set in termcache")
    parser.add_argument("value", help="set 'value' for 'term'")
    parser.add_argument("-o", "--overwrite", action='store_true',
                        help="overwrite existing values if set")
    args = parser.parse_args()
    if not args.overwrite and lookup_term(args.term) is not None:
        raise ValueError("term '%s' already assigned, "
                         "overwrite flag not set" % args.term)
    store_term(args.term, args.value)
    print "Cached %s:%s" % (args.term, args.value)
    return
|
pbugni/pheme.anonymize
|
pheme/anonymize/termcache.py
|
Python
|
bsd-3-clause
| 3,154
|
[
"VisIt"
] |
bc96d7bc4a6b60d48054de299045162050671e5eaf11498418b04c665d86ca2c
|
from django.conf.urls import url
from . import views
# URL routes for the patient app; numeric captures are patient / record ids.
urlpatterns = [
    # Patient index, detail, lookup and creation.
    url(r'^$', views.patient_index, name='index'),
    url(r'^(\d+)/$', views.patient_show),
    url(r'^get$', views.patient_get),
    url(r'^create$', views.patient_create),
    # Appointments (nested under a patient id where relevant).
    url(r'^(\d+)/appointment/create$', views.appointment_create),
    url(r'^(\d+)/appointment/(\d+)$', views.appointment_show),
    url(r'^appointment/get$', views.appointment_get),
    # Lab results.
    url(r'^lab_result/create$', views.lab_result_create),
    url(r'^lab_result/store$', views.lab_result_store),
    url(r'^(\d+)/lab_result/(\d+)$', views.lab_result_show),
    # Visits and vitals (trend takes a word-like metric name).
    url(r'^(\d+)/visit/create$', views.visit_create),
    url(r'^(\d+)/vitals/create$', views.vitals_create),
    url(r'^(\d+)/vitals/trend/(\w+)$', views.vitals_trend),
    # Screenings and treatments.
    url(r'^(\d+)/screening/create$', views.screening_create),
    url(r'^(\d+)/screening/(\d+)$', views.screening_show),
    url(r'^(\d+)/treatment/create$', views.treatment_create),
    url(r'^treatment/get$', views.treatment_get),
]
|
aliakbars/tbdc
|
patient/urls.py
|
Python
|
apache-2.0
| 1,008
|
[
"VisIt"
] |
90b8173911858f3366b1affca14a859fab463d7df978af02eca65e2d60311dcf
|
#!/usr/bin/env python
# Notes:
# - The above pragma line is only required if you plan to run this module as a stand-alone script to run any test harnesses which may occur after "if __name__ == '__main__'".
#
# - You should run lime with this script in the form
# pylime model.py
# You will need the location of pylime in your PATH environment variable; also you need to have the location of the par_classes.py module in your PYTHONPATH environment variable.
import math
# For definitions of the classes ModelParameters and ImageParameters:
from par_classes import *
# Note that the useful macros defined in lime.h are also provided here in the dictionary 'macros' provided as an argument to each function below. See the example code at the end for the full list of macro values provided.
#.......................................................................
def input(macros):
    """Build and return the LIME ModelParameters object for this model.

    ``macros`` is the dict of lime.h constants (AU, PC, GRAV, the CP_*
    collision-partner codes, ...) that pylime passes to every user hook.
    """
    par = ModelParameters()
    # We give all the possible parameters here, but have commented out many which can be left at their defaults.
    # Parameters which must be set (they have no sensible defaults).
    #
    par.radius = 2000.0*macros["AU"]
    par.minScale = 0.5*macros["AU"]
    par.pIntensity = 4000
    par.sinkPoints = 3000
    # Parameters which may be omitted (i.e. left at their default values) under some circumstances.
    #
    par.dust = "jena_thin_e6.tab"
    par.outputfile = "populations.pop"
    par.binoutputfile = "restart.pop"
    par.gridfile = "grid.vtk"
    # par.pregrid = "pregrid.asc"
    # par.restart = "restart.pop"
    # par.gridInFile = "grid_5.ds"
    # par.collPartIds pairs each element returned by density() with a LAMDA
    # collision-partner code, and par.nMolWeights gives the weights used to
    # turn those densities into the radiating species' number density (a
    # density-weighted sum, multiplied by the abundance).  If set, both must
    # have exactly as many elements as density() returns (max 7, the number
    # of partner types in the LAMDA database).  Partners named here but
    # absent from the moldatfiles contribute only to the bulk density;
    # partners in the moldatfiles but absent here are assumed to have zero
    # density.  If these arrays are omitted entirely, LIME falls back to the
    # version-1.5 behaviour of guessing the pairing, which is not
    # recommended.
    par.collPartIds = [macros["CP_H2"]] # must be a list, even when there is only 1 item.
    par.nMolWeights = [1.0] # must be a list, even when there is only 1 item.
    # par.collPartNames = ["phlogiston"] # must be a list, even when there is only 1 item.
    # par.collPartMolWeights = [2.0159] # must be a list, even when there is only 1 item.
    # par.gridDensMaxValues = [1.0] # must be a list, even when there is only 1 item.
    # par.gridDensMaxLoc = [[0.0,0.0,0.0]] # must be a list, each element of which is also a list with 3 entries (1 for each spatial coordinate).
    # par.tcmb = 2.72548
    # par.lte_only = False
    # par.init_lte = False
    # par.samplingAlgorithm = 0
    par.sampling = 2 # Now only accessed if par.samplingAlgorithm==0 (the default).
    # par.blend = False
    # par.polarization = False
    # par.nThreads = 1
    par.nSolveIters = 14
    par.traceRayAlgorithm = 1
    # par.resetRNG = False
    # par.doSolveRTE = False
    # par.gridOutFiles = ['','','','',"grid_5.ds"] # must be a list with 5 string elements, although some or all can be empty.
    par.moldatfile = ["hco+@xpol.dat"] # must be a list, even when there is only 1 item.
    # par.girdatfile = ["myGIRs.dat"] # must be a list, even when there is only 1 item.
    # Definitions for image #0. Add further similar blocks for additional images.
    #
    par.img.append(ImageParameters()) # by default this list par.img has 0 entries. Each 'append' will add an entry. The [-1] entry is the most recently added.
    par.img[-1].nchan = 61 # Number of channels
    par.img[-1].trans = 3 # zero-indexed J quantum number
    # par.img[-1].molI = -1
    par.img[-1].velres = 500.0 # Channel resolution in m/s
    par.img[-1].imgres = 0.1 # Resolution in arc seconds
    par.img[-1].pxls = 100 # Pixels per dimension
    par.img[-1].unit = 0 # 0:Kelvin 1:Jansky/pixel 2:SI 3:Lsun/pixel 4:tau
    # par.img[-1].freq = -1.0
    # par.img[-1].bandwidth = -1.0
    par.img[-1].source_vel = 0.0 # source velocity in m/s
    # par.img[-1].theta = 0.0
    # par.img[-1].phi = 0.0
    # par.img[-1].incl = 0.0
    # par.img[-1].posang = 0.0
    # par.img[-1].azimuth = 0.0
    par.img[-1].distance = 140.0*macros["PC"] # source distance in m
    par.img[-1].doInterpolateVels = True
    par.img[-1].filename = "image0.fits" # Output filename
    # par.img[-1].units = "0,1"
    return par
#.......................................................................
#.......................................................................
# User-defined functions:
#.......................................................................
def density(macros, x, y, z):
    """Return the collision-partner number density at (x, y, z).

    The value returned is a list with one entry per collision partner
    (see par.collPartIds); here a single H2 density in molecules per
    cubic metre, following a spherical r^-1.5 power-law profile.  The
    radius is clamped at 0.7 AU to avoid the singularity at the origin.
    """
    r_min = 0.7*macros["AU"] # greater than zero to avoid a singularity at the origin.
    # Radial distance from the origin.
    r = math.sqrt(x*x+y*y+z*z)
    # Clamp small radii so r == 0 cannot overflow the power law.
    r_eff = r if r > r_min else r_min
    # Spherical power-law profile; the final 1e6 converts cm^-3 to m^-3.
    n_h2 = 1.5e6*((r_eff/(300.0*macros["AU"]))**(-1.5))*1e6
    return [n_h2]  # must be a list, even with a single partner
#.......................................................................
def temperature(macros, x, y, z):
    """Return a 2-element list [gas temperature (K), dust temperature].

    The gas temperature is linearly interpolated from a tabulated radial
    profile; outside the tabulated radial range the nearest end value is
    used.  (The original version extrapolated the first segment when r
    equalled the outermost tabulated radius exactly -- fixed here by
    clamping inclusively at both ends.)  The second element is a 0.0
    placeholder meaning "no separate dust temperature"; LIME also accepts
    None there.
    """
    # Tabulated gas temperature as a function of radial distance (m).
    rToTemp = [
        [2.0e13, 5.0e13, 8.0e13, 1.1e14, 1.4e14, 1.7e14, 2.0e14, 2.3e14, 2.6e14, 2.9e14],
        [44.777, 31.037, 25.718, 22.642, 20.560, 19.023, 17.826, 16.857, 16.050, 15.364]
    ]
    radii, temps = rToTemp
    # Radial distance from the origin.
    r = math.sqrt(x*x+y*y+z*z)
    # Clamp outside the tabulated range.
    if r <= radii[0]:
        return [temps[0], 0.0]
    if r >= radii[-1]:
        return [temps[-1], 0.0]
    # Find the bracketing segment and interpolate linearly within it.
    xi = 0
    for i in range(len(radii) - 1):
        if radii[i] <= r < radii[i + 1]:
            xi = i
            break
    frac = (r - radii[xi]) / (radii[xi + 1] - radii[xi])
    temp0 = temps[xi] + frac * (temps[xi + 1] - temps[xi])
    # return (temp0, None)
    return [temp0, 0.0]
#.......................................................................
def abundance(macros, x, y, z):
    """Return the fractional abundance of each radiating species.

    One entry per file in par.moldatfile, each expressed as a fraction of
    the effective bulk density (the nMolWeights-weighted sum of the
    density() return).  Here: a spatially constant HCO+ abundance.
    """
    hcop_fraction = 1.0e-9
    return [hcop_fraction]  # must be a list, even with a single species
#.......................................................................
def doppler(macros, x, y, z):
    """Return the turbulent Doppler b parameter (m/s) at (x, y, z).

    b is defined via the Doppler-broadened Gaussian line profile
    flux(v) = exp(-(v - v0)^2 / b^2).  Only the bulk-turbulence part is
    returned here; LIME adds the temperature-dependent (thermal)
    contribution itself.  This model uses a constant 200 m/s everywhere,
    though a position-dependent value would also be valid.
    """
    turbulent_b = 200.0
    return turbulent_b
#.......................................................................
def velocity(macros, x, y, z):
    """
    Return the bulk gas velocity vector [vx, vy, vz] in m/s: radial free fall
    onto a central mass of 1.0 solar mass at the origin.
    """
    rMin = 0.1*macros["AU"]  # kept above zero to avoid the r=0 singularity.
    r = math.sqrt(x*x + y*y + z*z)
    # Clamp the radius used in the formulas to prevent overflows at r == 0.
    rToUse = r if r > rMin else rMin
    # Free-fall speed v = sqrt(2*G*M/r) with M = 1.989e30 kg (one solar mass).
    ffSpeed = math.sqrt(2.0*macros["GRAV"]*1.989e30/rToUse)
    # Infall: the unit radial vector scaled by -ffSpeed.
    return [-x*ffSpeed/rToUse,
            -y*ffSpeed/rToUse,
            -z*ffSpeed/rToUse]
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
if __name__ == '__main__':
    # Put any private debugging tests here, which you can then run by calling
    # the module directly from the unix command line.
    #
    # Physical constants and collision-partner integer codes, keyed by the
    # same names LIME supplies to the model functions at run time.
    macros = {\
        "AMU"           :1.66053904e-27,\
        "CLIGHT"        :2.99792458e8,\
        "HPLANCK"       :6.626070040e-34,\
        "KBOLTZ"        :1.38064852e-23,\
        "GRAV"          :6.67428e-11,\
        "AU"            :1.495978707e11,\
        "LOCAL_CMB_TEMP":2.72548,\
        "PC"            :3.08567758e16,\
        "PI"            :3.14159265358979323846,\
        "SPI"           :1.77245385091,\
        "CP_H2"         :1,\
        "CP_p_H2"       :2,\
        "CP_o_H2"       :3,\
        "CP_e"          :4,\
        "CP_H"          :5,\
        "CP_He"         :6,\
        "CP_Hplus"      :7\
    }
    # NOTE(review): 'input' here is presumably this module's own input()
    # function (defined earlier in the file, outside this excerpt), not the
    # builtin -- confirm.
    par = input(macros)
    # Sample a point at a fixed fraction of the model radius.
    x = par.radius*0.1
    y = par.radius*0.07
    z = par.radius*0.12
    # Python 2 print statements; density() and temperature() are defined
    # earlier in this file.
    print density(    macros, x, y, z)[0]
    print temperature(macros, x, y, z)[0]
    print doppler(    macros, x, y, z)
    print velocity(   macros, x, y, z)
|
allegroLeiden/lime
|
example/model.py
|
Python
|
gpl-3.0
| 12,700
|
[
"Gaussian",
"VTK"
] |
c652c7ce1f3f4770a623a9bd8eaa4c7d1be7b6ccf6040fe8d6204ed0a9d38244
|
'''
Module for utility functions useful in a variety
of GIS programs
@author: Jacob Oberman
'''
import time
import calendar
from itertools import izip
import numpy
import netCDF4
import filetypes
def wrap_lon_0_360(lon):
    '''
    Wrap a single longitude (degrees) to the interval [0, 360).

    Uses the modulo operator, whose result in Python carries the sign of
    the divisor, so a single operation replaces the original repeated
    add/subtract loop.  This is constant-time regardless of how far lon
    lies outside the interval (the loop was O(|lon|/360) and would hang
    on infinite input).
    '''
    return lon % 360
def wrap_lon_neg180_180(lon):
    '''
    Wrap a single longitude (degrees) to the interval (-180, 180].

    Constant-time replacement for the original add/subtract loop: first
    wrap into [0, 360) with the modulo operator, then shift values above
    180 down by one full turn.  Boundary behaviour matches the loop:
    both 180 and -180 map to 180.
    '''
    wrapped = lon % 360  # now in [0, 360)
    return wrapped - 360 if wrapped > 180 else wrapped
def timestr_to_nsecs(timestr,
                     epoch='00:00:00 01-01-1970',
                     format='%H:%M:%S %m-%d-%Y'):
    '''
    Convert a time string to the number of seconds since a given epoch.

    Both *timestr* and *epoch* are parsed with time.strptime using
    *format* and interpreted as UTC.  With the default epoch/format this
    yields Unix time.

    Inputs:
        timestr - a string representing the time to convert
        epoch - a string representing the desired epoch time
        format - a time.strptime format string used for both inputs
    Outputs:
        number of seconds between epoch and timestr (negative if timestr
        precedes the epoch)
    '''
    def _to_secs(s):
        # struct_time -> seconds since the Unix epoch, treating it as UTC.
        return calendar.timegm(time.strptime(s, format))
    return _to_secs(timestr) - _to_secs(epoch)
def nsecs_to_timestr(nSecsSinceEpoch,
                     epoch='00:00:00 01-01-1970',
                     format='%H:%M:%S %m-%d-%Y'):
    '''
    Convert seconds-since-epoch into a human-readable UTC time string.

    Inverse of timestr_to_nsecs for the same *epoch* and *format*; with
    the defaults this converts out of Unix time.

    Inputs:
        nSecsSinceEpoch - number of seconds since the specified epoch
        epoch - a string (in the style of *format*) giving the epoch
        format - a time.strptime/strftime format string
    Outputs:
        the corresponding time formatted with *format*
    '''
    epochSecs = calendar.timegm(time.strptime(epoch, format))
    # Shift to absolute Unix time, then render in UTC.
    return time.strftime(format, time.gmtime(nSecsSinceEpoch + epochSecs))
def UTCoffset_from_lon(lon):
    '''
    Approximate the offset from UTC (in seconds) for a longitude in degrees.

    The offset is simply the longitude wrapped to (-180, 180] divided into
    15-degree bands and rounded to the nearest whole hour; it therefore
    ignores daylight savings time and actual political time zones.  The
    result is positive east of Greenwich and negative west of it.
    '''
    secsPerHour = 3600
    nHours = round(wrap_lon_neg180_180(lon)/15.0)
    return secsPerHour*nHours
def find_occurences(superArray, subArray):
    '''
    Locate occurrences of *subArray* within *superArray*, comparing along
    the rightmost axis.  Returns a boolean array of rank one less than
    *superArray*: True where the slice along the last axis equals
    *subArray*, False elsewhere.
    '''
    matches = numpy.apply_along_axis(
        lambda slice1d: numpy.array_equal(slice1d, subArray),
        -1, superArray)
    return matches
def write_grid_to_netcdf(griddef, outFname):
    '''
    Function to create netCDF files that contain
    the lat/lon data needed to plot for a given grid
    definition that uses rows/columns

    For each grid cell, five (lat, lon) variables are written: the four
    corners (ll, ul, ur, lr) and the cell center.

    Inputs:
        griddef - an instantiated griddef object
        outFname - a path to the outfile (will be clobbered)
    '''
    (minRow, maxRow, minCol, maxCol) = griddef.indLims()
    nRows = maxRow-minRow+1
    nCols = maxCol-minCol+1
    # create the index vectors we need to make grids
    (cols, rows) = numpy.meshgrid(numpy.arange(minCol, maxCol+1),
                                  numpy.arange(minRow, maxRow+1))
    cols = cols.astype(numpy.float32) # cast as precaution
    rows = rows.astype(numpy.float32)
    # write out netcdf
    fid = netCDF4.Dataset(outFname, 'w', format='NETCDF3_CLASSIC')
    # create dimensions and variables
    fid.createDimension('row', nRows)
    fid.createDimension('col', nCols)
    dims = ('row', 'col')
    # create the 5 grid definitions: (row, col) offsets relative to each
    # cell's base index -- four corners plus the half-step cell center.
    offsets = [(0,0), (1,0), (1,1), (0,1), (.5,.5)] # row,col
    labels = ['ll', 'ul', 'ur', 'lr', 'cent']
    # NOTE: izip/iteritems below are Python 2 only.
    for (lbl, (rowOff, colOff)) in izip(labels,offsets):
        lon = fid.createVariable(lbl+'_lon', 'f', dims)
        setattr(lon, 'Units', 'degrees_east')
        lat = fid.createVariable(lbl+'_lat', 'f', dims)
        setattr(lat, 'Units', 'degrees_north')
        # griddedToGeo maps (row, col) indices to (lat, lon) arrays, which
        # are written straight into the netCDF variables.
        (lat[:], lon[:]) = griddef.griddedToGeo(rows+rowOff, cols+colOff)
    # write grid parameters to file as global attributes
    # NOTE(review): assumes the griddef class name carries an 8-character
    # suffix that is stripped to obtain the projection name -- confirm
    # against the griddef class hierarchy.
    setattr(fid, 'Projection', griddef.__class__.__name__[:-8])
    for (k,v) in griddef.parms.iteritems():
        setattr(fid, k, v)
    fid.close()
def parse_fromFile_input_file(inFileName, dryRun):
    '''
    Open and read the file and parse it
    for various parameter values.

    Generate and return a command line call
    that can be used to call whips
    with those parameter values.

    The file must contain a block delimited by lines reading exactly
    "BEGIN" and "END".  Inside the block, lines of the form
    "NAME = value..." are translated into whips command-line flags; lines
    starting with '.' are skipped; any unrecognised NAME is collected as a
    projection attribute and appended after --projAttrs.  Raises
    SyntaxError when no well-formed BEGIN/END block is found.
    '''
    call = []
    attrs = []
    f = open(inFileName, 'r')
    s = f.readline()
    try:
        while(s != ""):
            if(s == "BEGIN\n"):
                while(s != ""):
                    s = f.readline()
                    if(s == "END\n"):
                        # Block complete: recognised options first, then
                        # the collected projection attributes.
                        return call + ["--projAttrs"] + attrs
                    if(s[0] == '.'):
                        # lines starting with '.' are ignored
                        continue
                    # Strip stray double quotes before tokenising.
                    s = s.split('"')
                    if(len(s) > 1 and dryRun):
                        print "Warning: Found line containing "\
                              "stray quotation marks... stripping."
                    words = ("".join(s)).split()
                    '''Determine what the line is supposed to do'''
                    if(words == []):
                        continue
                    elif(words[1] != "="):
                        # malformed parameter line: abandon the block and
                        # fall through to the SyntaxError below
                        break
                    elif(words[0] == "DIRECTORY"):
                        call += ["--directory",
                                 "{0}".format(' '.join(words[2:]))]
                    elif(words[0] == "FILELIST"):
                        call += ["--fileList"] + words[2:]
                    elif(words[0] == "FILETYPE"):
                        call += ["--filetype",
                                 "{0}".format(' '.join(words[2:]))]
                    elif(words[0] == "GRIDPROJ"):
                        call += ["--gridProj",
                                 "{0}".format(' '.join(words[2:]))]
                    elif(words[0] == "MAPFUNC"):
                        call += ["--mapFunc",
                                 "{0}".format(' '.join(words[2:]))]
                    elif(words[0] == "OUTFUNC"):
                        call += ["--outFunc",
                                 "{0}".format(' '.join(words[2:]))]
                    elif(words[0] == "OUTDIRECTORY"):
                        call += ["--outDirectory",
                                 "{0}".format(' '.join(words[2:]))]
                    elif(words[0] == "OUTFILENAME"):
                        call += ["--outFileName",
                                 "{0}".format(' '.join(words[2:]))]
                    elif(words[0] == "INCLUDEGRID"):
                        call += ["--includeGrid",
                                 "{0}".format(' '.join(words[2:]))]
                    elif(words[0] == "VERBOSE"):
                        call += ["--verbose",
                                 "{0}".format(' '.join(words[2:]))]
                    elif(words[0] == "INTERACTIVE"):
                        call += ["--interactive",
                                 "{0}".format(' '.join(words[2:]))]
                    elif(dryRun):
                        # Unrecognised NAME: keep as quoted projection
                        # attribute for display during a dry run.
                        attrs += ['"{0}:{1}"'.format(words[0], \
                                  ' '.join(words[2:]))]
                    else:
                        attrs += ['{0}:{1}'.format(words[0], \
                                  ' '.join(words[2:]))]
                # inner loop exhausted without finding END -> malformed
                break
            s = f.readline()
    except:
        # NOTE(review): bare except deliberately swallows parse errors
        # (e.g. IndexError on short lines) so the SyntaxError below is
        # raised instead -- confirm intent.
        pass
    if(s != ""):
        raise SyntaxError ("Invalid input file. File must be formatted " \
                           "correctly.\nCheck line '{0}' and try again".format(s))
    raise SyntaxError ("Invalid input file. Check that the file matches the "\
                       "format described in the documentation and try again")
def parse_filetype(namespace):
    '''
    Open and read the filetype file
    to add associated parameters to namespace

    Looks up the object named "<namespace.filetype>_filetype" in the
    filetypes module and copies its attributes onto *namespace*,
    overriding any user-supplied values (collecting a warning when a
    value was already set).  The special attributes "parser" and "doutf"
    map onto namespace.filetype and namespace.outFunc respectively;
    underscore-prefixed attributes are skipped.  Returns the updated
    namespace.
    '''
    filetype = getattr(filetypes, namespace.filetype + "_filetype")
    printfiletype = namespace.filetype  # keep the original name for messages
    wng = ""  # accumulated warning text, printed at the end when verbose
    for attr in dir(filetype):
        if attr == "parser":
            setattr(namespace, "filetype", getattr(filetype, attr))
        elif attr == "doutf":
            setattr(namespace, "outFunc", getattr(filetype, attr))
        elif attr[0] == "_":
            # private/dunder attribute: not a filetype parameter
            pass
        else:
            try:
                # getattr(namespace, attr) raises AttributeError when the
                # user supplied no value, in which case no warning is kept.
                wng += "Warning: Value {0} supplied for attribute {1} " \
                       "has been ignored. Filetype {2} does not support "\
                       "custom values for this parameter.\n".format(\
                       getattr(namespace, attr), attr, printfiletype)
            except AttributeError:
                pass
            setattr(namespace, attr, getattr(filetype, attr))
            wng += "Value for {0} given by filetype information\n".format(attr)
    try:
        # Only print the accumulated warnings in verbose mode; the string
        # comparison mirrors how the flag is parsed elsewhere.
        if (namespace.verbose != 'False'):
            print wng
    except:
        # namespace may lack a 'verbose' attribute entirely; stay quiet.
        pass
    return namespace
|
barronh/WHIPS
|
process_sat/utils.py
|
Python
|
mit
| 9,559
|
[
"NetCDF"
] |
fde2db798e7e40c83fe16bcc86554780cc30411bfcdb20d433596956775b3127
|
"""
PySCeS - Python Simulator for Cellular Systems (http://pysces.sourceforge.net)
Copyright (C) 2004-2020 B.G. Olivier, J.M. Rohwer, J.-H.S Hofmeyr all rights reserved,
Brett G. Olivier (bgoli@users.sourceforge.net)
Triple-J Group for Molecular Cell Physiology
Stellenbosch University, South Africa.
Permission to use, modify, and distribute this software is given under the
terms of the PySceS (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Brett G. Olivier
"""
from __future__ import division, print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import os, time
import pickle, io
import socket
from threading import Thread
from .startup import octopussy
print(octopussy)
HOSTNAME = socket.gethostbyaddr(socket.gethostname())[0]
BLOCK_SIZE = 32768
PICKLE_PROTOCOL = 2
STATUS_PORT = 60000
PYSCES_PORT = 60001
CFSERVE_PORT = 60005
class SimpleClient:
    """
    Sends a list of commands: data = [cmd1,cmd2, ...]
    Standard blocking IO where for each cmd sent a response
    is expected and held in self.response. This is meant
    for quick sequential jobs (housekeeping functions etc)
    """

    # Class-level defaults; real values are set in __init__ or by callers.
    server = None       # host name or address to connect to
    port = None         # TCP port
    block_size = None   # maximum bytes read per recv()
    sent = None         # last list of commands passed to send()
    response = None     # list of replies, one per command sent
    timeout = None      # socket timeout in seconds; None blocks forever

    def __init__(self, server, port, block_size, myname=None):
        # 'myname' is accepted for interface symmetry with the threaded
        # client classes but is unused here.
        self.server = server
        self.port = port
        self.block_size = block_size
        self.sent = []
        self.response = []

    def send(self, data):
        """Send each command in *data* on a fresh connection and collect
        one single-block reply per command into self.response."""
        self.sent = data
        self.response = []
        for d in self.sent:
            # One short-lived TCP connection per command.
            self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.s.settimeout(self.timeout)
            self.s.connect((self.server, self.port))
            self.s.send(d)
            print('Sent: ', d)
            self.response.append(self.s.recv(self.block_size))
            self.s.close()
class SimpleMultiReadClient:
    """
    Reads a data block which is larger than block_size
    Standard blocking IO which issues a single command (data)
    and reads block_size until all data is returned
    """

    server = None       # host name or address to connect to
    port = None         # TCP port
    block_size = None   # bytes requested per recv() call
    sent = None         # unused here; kept for interface symmetry
    response = None     # accumulated reply payload
    timeout = None      # socket timeout in seconds; None blocks forever

    def __init__(self, server, port, block_size, myname=None):
        # 'myname' accepted for symmetry with the threaded clients; unused.
        self.server = server
        self.port = port
        self.block_size = block_size

    def send(self, data):
        """Send *data* once, then read until the peer closes the stream."""
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.settimeout(self.timeout)
        self.s.connect((self.server, self.port))
        self.s.send(data)
        print('Sent: ', data)
        GO = True
        self.response = ''
        # An empty recv() result marks end-of-stream (server closed).
        # NOTE(review): under Python 3 recv() returns bytes, so the
        # str += bytes below would raise -- this looks like Python 2
        # legacy; confirm before relying on it under Python 3.
        while GO:
            data = self.s.recv(self.block_size)
            self.response += data
            if data == '':
                GO = False
        self.s.close()
class ThreadedClient(Thread):
    """
    Standard blocking IO where command is a single P_PROTOCOL command which
    expects a response (held in self.response), however, every instance
    of this client is a new thread. This is for long running commands (jobs)
    """

    server = None       # host name or address to connect to
    port = None         # TCP port
    block_size = None   # maximum bytes read per recv()
    response = None     # reply received for the single command
    timeout = None      # socket timeout in seconds; None blocks forever

    def __init__(self, command, server, port, block_size, myname=None):
        Thread.__init__(self)
        self.command = command
        self.server = server
        self.port = port
        self.block_size = block_size
        if myname != None:
            # label the thread; used in the log message in SendLoop()
            self.setName(myname)

    def run(self):
        # Thread entry point: delegate to SendLoop().
        self.SendLoop()

    def SendLoop(self):
        """Open a connection, send the command, store the single reply."""
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.settimeout(self.timeout)
        self.s.connect((self.server, self.port))
        self.s.send(self.command)
        self.response = self.s.recv(self.block_size)
        print(self.getName() + ' Sent: ', self.command)
        self.s.close()
        #time.sleep(0.5)
class BasicServerSocket(Thread):
    """
    Minimal threaded TCP request/reply server: accepts one client at a
    time, reads a single block, optionally logs it, checks for a KILL
    request and replies with the output of SendAction().  Subclasses
    override SendAction() to implement their behaviour.
    """

    backlog = 5            # maximum queued connections for listen()
    port = None            # TCP port to bind
    block_size = None      # maximum bytes read per recv()
    client = None          # socket of the currently connected client
    client_address = None  # (host, port) of the current client
    RequestLogOn = True    # when True, every request is echoed to stdout
    server_active = True   # cleared by KillCheck() to stop ListenLoop()

    def __init__(self, port, block_size, myname=None):
        Thread.__init__(self)
        self.port = port
        self.block_size = block_size
        if myname != None:
            self.setName(myname)
        print(self.getName() + ': Ready to serve!')

    def run(self):
        # Thread entry point.
        self.ListenLoop()

    def SendAction(self, data):
        """Hook for subclasses: compute the reply for *data* (base class
        just logs and echoes the request back)."""
        print(self.client_address[0] + ', ' + time.strftime('%H:%M:%S') + ', ' + self.getName() + ', ' + data)
        return data

    def RequestLog(self, data):
        # Log "<client>, <time>, <thread>, <payload>" to stdout.
        print(self.client_address[0] + ', ' + time.strftime('%H:%M:%S') +\
              ', ' + self.getName() + ', ' + data)

    def KillCheck(self, data):
        """Shut the server down when the request starts with 'KILL'."""
        if data[:4] == 'KILL':
            data = 'You killed the server at: ' + time.strftime('%H:%M:%S')
            self.server_active = False
            print(self.client_address[0] + ' terminated me (' + self.getName() + ') at '+ time.strftime('%H:%M:%S'))
        return data

    def ListenLoop(self):
        """Accept clients one at a time until server_active is cleared."""
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.bind(('', self.port))   # bind on all interfaces
        self.s.listen(self.backlog)
        while self.server_active:
            self.client, self.client_address = self.s.accept()
            data = self.client.recv(self.block_size)
            if self.RequestLogOn: self.RequestLog(data)
            data = self.KillCheck(data)
            data = self.SendAction(data)
            self.client.send(data)
            self.client.close()
        self.s.close()
class StatusServer(BasicServerSocket):
    """
    Daemon server that replies to any request with the current STATUS
    string; the request 'P_RESET_STATUS' resets the status to 'READY'
    before replying.  Worker servers update STATUS through a shared
    reference to an instance of this class.
    """

    STATUS = 'READY'  # last reported status; mutated by worker servers

    def __init__(self, port, block_size, myname=None):
        self.port = port
        self.block_size = block_size
        BasicServerSocket.__init__(self, port, block_size, myname)
        # daemon thread: does not keep the interpreter alive on exit
        self.setDaemon(True)

    def SendAction(self, data):
        """Reply with STATUS, resetting it first on 'P_RESET_STATUS'."""
        if data == 'P_RESET_STATUS':
            self.STATUS = 'READY'
        data = self.STATUS
        return data
class BasicServer(BasicServerSocket):
    """
    Server that dispatches comma-separated requests of the form
    'CMD,arg1,arg2,...' to the method of the same name, provided the
    name is declared in BASIC_COMMAND_LIST or COMMAND_LIST.  The
    handler's result is stringified and sent back; unknown commands and
    handler exceptions both produce the reply 'False'.
    """

    PROTOCOL = None        # dict mapping command name -> bound method
    debug = True
    RESULT = None          # payload served by P_GETDATA / P_STORE_DATA
    BASIC_COMMAND_LIST = ('P_GETDATA', 'P_STORE_DATA', 'P_NONE')
    COMMAND_LIST = ()      # additional commands supplied by subclasses
    status_server = None   # optional shared StatusServer mirroring STATUS
    STATUS = 'READY'

    def __init__(self, port, block_size, status_server=None, myname=None):
        self.port = port
        self.block_size = block_size
        ## self.COMMAND_LIST = ()
        if status_server != None:
            self.status_server = status_server
        self.PROTOCOL = {}
        self.BuildProtocolTable()
        BasicServerSocket.__init__(self, port, block_size, myname)

    def setStatus(self,status):
        """Set the local STATUS and mirror it to the shared status server."""
        self.STATUS = status
        if self.status_server != None:
            self.status_server.STATUS = status

    def SendAction(self, data):
        """Parse 'CMD,arg,...' and dispatch through the PROTOCOL table."""
        data = data.split(',')
        if data[0] in list(self.PROTOCOL.keys()):
            print(self.client_address[0] + ', ' + time.strftime('%H:%M:%S') +\
                  ', ' + self.getName() + ', EXECUTE ' + str(data).replace(',', ''))
            try:
                # Handlers either take the remaining tokens as one list
                # argument or no argument at all.
                if len(data) > 1:
                    data = str(self.PROTOCOL[data[0]](data[1:]))
                else:
                    data = str(self.PROTOCOL[data[0]]())
            except Exception as ex:
                # any failure inside a handler is reported as 'False'
                print('ProcessException', ex)
                data = 'False'
        else:
            print(self.client_address[0] + ', ' + time.strftime('%H:%M:%S') +\
                  ', ' + self.getName() + ', UNKNOWN ' + str(data).replace(',', ''))
            data = 'False'
        return data

    def P_GETDATA(self, *args):
        """Stream the pickled RESULT to the client in block_size chunks."""
        self.setStatus('SENDING_DATA')
        # NOTE(review): pickle.dump into io.StringIO requires a binary
        # buffer (io.BytesIO) on Python 3 -- looks like Python 2 legacy;
        # confirm before using on Python 3.
        F = io.StringIO()
        pickle.dump(self.RESULT, F, PICKLE_PROTOCOL)
        F.seek(0)
        data = 'OK'
        while data != '':
            data = F.read(self.block_size)
            self.client.send(data)
        self.setStatus('READY')
        print(octopussy)
        return True

    def P_STORE_DATA(self, *args):
        """Pickle RESULT to '<hostname>_data.bin' in the working directory."""
        global HOSTNAME
        G = open(HOSTNAME + '_data.bin','wb')
        pickle.dump(self.RESULT, G, PICKLE_PROTOCOL)
        G.flush()
        G.close()
        return True

    def P_NONE(self, *args):
        # no-op command, useful for connectivity tests
        return True

    def BuildProtocolTable(self):
        """Map each declared command name to its bound method."""
        for cmd in self.BASIC_COMMAND_LIST:
            self.PROTOCOL.setdefault(cmd,getattr(self,cmd))
        for cmd in self.COMMAND_LIST:
            self.PROTOCOL.setdefault(cmd,getattr(self,cmd))
class ModelFileServer(BasicServerSocket):
    """
    Daemon file server for model definition files.  Protocol:
      'GET'         -> stream the currently loaded file in chunks
      'LIST'        -> pickled directory listing of the model directory
      'LOAD,<name>' -> open <name> for serving; replies 'True'/'False'
    anything else   -> reply 'False'
    """

    model_file = None         # open file object currently being served
    model_file_name = 'None'  # name of that file
    model_directory = None    # directory the model files live in

    def __init__(self, port, block_size, myname=None):
        self.port = port
        self.block_size = block_size
        BasicServerSocket.__init__(self, port, block_size, myname)
        self.setDaemon(True)

    def ReadFile(self, model_file_name, model_directory=None):
        """Open a model file for serving; returns True on success."""
        # fall back to the previously used directory when none is given
        if self.model_directory != None and model_directory == None:
            model_directory = self.model_directory
        fullP = os.path.join(model_directory,model_file_name)
        if os.path.exists(fullP):
            self.model_file = open(fullP,'r')
            self.model_directory = model_directory
            self.model_file_name = model_file_name
            return True
        else:
            return False

    def ListenLoop(self):
        """Accept one client at a time and answer GET/LIST/LOAD requests."""
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.bind(('', self.port))
        self.s.listen(self.backlog)
        while self.server_active:
            self.client, self.client_address = self.s.accept()
            data = self.client.recv(self.block_size)
            data = self.KillCheck(data)
            if self.RequestLogOn: self.RequestLog(data)
            if data == 'GET':
                # stream the loaded file from the start, chunk by chunk,
                # until read() returns the empty string
                self.model_file.seek(0)
                while data != '':
                    data = self.model_file.read(self.block_size)
                    self.client.send(data)
            elif data == 'LIST':
                # NOTE(review): pickle.dump into io.StringIO needs
                # io.BytesIO on Python 3 -- Python 2 legacy; confirm.
                F = io.StringIO()
                data = os.listdir(self.model_directory)
                pickle.dump(data,F,PICKLE_PROTOCOL)
                F.seek(0)
                while data != '':
                    data = F.read(self.block_size)
                    self.client.send(data)
            elif data[:4] == 'LOAD':
                data = data.split(',')[1]
                data = str(self.ReadFile(data))
            else:
                data = 'False'
            self.client.send(data)
            self.client.close()
        self.s.close()
class ServerStatusCheck(Thread):
    """
    Creates a Thread that polls [servers] on port every interval seconds
    for their current status. This is collected in a current_status list
    as (server,status) tuples
    """

    servers = None         # iterable of host names to poll
    port = None            # status port on each server
    block_size = None      # maximum bytes read per recv()
    interval = None        # seconds between polling sweeps
    current_status = None  # (server, status) tuples from the latest sweep
    go = True              # clear to stop the polling loop

    def __init__(self, servers, port, block_size, interval=120, myname=None):
        Thread.__init__(self)
        self.servers = servers
        self.port = port
        self.block_size = block_size
        self.interval = interval
        self.current_status = []
        # daemon thread: exits with the main program
        self.setDaemon(True)

    def run(self):
        # Poll until go is cleared: sweep all servers, report, sleep.
        while self.go:
            self.current_status = []
            for s in self.servers:
                self.current_status.append(self.PollServer(s))
            self.PrintStatus()
            time.sleep(self.interval)

    def PollServer(self, server):
        """Ask one server for its status; returns (server, reply)."""
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.connect((server, self.port))
        self.s.send('STATUS')
        data = self.s.recv(self.block_size)
        self.s.close()
        return (server, data)

    def PrintStatus(self):
        """Dump the latest sweep to stdout as a banner."""
        print('\n********** Status report %s************\n*' % time.strftime('%H:%M:%S'))
        for s in self.current_status:
            print('*', s[0], s[1])
        print('*\n*********************************************\n')
class TentacleScanner:
    """
    Classifies the configured servers into ready/busy/dead by sending a
    'STATUS' request to each one's status port and inspecting the reply.
    """

    def __init__(self,servers):
        self.servers = servers        # host names to scan
        self.servers_ready = []       # replied 'READY'
        self.servers_busy = []        # replied with any other status
        self.servers_dead = []        # connection failed
        self.feedback = []            # (server, reply) pairs for last scan
        self.feedback_history = []    # feedback of every scan so far

    def scan(self):
        """Poll every server once and rebuild the three category lists."""
        self.feedback = []
        self.servers_ready = []
        self.servers_busy = []
        self.servers_dead = []
        for server in self.servers:
            try:
                print('Tentacle scanner is trying server:', server)
                client = SimpleClient(server, STATUS_PORT, BLOCK_SIZE)
                client.timeout = 5
                client.send(['STATUS'])
                print('Response:', client.response)
                self.feedback.append((client.server,client.response[0]))
            except Exception as ex:
                print(ex)
                # NOTE(review): if SimpleClient() itself raised, 'client'
                # here is the previous iteration's instance (or unbound on
                # the first pass) -- confirm this is acceptable.
                self.feedback.append((client.server,'FAILED'))
        self.feedback_history.append(self.feedback)
        ## print self.feedback, '\n'
        # Bucket the replies: FAILED -> dead, READY -> ready, rest -> busy.
        for sv in self.feedback:
            if sv[1] == 'FAILED':
                self.servers_dead.append(sv[0])
            elif sv[1] == 'READY':
                self.servers_ready.append(sv[0])
            else:
                self.servers_busy.append(sv[0])
        print('\nready:\n%s \n' % self.servers_ready)
        print('busy:\n%s \n' % self.servers_busy)
        print('dead:\n%s \n' % self.servers_dead)

    def getAvailableServers(self):
        """Rescan and return the servers reporting 'READY'."""
        self.scan()
        return self.servers_ready

    def getWorkingServers(self):
        """Rescan and return the servers reporting a non-READY status."""
        self.scan()
        return self.servers_busy

    def getActiveServers(self):
        """Rescan and return every server that answered at all."""
        self.scan()
        return self.servers_ready + self.servers_busy

    def getDeadServers(self):
        """Rescan and return the servers that could not be reached."""
        self.scan()
        return self.servers_dead
class ServerListLoader:
    """Load a plain-text list of server names, one name per line.

    Blank lines and lines whose first non-whitespace character is '#'
    are ignored.  The default location is a file called 'server_list'
    next to the running script.
    """

    file_name = 'server_list'
    directory_name = os.path.dirname(os.path.abspath(os.sys.argv[0]))
    server_list = None

    def __init__(self):
        self.server_list = []

    def ReadFile(self, file_name=None, directory_name=None):
        """Parse the server list file and return the list of names.

        Overrides of file_name/directory_name are remembered on the
        instance.  On any failure an error message is printed and an
        empty list is returned.
        """
        if file_name != None:
            self.file_name = file_name
        if directory_name != None:
            self.directory_name = directory_name
        self.server_list = []
        try:
            sFile = open(os.path.join(self.directory_name, self.file_name),'r')
            for rawLine in sFile:
                # strip surrounding whitespace and line terminators
                entry = rawLine.strip().strip('\n').strip('\r').strip('\r\n')
                if entry == '' or entry.startswith('#'):
                    continue  # skip blanks and comments
                self.server_list.append(entry)
            sFile.close()
            return self.server_list
        except Exception as ex:
            print(ex)
            print('Cannot find \'server_list\' file in current directory: %s' % self.directory_name)
            print('This is a fatal error please create this file with server names, one per line')
            return []
|
bgoli/pysces
|
pysces/kraken/KrakenNET.py
|
Python
|
bsd-3-clause
| 14,998
|
[
"PySCeS"
] |
158694a40eb03d8b8c6352f7d8e2bd78a7db470c3dd40d5fd7ebb2c68b64b172
|
from GaussianArm import GaussianArm
from BernoulliArm import BernoulliArm
from BaseBanditArm import BaseBanditArm
"""
Arm for modelling a customer's purchasing behaviour.
The model consists on a binomial distribution to find whether or not a given
client makes a purchase and a gaussian distribution from which the rewards
are drawn.
The arm's output is obtaining by multiplying the output of both distributions.
"""
class RPVArm(BaseBanditArm):
    """Revenue-per-visit bandit arm.

    Combines a Bernoulli arm (did the customer purchase?) with a Gaussian
    arm (ticket value); a draw is the product of the two, with the ticket
    clipped at zero so a negative Gaussian sample never yields a negative
    reward.
    """

    def __init__(self, p, mean, sigma):
        """Inputs:
        p : float -- probability of making a purchase
        mean : float -- average ticket
        sigma: float -- standard deviation of the average ticket
        """
        self.purchase = BernoulliArm(p)        # purchase indicator arm
        self.ticket = GaussianArm(mean, sigma) # ticket value arm
        return

    def draw(self):
        """Return one sampled reward: purchase draw times the ticket draw,
        the latter floored at zero."""
        return self.purchase.draw() * max(self.ticket.draw(), 0)
|
MarcoAlmada/bandit-panda
|
bandit/arms/RPVArm.py
|
Python
|
mit
| 879
|
[
"Gaussian"
] |
07f4b8815e35854af6e2f209cc4366641bd4d4aa6348c356c2920af1cce397a8
|
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2017 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
import numpy as np
import os
from nose.tools import assert_raises
from nose.plugins.attrib import attr
from horton import * # pylint: disable=wildcard-import, unused-wildcard-import
def test_shell_nbasis():
    '''Check basis-function counts per shell type and rejection of -1.'''
    # shell_type -> expected number of basis functions (values taken from
    # the original assertions; negative types give 2*|l|+1, non-negative
    # give (l+1)(l+2)/2).
    expected = {-3: 7, -2: 5, 0: 1, 1: 3, 2: 6, 3: 10}
    for shell_type, nbasis in expected.items():
        assert get_shell_nbasis(shell_type) == nbasis
    # shell type -1 does not exist and must be rejected
    with assert_raises(ValueError):
        get_shell_nbasis(-1)
def test_gobasis_consistency():
    '''Build a small two-center GOBasis, check its bookkeeping arrays, then
    verify that each kind of inconsistent input raises an error.'''
    centers = np.random.uniform(-1, 1, (2, 3))
    shell_map = np.array([0, 0, 0, 1, 1, 1, 1])
    nprims = np.array([2, 3, 3, 5, 5, 5, 7])
    shell_types = np.array([2, 1, 0, -2, 3, 0, 1])
    alphas = np.random.uniform(0, 1, nprims.sum())
    con_coeffs = np.random.uniform(-1, 1, nprims.sum())
    gobasis = GOBasis(centers, shell_map, nprims, shell_types, alphas, con_coeffs)
    assert gobasis.nbasis == 29
    assert gobasis.max_shell_type == 3
    scales = gobasis.get_scales()
    assert abs(scales[0] - gob_cart_normalization(alphas[0], np.array([2, 0, 0]))) < 1e-10
    assert (gobasis.basis_offsets == np.array([0, 6, 9, 10, 15, 25, 26])).all()
    assert (gobasis.shell_lookup == np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 3,
                                              3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
                                              4, 4, 4, 5, 6, 6, 6])).all()
    # Same basis with different shell types: counts must update accordingly.
    shell_types = np.array([1, 1, 0, -2, -2, 0, 1])
    gobasis = GOBasis(centers, shell_map, nprims, shell_types, alphas, con_coeffs)
    assert gobasis.nbasis == 21
    assert gobasis.max_shell_type == 2
    # The center indexes in the shell_map are out of range.
    shell_map[0] = 2
    with assert_raises(ValueError):
        i2 = GOBasis(centers, shell_map, nprims, shell_types, alphas, con_coeffs)
    shell_map[0] = 0
    # The size of the array shell_types does not match the number of shells.
    shell_types = np.array([1, 1])
    with assert_raises(TypeError):
        i2 = GOBasis(centers, shell_map, nprims, shell_types, alphas, con_coeffs)
    shell_types = np.array([1, 1, 0, -2, -2, 0, 1])
    # The elements of nprims should be at least 1.
    nprims[1] = 0
    with assert_raises(ValueError):
        i2 = GOBasis(centers, shell_map, nprims, shell_types, alphas, con_coeffs)
    nprims[1] = 3
    # The size of the array alphas does not match the sum of nprims.
    alphas = np.random.uniform(-1, 1, 2)
    with assert_raises(TypeError):
        i2 = GOBasis(centers, shell_map, nprims, shell_types, alphas, con_coeffs)
    alphas = np.random.uniform(-1, 1, nprims.sum())
    # Encountered the nonexistent shell_type -1.
    shell_types[1] = -1
    with assert_raises(ValueError):
        i2 = GOBasis(centers, shell_map, nprims, shell_types, alphas, con_coeffs)
    shell_types[1] = 1
    # The size of con_coeffs does not match nprims.
    con_coeffs = np.random.uniform(-1, 1, 3)
    with assert_raises(TypeError):
        i2 = GOBasis(centers, shell_map, nprims, shell_types, alphas, con_coeffs)
    con_coeffs = np.random.uniform(-1, 1, nprims.sum())
    # Exceeding the maximum shell type (above):
    shell_types[0] = get_max_shell_type()+1
    with assert_raises(ValueError):
        i2 = GOBasis(centers, shell_map, nprims, shell_types, alphas, con_coeffs)
    shell_types[0] = 2
    # Exceeding the maximum shell type (below):
    shell_types[0] = -get_max_shell_type()-1
    with assert_raises(ValueError):
        i2 = GOBasis(centers, shell_map, nprims, shell_types, alphas, con_coeffs)
    shell_types[0] = 2
def test_load_basis():
    '''Check that each registered basis-set family has a lowercase file name
    and can be loaded.'''
    # NOTE: itervalues() is Python 2 only.
    for go_basis_family in go_basis_families.itervalues():
        assert os.path.basename(go_basis_family.filename).islower()
        go_basis_family.load()
def test_grid_lih_321g_hf_density_some_points():
    '''Check HF/3-21G LiH densities on a few grid points against cubegen
    reference values, plus a manual contraction of the first basis function.'''
    ref = np.array([  # from cubegen
        [0.0, 0.0, 0.0, 0.037565082428],
        [0.1, 0.0, 0.0, 0.034775306876],
        [0.0, 0.1, 0.0, 0.034775306876],
        [0.0, 0.0, 1.0, 0.186234028507],
        [0.4, 0.2, 0.1, 0.018503681370],
    ])
    # scale the point coordinates with horton's 'angstrom' unit factor
    ref[:,:3] *= angstrom
    mol = IOData.from_file(context.get_fn('test/li_h_3-21G_hf_g09.fchk'))

    # check for one point the compute_grid_point1 method
    output = np.zeros(mol.obasis.nbasis, float)
    point = np.array([0.0, 0.0, 1.0])*angstrom
    grid_fn = GB1DMGridDensityFn(mol.obasis.max_shell_type)
    mol.obasis.compute_grid_point1(output, point, grid_fn)
    # first basis function is contraction of three s-type gaussians
    assert mol.obasis.nprims[0] == 3
    scales = mol.obasis.get_scales()
    total = 0.0
    # NOTE: xrange is Python 2 only.
    for i in xrange(3):
        alpha = mol.obasis.alphas[i]
        coeff = mol.obasis.con_coeffs[i]
        nrml = gob_cart_normalization(alpha, np.zeros(3, int))
        # check scale
        assert abs(scales[i] - nrml) < 1e-10
        # check that we are on the first atom
        assert mol.obasis.shell_map[i] == 0
        dsq = np.linalg.norm(point - mol.coordinates[0])**2
        gauss = nrml*np.exp(-alpha*dsq)
        total += coeff*gauss
    # manual contraction must reproduce the library value
    assert abs(total - output[0]) < 1e-10

    # check density matrix value
    dm_full = mol.get_dm_full()
    assert abs(dm_full[0,0] - 1.96589709) < 1e-7

    points = ref[:,:3].copy()
    rhos = mol.obasis.compute_grid_density_dm(dm_full, points)
    assert abs(rhos - ref[:,3]).max() < 1e-5
def check_grid_rho(fn, ref, eps):
    """Compare electron densities on a grid against reference values.

    fn  -- wavefunction data file, resolved through the horton context
    ref -- array whose first three columns are grid points and whose
           fourth column holds the expected densities
    eps -- maximum tolerated absolute deviation
    """
    mol = IOData.from_file(context.get_fn(fn))
    dm_full = mol.get_dm_full()
    rhos = mol.obasis.compute_grid_density_dm(dm_full, ref[:, :3].copy())
    assert abs(rhos - ref[:, 3]).max() < eps
def test_grid_co_ccpv5z_cart_hf_density_some_points():
    '''Check CO cc-pV5Z (Cartesian) HF densities against cubegen values.'''
    ref = np.array([  # from cubegen
        [ 0.0,  0.0,  0.0, 4.54392441417],
        [ 0.1,  0.0,  0.0, 2.87874696902],
        [ 0.0,  0.1,  0.0, 2.90909931711],
        [ 0.0,  0.0,  1.0, 0.00563354926],
        [ 0.4,  0.2,  0.1, 0.15257439924],
        [-0.4,  0.2,  0.1, 0.14408104500],
        [ 0.4, -0.2,  0.1, 0.14627065655],
        [ 0.4,  0.2, -0.1, 0.11912840380],
    ])
    # scale the point coordinates with horton's 'angstrom' unit factor
    ref[:,:3] *= angstrom
    check_grid_rho('test/co_ccpv5z_cart_hf_g03.fchk', ref, 3e-3)
def test_grid_co_ccpv5z_pure_hf_density_some_points():
    '''Check CO cc-pV5Z (pure/spherical) HF densities against cubegen values.'''
    ref = np.array([  # from cubegen
        [ 0.0,  0.0,  0.0, 4.54338939220],
        [ 0.1,  0.0,  0.0, 2.87742753163],
        [ 0.0,  0.1,  0.0, 2.90860415538],
        [ 0.0,  0.0,  1.0, 0.00285462032],
        [ 0.4,  0.2,  0.1, 0.15399703660],
        [-0.4,  0.2,  0.1, 0.14425254494],
        [ 0.4, -0.2,  0.1, 0.14409038614],
        [ 0.4,  0.2, -0.1, 0.11750780363],
    ])
    # scale the point coordinates with horton's 'angstrom' unit factor
    ref[:,:3] *= angstrom
    check_grid_rho('test/co_ccpv5z_pure_hf_g03.fchk', ref, 3e-3)
def check_grid_gradient(fn, ref, eps):
    """Compare density gradients on a grid against reference values.

    fn  -- wavefunction data file, resolved through the horton context
    ref -- array whose first three columns are grid points and whose
           remaining columns hold the expected gradient components
    eps -- maximum tolerated absolute deviation
    """
    mol = IOData.from_file(context.get_fn(fn))
    dm_full = mol.get_dm_full()
    gradients = mol.obasis.compute_grid_gradient_dm(dm_full, ref[:, :3].copy())
    assert abs(gradients - ref[:, 3:]).max() < eps
def test_grid_lih_321g_hf_gradient_some_points():
    '''Check HF/3-21G LiH density gradients against cubegen values.'''
    ref = np.array([  # from cubegen
        [0.0, 0.0, 0.0,  0.000000000000,  0.000000000000,  0.179349665782],
        [0.1, 0.0, 0.0, -0.028292898754,  0.000000000000,  0.164582727812],
        [0.0, 0.1, 0.0,  0.000000000000, -0.028292898754,  0.164582727812],
        [0.0, 0.0, 1.0,  0.000000000000,  0.000000000000, -0.929962409854],
        [0.4, 0.2, 0.1, -0.057943497876, -0.028971748938,  0.069569174116],
    ])
    # scale the point coordinates with horton's 'angstrom' unit factor
    ref[:, :3] *= angstrom
    check_grid_gradient('test/li_h_3-21G_hf_g09.fchk', ref, 1e-6)
def test_grid_lih_321g_hf_orbital_gradient_some_points():
    '''Check per-orbital gradients of HF/3-21G LiH at five points against
    finite-difference reference values (one 11x3 block per point).'''
    points = np.array([
        [0.0, 0.0, 0.0],
        [0.1, 0.0, 0.0],
        [0.0, 0.1, 0.0],
        [0.0, 0.0, 1.0],
        [0.4, 0.2, 0.1]
    ])
    ref = np.array([  # calculated using finite difference
        [[ 0.000000000000,  0.000000000000,  0.335358022388],
         [ 0.000000000000,  0.000000000000, -0.039601719339],
         [ 0.000000000000,  0.000000000000, -0.065908783181],
         [ 0.066778303100,  0.000000000000,  0.000000000000],
         [ 0.000000000000,  0.066778303100,  0.000000000000],
         [ 0.000000000000,  0.000000000000,  0.035174712603],
         [ 0.000000000000,  0.000000000000, -0.086099568163],
         [ 0.121755587880,  0.000000000000,  0.000000000000],
         [ 0.000000000000,  0.121755587880,  0.000000000000],
         [ 0.000000000000,  0.000000000000, -0.068136399228],
         [ 0.000000000000,  0.000000000000, -0.027336484452]],
        [[-0.029716570085,  0.000000000000,  0.331085502737],
         [ 0.000124168300,  0.000000000000, -0.039374354768],
         [ 0.008077765337,  0.000000000000, -0.064966718776],
         [ 0.066265798547,  0.000000000000,  0.003804006584],
         [ 0.000000000000,  0.066607211612,  0.000000000000],
         [ 0.000377488384,  0.000000000000,  0.034880919059],
         [-0.000705284103,  0.000000000000, -0.085471067710],
         [ 0.120527897959,  0.000000000000,  0.009112195828],
         [ 0.000000000000,  0.121345725857,  0.000000000000],
         [ 0.013285776833,  0.000000000000, -0.067340615153],
         [-0.000503096838,  0.000000000000, -0.027343807139]],
        [[ 0.000000000000, -0.029716570085,  0.331085502737],
         [ 0.000000000000,  0.000124168300, -0.039374354768],
         [ 0.000000000000,  0.008077765337, -0.064966718776],
         [ 0.066607211612,  0.000000000000,  0.000000000000],
         [ 0.000000000000,  0.066265798547,  0.003804006584],
         [ 0.000000000000,  0.000377488384,  0.034880919059],
         [ 0.000000000000, -0.000705284103, -0.085471067710],
         [ 0.121345725857,  0.000000000000,  0.000000000000],
         [ 0.000000000000,  0.120527897959,  0.009112195828],
         [ 0.000000000000,  0.013285776833, -0.067340615153],
         [ 0.000000000000, -0.000503096838, -0.027343807139]],
        [[ 0.000000000000,  0.000000000000,  5.083843181919],
         [ 0.000000000000,  0.000000000000, -0.220159704611],
         [ 0.000000000000,  0.000000000000, -0.941775028074],
         [ 0.095151155766,  0.000000000000,  0.000000000000],
         [ 0.000000000000,  0.095151155766,  0.000000000000],
         [ 0.000000000000,  0.000000000000,  0.427204143785],
         [ 0.000000000000,  0.000000000000, -0.520952693280],
         [ 0.190225965755,  0.000000000000,  0.000000000000],
         [ 0.000000000000,  0.190225965755,  0.000000000000],
         [ 0.000000000000,  0.000000000000, -0.259089767114],
         [ 0.000000000000,  0.000000000000,  0.108036467562]],
        [[-0.122409907465, -0.061204953733,  0.310330235810],
         [ 0.001247852781,  0.000623926390, -0.037125981310],
         [ 0.032675748998,  0.016337874499, -0.058852274411],
         [ 0.061479984281, -0.002759557877,  0.013993646107],
         [-0.002759557877,  0.065619321096,  0.006996823053],
         [ 0.000434364406,  0.000217182203,  0.042093481225],
         [-0.000016445047, -0.000008222524, -0.094292047809],
         [ 0.109060186207, -0.006612206455,  0.033530326679],
         [-0.006612206454,  0.118978495887,  0.016765163340],
         [ 0.051799568647,  0.025899784323, -0.049145171706],
         [-0.001806896115, -0.000903448057, -0.019856823712]]
    ])
    mol = IOData.from_file(context.get_fn('test/li_h_3-21G_hf_g09.fchk'))
    # request the gradient of every alpha orbital at every point
    orbs = np.arange(mol.obasis.nbasis)
    test = np.array(mol.obasis.compute_grid_orb_gradient_exp(mol.orb_alpha, points, orbs))
    np.testing.assert_almost_equal(test, ref, decimal=7)
def test_grid_co_ccpv5z_cart_hf_gradient_some_points():
    """Compare density gradients for CO (Cartesian cc-pV5Z, HF) with cubegen."""
    # Reference data from Gaussian's cubegen. Each row: x, y, z (in angstrom,
    # converted to atomic units below), followed by three gradient components.
    ref = np.array([ # from cubegen
        [ 0.0, 0.0, 0.0, -0.26805895992, -0.03725931097, 26.06939895580],
        [ 0.1, 0.0, 0.0, -11.66097634913, -0.02427222636, 11.49946087301],
        [ 0.0, 0.1, 0.0, -0.18730587145, -11.60371334591, 11.60046471817],
        [ 0.0, 0.0, 1.0, 0.00350647376, -0.00151630329, -0.00944412097],
        [ 0.4, 0.2, 0.1, -0.46814335442, -0.28380627268, -0.02592227656],
        [-0.4, 0.2, 0.1, 0.63742782898, -0.32989678808, 0.00444361306],
        [ 0.4, -0.2, 0.1, -0.50464249640, 0.29978538874, -0.01244489023],
        [ 0.4, 0.2, -0.1, -0.21837773815, -0.16855926400, 0.15518115326],
    ])
    ref[:,:3] *= angstrom
    # cubegen output somehow not reliable? Hence the loose threshold (1e-2).
    check_grid_gradient('test/co_ccpv5z_cart_hf_g03.fchk', ref, 1e-2)
def test_grid_co_ccpv5z_pure_hf_gradient_some_points():
    """Compare density gradients for CO (pure cc-pV5Z, HF) with cubegen."""
    # Reference data from Gaussian's cubegen. Each row: x, y, z (in angstrom,
    # converted to atomic units below), followed by three gradient components.
    ref = np.array([ # from cubegen
        [ 0.0, 0.0, 0.0, -0.27796827654, -0.03971005800, 26.06788123216],
        [ 0.1, 0.0, 0.0, -11.65999871789, -0.02706024561, 11.49763108605],
        [ 0.0, 0.1, 0.0, -0.19499030621, -11.60235682832, 11.60235521243],
        [ 0.0, 0.0, 1.0, 0.00184843964, 0.00026806115, -0.01003272687],
        [ 0.4, 0.2, 0.1, -0.46500454519, -0.27516942731, -0.01707049479],
        [-0.4, 0.2, 0.1, 0.63911725484, -0.32989616481, 0.00229353087],
        [ 0.4, -0.2, 0.1, -0.51099806603, 0.29961935521, -0.00979594206],
        [ 0.4, 0.2, -0.1, -0.21849813344, -0.16098019809, 0.16093849962],
    ])
    ref[:,:3] *= angstrom
    check_grid_gradient('test/co_ccpv5z_pure_hf_g03.fchk', ref, 1e-4)
def check_grid_esp(fn, ref, eps):
    """Compare grid ESP values with reference data.

    fn: filename of the wavefunction file (relative to the data directory).
    ref: array whose rows are (x, y, z, esp); the first three columns are
         used as grid points.
    eps: maximum tolerated absolute deviation of the ESP values.
    """
    mol = IOData.from_file(context.get_fn(fn))
    grid_points = ref[:, :3].copy()
    dm = mol.get_dm_full()
    result = mol.obasis.compute_grid_esp_dm(dm, mol.coordinates, mol.pseudo_numbers, grid_points)
    deviation = abs(result - ref[:, 3]).max()
    assert deviation < eps
def test_grid_lih_321g_hf_esp_some_points():
    """Compare ESP values for LiH (3-21G, HF) with cubegen."""
    # Reference data from Gaussian's cubegen. Each row: x, y, z (in angstrom,
    # converted to atomic units below), followed by the ESP value.
    ref = np.array([ # from cubegen
        [0.0, 0.0, 0.0, 0.906151727538],
        [0.1, 0.0, 0.0, 0.891755005233],
        [0.0, 0.1, 0.0, 0.891755005233],
        [0.0, 0.0, 1.0, 1.422294470114],
        [0.4, 0.2, 0.1, 0.796490099689],
    ])
    ref[:,:3] *= angstrom
    check_grid_esp('test/li_h_3-21G_hf_g09.fchk', ref, 1e-8)
@attr('slow')
def test_grid_co_ccpv5z_cart_hf_esp_some_points():
    """Compare ESP values for CO (Cartesian cc-pV5Z, HF) with cubegen."""
    # Reference data from Gaussian's cubegen. Each row: x, y, z (in angstrom,
    # converted to atomic units below), followed by the ESP value.
    ref = np.array([ # from cubegen
        [ 0.0, 0.0, 0.0, 10.69443507172],
        [ 0.1, 0.0, 0.0, 6.43122889229],
        [ 0.0, 0.1, 0.0, 6.43406765938],
        [ 0.0, 0.0, 1.0, 0.27023448629],
        [ 0.4, 0.2, 0.1, 0.82646540602],
        [-0.4, 0.2, 0.1, 0.93595072191],
        [ 0.4, -0.2, 0.1, 0.83432301119],
        [ 0.4, 0.2, -0.1, 0.68524674809],
    ])
    ref[:,:3] *= angstrom
    # cubegen output somehow not reliable? Hence the loose threshold (1e-3).
    check_grid_esp('test/co_ccpv5z_cart_hf_g03.fchk', ref, 1e-3)
@attr('slow')
def test_grid_co_ccpv5z_pure_hf_esp_some_points():
    """Compare ESP values for CO (pure cc-pV5Z, HF) with cubegen."""
    # Reference data from Gaussian's cubegen. Each row: x, y, z (in angstrom,
    # converted to atomic units below), followed by the ESP value.
    ref = np.array([ # from cubegen
        [ 0.0, 0.0, 0.0, 10.69443507172],
        [ 0.1, 0.0, 0.0, 6.43122889229],
        [ 0.0, 0.1, 0.0, 6.43406765938],
        [ 0.0, 0.0, 1.0, 0.27023448629],
        [ 0.4, 0.2, 0.1, 0.82646540602],
        [-0.4, 0.2, 0.1, 0.93595072191],
        [ 0.4, -0.2, 0.1, 0.83432301119],
        [ 0.4, 0.2, -0.1, 0.68524674809],
    ])
    ref[:,:3] *= angstrom
    check_grid_esp('test/co_ccpv5z_pure_hf_g03.fchk', ref, 1e-5)
def test_grid_two_index_ne():
    """Check the grid-based nuclear attraction operator against the analytic one."""
    mol = IOData.from_file(context.get_fn('test/li_h_3-21G_hf_g09.fchk'))
    rgrid = RadialGrid(ExpRTransform(1e-3, 2e1, 100))
    grid = BeckeMolGrid(mol.coordinates, mol.numbers, mol.pseudo_numbers,
                        (rgrid, 110), random_rotate=False)
    # External potential of the two bare nuclei, evaluated on the grid points.
    pot = 0.0
    for iatom in 0, 1:
        dist = np.sqrt(((grid.points - mol.coordinates[iatom])**2).sum(axis=1))
        pot = pot - mol.numbers[iatom]/dist
    na_ana = mol.obasis.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers)
    na_grid = mol.obasis.compute_grid_density_fock(grid.points, grid.weights, pot)
    # The grid-based operator must be non-trivial and close to the analytic one.
    assert abs(na_grid).max() > 8.0
    assert abs(na_ana - na_grid).max() < 2e-3
    # The resulting operator must be symmetric.
    np.testing.assert_almost_equal(na_grid, na_grid.T)
def test_gob_normalization():
    """Spot-check pure and Cartesian Gaussian normalization constants."""
    for alpha, shell_type, expected in [(0.09515, 0, 0.122100288),
                                        (0.1687144, 1, 0.154127551)]:
        assert abs(gob_pure_normalization(alpha, shell_type) - expected) < 1e-5
    for alpha, powers, expected in [(0.344, [1, 1, 0], 0.440501466),
                                    (0.246, [1, 1, 1], 0.242998767),
                                    (0.238, [2, 1, 1], 0.127073818)]:
        assert abs(gob_cart_normalization(alpha, np.array(powers)) - expected) < 1e-8
    # Pure and Cartesian constants must coincide for s functions...
    for alpha in 0.3, 0.7, 1.9:
        assert abs(gob_pure_normalization(alpha, 0) -
                   gob_cart_normalization(alpha, np.array([0, 0, 0]))) < 1e-10
    # ...and for p functions, regardless of the Cartesian direction.
    for alpha, powers in [(0.3, [1, 0, 0]), (0.7, [0, 1, 0]), (1.9, [0, 0, 1])]:
        assert abs(gob_pure_normalization(alpha, 1) -
                   gob_cart_normalization(alpha, np.array(powers))) < 1e-10
def test_cart_pure_switch():
    """The basis size changes when switching between pure and Cartesian d shells."""
    mol = IOData.from_file(context.get_fn('test/water.xyz'))
    assert get_gobasis(mol.coordinates, mol.numbers, 'aug-cc-pvdz').nbasis == 41
    assert get_gobasis(mol.coordinates, mol.numbers, 'aug-cc-pvdz',
                       pure=False).nbasis == 43
def test_concatenate1():
    """Concatenating a basis with itself doubles centers and basis functions."""
    mol = IOData.from_file(context.get_fn('test/water.xyz'))
    single = get_gobasis(mol.coordinates, mol.numbers, '3-21g')
    double = GOBasis.concatenate(single, single)
    assert double.ncenter == 3*2
    assert double.nbasis == 13*2
    olp = double.compute_overlap()
    # All four 13x13 blocks of the overlap matrix must coincide.
    assert abs(olp[:13, :13] - olp[:13, 13:]).max() < 1e-15
    assert (olp[:13, :13] == olp[13:, 13:]).all()
    assert abs(olp[:13, :13] - olp[13:, :13]).max() < 1e-15
def test_concatenate2():
    """Diagonal blocks of a concatenated basis match the separate overlaps."""
    mol = IOData.from_file(context.get_fn('test/water.xyz'))
    first = get_gobasis(mol.coordinates, mol.numbers, '3-21g')
    second = get_gobasis(mol.coordinates, mol.numbers, 'sto-3g')
    combined = GOBasis.concatenate(first, second)
    assert combined.ncenter == 3*2
    assert combined.nbasis == first.nbasis + second.nbasis
    olp = combined.compute_overlap()
    nfirst = first.nbasis
    assert (olp[:nfirst, :nfirst] == first.compute_overlap()).all()
    assert (olp[nfirst:, nfirst:] == second.compute_overlap()).all()
def test_abstract():
    """The abstract GBasis base class must refuse direct instantiation."""
    from horton.gbasis.cext import GBasis
    with assert_raises(NotImplementedError):
        GBasis(
            np.zeros((1, 3), float),    # centers
            np.zeros(2, int),           # shell_map
            np.array([1, 2]),           # nprims
            np.array([0, 1]),           # shell_types
            np.array([1.0, 1.1, 1.2]),  # alphas
            np.array([0.1, 0.2, 0.3]),  # con_coeffs
        )
def test_gobasis_desc_element_map():
    """Per-element basis overrides: by symbol ('H') and by atomic number (2)."""
    desc = GOBasisDesc('3-21G', {'H': 'sto-3g', 2: 'cc-pVQZ'})
    obasis = desc.apply_to(np.zeros((3, 3)), np.array([1, 2, 3]))
    assert obasis.centers.shape == (3, 3)
    # Atom 0, H with sto-3g: one contraction of three primitives.
    assert obasis.shell_map[0] == 0
    assert obasis.nprims[0] == 3
    # Atom 1, He with cc-pVQZ.
    assert (obasis.shell_map[1:11] == 1).all()
    assert (obasis.nprims[1:11] == [4, 1, 1, 1, 1, 1, 1, 1, 1, 1]).all()
    # Atom 2, Li with the default 3-21G basis.
    assert (obasis.shell_map[11:] == 2).all()
    assert (obasis.nprims[11:] == [3, 2, 2, 1, 1]).all()
def test_gobasis_desc_index_map():
    """Per-atom basis overrides by index; all three atoms are hydrogen."""
    desc = GOBasisDesc('3-21G', index_map={1: 'sto-3g', 2: 'cc-pVQZ'})
    obasis = desc.apply_to(np.zeros((3, 3)), np.array([1, 1, 1]))
    assert obasis.centers.shape == (3, 3)
    # Atom 0: default 3-21G basis.
    assert (obasis.shell_map[:2] == 0).all()
    assert (obasis.nprims[:2] == [2, 1]).all()
    # Atom 1: sto-3g override.
    assert (obasis.shell_map[2:3] == 1).all()
    assert (obasis.nprims[2:3] == 3).all()
    # Atom 2: cc-pVQZ override.
    assert (obasis.shell_map[3:] == 2).all()
    assert (obasis.nprims[3:] == [3, 1, 1, 1, 1, 1, 1, 1, 1, 1]).all()
def test_gobasis_output_args_grid_orbitals_exp():
    """A preallocated output array must give the same orbitals as auto-allocation."""
    mol = IOData.from_file(context.get_fn('test/water_hfs_321g.fchk'))
    points = np.random.uniform(-5, 5, (100, 3))
    iorbs = np.array([2, 3])
    preallocated = np.zeros((100, 2), float)
    mol.obasis.compute_grid_orbitals_exp(mol.orb_alpha, points, iorbs, preallocated)
    allocated = mol.obasis.compute_grid_orbitals_exp(mol.orb_alpha, points, iorbs)
    assert (preallocated == allocated).all()
def test_gobasis_output_args_grid_density_dm():
    """A preallocated output array must give the same densities as auto-allocation."""
    mol = IOData.from_file(context.get_fn('test/water_hfs_321g.fchk'))
    points = np.random.uniform(-5, 5, (100, 3))
    dm_full = mol.get_dm_full()
    preallocated = np.zeros(100, float)
    mol.obasis.compute_grid_density_dm(dm_full, points, preallocated)
    allocated = mol.obasis.compute_grid_density_dm(dm_full, points)
    assert (preallocated == allocated).all()
def test_gobasis_output_args_grid_gradient_dm():
    """A preallocated output array must give the same gradients as auto-allocation."""
    mol = IOData.from_file(context.get_fn('test/water_hfs_321g.fchk'))
    points = np.random.uniform(-5, 5, (100, 3))
    dm_full = mol.get_dm_full()
    preallocated = np.zeros((100, 3), float)
    mol.obasis.compute_grid_gradient_dm(dm_full, points, preallocated)
    allocated = mol.obasis.compute_grid_gradient_dm(dm_full, points)
    assert (preallocated == allocated).all()
def test_gobasis_output_args_grid_hartree_dm():
    """A preallocated output array must give the same potentials as auto-allocation."""
    mol = IOData.from_file(context.get_fn('test/water_hfs_321g.fchk'))
    points = np.random.uniform(-5, 5, (100, 3))
    dm_full = mol.get_dm_full()
    preallocated = np.zeros(100, float)
    mol.obasis.compute_grid_hartree_dm(dm_full, points, preallocated)
    allocated = mol.obasis.compute_grid_hartree_dm(dm_full, points)
    assert (preallocated == allocated).all()
def test_subset_simple():
    """Select the first two shells (first hydrogen) and check the sub-basis."""
    mol = IOData.from_file(context.get_fn('test/water_hfs_321g.fchk'))
    parent = mol.obasis
    sub, ibasis_list = parent.get_subset([0, 1])
    assert sub.ncenter == 1
    assert sub.nshell == 2
    assert (sub.centers[0] == parent.centers[0]).all()
    # Shell-level arrays are simply the first two entries of the parent.
    for attr in 'shell_map', 'nprims', 'shell_types':
        assert (getattr(sub, attr) == getattr(parent, attr)[:2]).all()
    # Primitive-level arrays are the first three entries of the parent.
    assert sub.nprim_total == 3
    assert (sub.alphas == parent.alphas[:3]).all()
    assert (sub.con_coeffs == parent.con_coeffs[:3]).all()
    assert (ibasis_list == [0, 1]).all()
def test_subset_simple_reverse():
    """Same shells as test_subset_simple, but selected in reverse order."""
    mol = IOData.from_file(context.get_fn('test/water_hfs_321g.fchk'))
    parent = mol.obasis
    sub, ibasis_list = parent.get_subset([1, 0])
    assert sub.ncenter == 1
    assert sub.nshell == 2
    assert (sub.centers[0] == parent.centers[0]).all()
    # Shell-level arrays follow the reversed selection order.
    for attr in 'shell_map', 'nprims', 'shell_types':
        assert (getattr(sub, attr) == getattr(parent, attr)[1::-1]).all()
    # Primitive data is reordered accordingly: shell 1 first, then shell 0.
    assert sub.nprim_total == 3
    assert (sub.alphas[:1] == parent.alphas[2:3]).all()
    assert (sub.alphas[1:] == parent.alphas[:2]).all()
    assert (sub.con_coeffs[:1] == parent.con_coeffs[2:3]).all()
    assert (sub.con_coeffs[1:] == parent.con_coeffs[:2]).all()
    assert (ibasis_list == [1, 0]).all()
def test_subset():
    """Select four shells on two centers and check the resulting sub-basis."""
    mol = IOData.from_file(context.get_fn('test/water_hfs_321g.fchk'))
    parent = mol.obasis
    ishells = [7, 3, 4, 8]
    sub, ibasis_list = parent.get_subset(ishells)
    assert sub.ncenter == 2
    assert sub.nshell == 4
    assert (sub.centers[0] == parent.centers[1]).all()
    assert (sub.centers[1] == parent.centers[2]).all()
    # Center indices are renumbered to the subset's centers, hence the -1.
    assert (sub.shell_map == parent.shell_map[ishells]-1).all()
    assert (sub.nprims == parent.nprims[ishells]).all()
    assert (sub.shell_types == parent.shell_types[ishells]).all()
    assert sub.nprim_total == 7
    # Primitive data is copied per shell, in the order of ishells:
    # (parent begin, parent end, sub begin, sub end).
    for b0, e0, b1, e1 in (12, 14, 0, 2), (6, 8, 2, 4), (8, 10, 4, 6), (14, 15, 6, 7):
        assert (sub.alphas[b1:e1] == parent.alphas[b0:e0]).all()
        assert (sub.con_coeffs[b1:e1] == parent.con_coeffs[b0:e0]).all()
    assert (ibasis_list == [11, 3, 4, 5, 6, 12]).all()
def test_basis_atoms():
    """Splitting the basis per atom must cover every basis function once."""
    mol = IOData.from_file(context.get_fn('test/water_hfs_321g.fchk'))
    basis_atoms = mol.obasis.get_basis_atoms(mol.coordinates)
    assert len(basis_atoms) == 3
    ibasis_all = []
    for icenter, (sub_obasis, ibasis_list) in enumerate(basis_atoms):
        # Each atomic basis has exactly one center: the matching atom.
        assert sub_obasis.ncenter == 1
        assert (sub_obasis.centers[0] == mol.obasis.centers[icenter]).all()
        ibasis_all.extend(ibasis_list)
    # Together the atomic bases enumerate all basis functions in order.
    assert ibasis_all == range(mol.obasis.nbasis)
def check_normalization(number, basis):
    """Helper function to test the normalization of contracted basis sets.

    Parameters
    ----------
    number : int
        Element to test. (Keep in mind that not all elements are supported in
        most basis sets.)
    basis : str
        The basis set, e.g. cc-pvdz.
    """
    # A single atom placed in the origin is sufficient.
    atom = IOData(coordinates=np.array([[0.0, 0.0, 0.0]]), numbers=np.array([number]))
    obasis = get_gobasis(atom.coordinates, atom.numbers, basis)
    # Normalized basis functions have a unit diagonal in the overlap matrix.
    olp = obasis.compute_overlap()
    np.testing.assert_almost_equal(np.diag(olp), 1.0)
def test_normalization_ccpvdz():
    # Check every element from H (1) through Ar (18).
    for number in xrange(1, 19):
        check_normalization(number, 'cc-pvdz')
|
QuantumElephant/horton
|
horton/gbasis/test/test_gobasis.py
|
Python
|
gpl-3.0
| 26,297
|
[
"Gaussian"
] |
7d20db00664e5918f46ff9211dfb9e517eef0c1b682569d4e2fd8d27bf0812a7
|
from tinypy.parser.TinyPyParser import TinyPyParser
from tinypy.parser.TinyPyVisitor import TinyPyVisitor
from tinypy import AST
from tinypy import runtime
class StmtVisitorMixin(TinyPyVisitor):
    """Visitor mixin that converts statement parse-tree contexts to AST nodes.

    Fix: all `!= None` / `== None` checks are replaced with the `is not None`
    / `is None` idiom (PEP 8). The parser contexts do not define custom
    equality, so behavior is unchanged.
    """

    #
    # Base statements
    #

    def visitSimple_stmt(self, ctx:TinyPyParser.Simple_stmtContext):
        # One physical line may hold several small statements separated by
        # semicolons; skip those that produce no AST node.
        statements = []
        for smallStmt in ctx.small_stmt():
            statement = self.visit(smallStmt)
            if statement is not None:
                statements.append(statement)
        return statements

    #
    # Compound statements
    #

    def visitSuite(self, ctx:TinyPyParser.SuiteContext):
        # Inline suite on the same line, e.g. `if x: y = 1`.
        if ctx.simple_stmt() is not None:
            return self.visit(ctx.simple_stmt())

        # Indented block: flatten simple-statement lists, keep compound
        # statements as single nodes.
        statements = []
        for stmt in ctx.stmt():
            if stmt.simple_stmt() is not None:
                statements += self.visit(stmt.simple_stmt())
            else:
                statements.append(self.visit(stmt))
        return statements

    def visitIf_stmt(self, ctx:TinyPyParser.If_stmtContext):
        test = self.visit(ctx.test())
        suite = self.visit(ctx.suite())

        orelse = []
        if ctx.if_else() is not None:
            orelse = self.visit(ctx.if_else().suite())

        # Desugar `elif` clauses into nested IfStmt nodes. Clauses are folded
        # from the last one inwards so each clause's orelse is the chain built
        # so far (ending in the plain `else` suite, if any).
        if ctx.if_elif() is not None and len(ctx.if_elif()) >= 1:
            elifNodes = ctx.if_elif().copy()
            elifNodes.reverse()

            for node in elifNodes:
                nodeTest = self.visit(node.test())
                nodeSuite = self.visit(node.suite())
                orelse = [AST.stmt.IfStmt(test=nodeTest, body=nodeSuite, orelse=orelse)]

        return AST.stmt.IfStmt(test=test, body=suite, orelse=orelse)

    def visitWhile_stmt(self, ctx:TinyPyParser.While_stmtContext):
        test = self.visit(ctx.test())
        suite = self.visit(ctx.suite())
        return AST.stmt.WhileStmt(test=test, body=suite, orelse=[])

    def visitFor_stmt(self, ctx:TinyPyParser.For_stmtContext):
        expr = self.visit(ctx.nameaccess())
        test = self.visit(ctx.test())
        suite = self.visit(ctx.suite())
        return AST.stmt.ForStmt(target=expr, iter=test, body=suite)

    def visitFuncdef(self, ctx:TinyPyParser.FuncdefContext):
        name = ctx.NAME().getText()
        suite = self.visit(ctx.suite())

        # Collect the parameter names, if the function declares any.
        param_ctx = ctx.parameters().param_argslist()
        params = []
        if param_ctx is not None:
            for argName in param_ctx.NAME():
                params.append(argName.getText())

        return AST.stmt.FunctionDef(name=name, args=params, body=suite)

    #
    # Small statements
    #

    def visitExprStmtAssign(self, ctx:TinyPyParser.ExprStmtAssignContext):
        name = self.visit(ctx.nameaccess())
        expr = self.visit(ctx.test())
        return AST.stmt.AssignStmt(target=name, value=expr)

    def visitExprStmtAugmented(self, ctx:TinyPyParser.ExprStmtAugmentedContext):
        name = self.visit(ctx.nameaccess())
        value = self.visit(ctx.test())
        op = ctx.augassign().getText()
        return AST.stmt.AugAssignStmt(name=name, value=value, op=op)

    #
    # Control flow statements
    #

    def visitReturn_stmt(self, ctx:TinyPyParser.Return_stmtContext):
        test = None
        validParents = (TinyPyParser.FuncdefContext, )

        # `return` is only valid somewhere inside a function definition.
        if not self.validContextParents(ctx, validParents):
            raise runtime.Errors.SyntaxError("'return' outside function")

        if ctx.test() is not None:
            test = self.visit(ctx.test())

        return AST.stmt.ReturnStmt(expr=test)

    def visitPass_stmt(self, ctx:TinyPyParser.Pass_stmtContext):
        return AST.stmt.PassStmt()

    def visitBreak_stmt(self, ctx:TinyPyParser.Break_stmtContext):
        validParents = TinyPyParser.For_stmtContext, TinyPyParser.While_stmtContext
        if not self.validContextParents(ctx, validParents):
            raise runtime.Errors.SyntaxError("'break' outside loop")
        return AST.stmt.BreakStmt()

    def visitContinue_stmt(self, ctx:TinyPyParser.Continue_stmtContext):
        validParents = TinyPyParser.For_stmtContext, TinyPyParser.While_stmtContext
        if not self.validContextParents(ctx, validParents):
            raise runtime.Errors.SyntaxError("'continue' outside loop")
        return AST.stmt.ContinueStmt()

    #
    # Check whether context has one of the specified proper parents
    #
    def validContextParents(self, context, properParents:tuple):
        """Return True if an ancestor context is an instance of properParents."""
        context = context.parentCtx
        # NOTE(review): the loop advances past the first parent before the
        # isinstance check, so the immediate parent itself is never tested.
        # This looks intentional (the direct parent is a wrapper rule), but
        # confirm against the grammar.
        while context is not None:
            context = context.parentCtx
            if isinstance(context, properParents):
                return True
        return False
|
maxmalysh/tiny-py-interpreter
|
tinypy/AST/builder/StmtVisitor.py
|
Python
|
mit
| 4,619
|
[
"VisIt"
] |
adff231785622d2f070ea94ef35e1017a0939fe5691caedf3f8f37a80099a68a
|
"""Defines schema for electromagnetic-related nodes."""
import enum
from schematics import types
from spins.invdes.problem_graph import optplan
from spins.invdes.problem_graph import schema_utils
# Registries populated by the `@schema_utils.polymorphic_model(...)` decorators
# below; they list the model classes accepted by polymorphic fields.
BOUNDARY_CONDITION_TYPES = []
MESH_TYPES = []
class Material(schema_utils.Model):
    """Defines a material.

    A material can be defined either by a name (e.g. "silicon") or by
    refractive index.

    Attributes:
        mat_name: Name of a material. This needs to be a material defined in
            `spins.material`.
        mat_file: Path of CSV containing wavelength (microns),n,k columns.
            The format is the same as CSV's from refractiveindex.info.
        index: Refractive index value.
    """
    mat_name = types.StringType()
    mat_file = types.StringType()
    index = types.PolyModelType(optplan.ComplexNumber)
class GdsMaterialStackLayer(schema_utils.Model):
    """Defines a single layer in a material stack.

    Attributes:
        foreground: Material to fill any structure in the layer.
        background: Material to fill any non-structure areas in the layer.
        extents: Start and end coordinates of the layer stack.
        gds_layer: Name of GDS layer that contains the polygons for this layer.
    """
    foreground = types.ModelType(Material)
    background = types.ModelType(Material)
    extents = optplan.vec2d()
    gds_layer = types.ListType(types.IntType())
class GdsMaterialStack(schema_utils.Model):
    """Defines a material stack.

    This is used by `GdsEps` to define the permittivity distribution.

    Attributes:
        background: Material to fill any regions that are not covered by
            a material stack layer.
        stack: A list of `GdsMaterialStackLayer` that defines permittivity for
            each layer.
    """
    background = types.ModelType(Material)
    stack = types.ListType(types.ModelType(GdsMaterialStackLayer))
class EpsilonSpec(schema_utils.Model):
    """Describes a specification for permittivity distribution.

    Base class for the polymorphic `GdsEps`, `GdsMeshEps` and `ParamEps`
    specifications below.
    """
@schema_utils.polymorphic_model()
class GdsEps(EpsilonSpec):
    """Defines a permittivity distribution using a GDS file.

    The GDS file will be flattened so that each layer only contains polygons.

    TODO(logansu): Expand description.

    Attributes:
        type: Must be "gds" (matches the registered model type below).
        gds: URI of GDS file.
        mat_stack: Description of each GDS layer permittivity values and
            thicknesses.
        stack_normal: Direction considered the normal to the stack.
    """
    type = schema_utils.polymorphic_model_type("gds")
    gds = types.StringType()
    mat_stack = types.ModelType(GdsMaterialStack)
    stack_normal = optplan.vec3d()
class Mesh(schema_utils.Model):
    """Defines a mesh to draw.

    Meshes are used to define permittivities through `GdsMeshEps`.
    """
@schema_utils.polymorphic_model()
class GdsMesh(Mesh):
    """Defines a mesh by using polygons from a GDS file.

    The mesh is defined by extruding the polygon along the stack normal with
    coordinates given by `extents`.

    Attributes:
        material: Material to use for mesh.
        extents: Start and end location of mesh in the extrusion direction.
        gds_layer: Tuple `(layer, datatype)` of the GDS file from which to
            extract the polygons.
    """
    type = schema_utils.polymorphic_model_type("mesh.gds_mesh")
    material = types.ModelType(Material)
    extents = optplan.vec2d()
    gds_layer = types.ListType(types.IntType())
@schema_utils.polymorphic_model()
class SlabMesh(Mesh):
    """Defines a slab.

    A slab is a rectangular prism that has a finite extent along the extrusion
    axis and infinite extent in the other two directions. Slabs are commonly
    used to draw a background permittivity distribution before drawing
    other meshes.

    Attributes:
        material: Material to use for slab.
        extents: Start and end location of slab in the extrusion direction.
    """
    type = schema_utils.polymorphic_model_type("mesh.slab")
    material = types.ModelType(Material)
    extents = optplan.vec2d()
@schema_utils.polymorphic_model()
class GdsMeshEps(EpsilonSpec):
    """Defines a permittivity distribution by a list of meshes.

    The meshes are drawn in order of the list. Consequently, if meshes overlap,
    the mesh drawn later will take precedence.

    Attributes:
        gds: GDS file to use for `GdsMesh` types.
        background: Default background permittivity.
        mesh_list: List of meshes to draw.
        stack_normal: Direction considered the normal to the stack.
    """
    type = schema_utils.polymorphic_model_type("gds_mesh")
    gds = types.StringType()
    background = types.ModelType(Material)
    mesh_list = types.ListType(types.PolyModelType(Mesh))
    stack_normal = optplan.vec3d()
@schema_utils.polymorphic_model()
class ParamEps(EpsilonSpec):
    """Defines a permittivity distribution based on a parametrization.

    Attributes:
        type: Must be "parametrization".
        parametrization: Name of the parametrization.
        simulation_space: Name of the simulation space.
        wavelength: Wavelength.
    """
    type = schema_utils.polymorphic_model_type("parametrization")
    parametrization = optplan.ReferenceType(optplan.Parametrization)
    simulation_space = optplan.ReferenceType(optplan.SimulationSpaceBase)
    wavelength = types.FloatType()
@schema_utils.polymorphic_model(MESH_TYPES)
class UniformMesh(schema_utils.Model):
    """Defines a uniform mesh.

    Attributes:
        type: Must be "uniform".
        dx: Unit cell distance for EM grid (nm).
    """
    type = schema_utils.polymorphic_model_type("uniform")
    dx = types.FloatType()
@schema_utils.polymorphic_model(BOUNDARY_CONDITION_TYPES)
class BlochBoundary(schema_utils.Model):
    """Represents a Bloch boundary condition.

    Attributes:
        bloch_vector: 3D Bloch vector.
    """
    type = schema_utils.polymorphic_model_type("bloch")
    bloch_vector = optplan.vec3d(default=[0, 0, 0])
@schema_utils.polymorphic_model(BOUNDARY_CONDITION_TYPES)
class PecBoundary(schema_utils.Model):
    """Represents a PEC (perfect electric conductor) boundary."""
    type = schema_utils.polymorphic_model_type("pec")
@schema_utils.polymorphic_model(BOUNDARY_CONDITION_TYPES)
class PmcBoundary(schema_utils.Model):
    """Represents a PMC (perfect magnetic conductor) boundary."""
    type = schema_utils.polymorphic_model_type("pmc")
class SelectionMatrixType(enum.Enum):
    """Defines possible types for selection matrices."""
    # Direct lattice selection matrix where we select out all points in the
    # Yee grid.
    DIRECT = "direct_lattice"
    # Same as `DIRECT` but permittivity values along the extrusion direction
    # are not constrained to be equal to each other.
    FULL_DIRECT = "full_direct"
    # Design dimensions is reduced by factor of 4 by parametrizing only the "z"
    # component.
    REDUCED = "uniform"
@optplan.register_node_type()
class SimulationSpace(optplan.SimulationSpaceBase):
    """Defines a simulation space.

    A simulation space contains information regarding the permittivity
    distributions but not the fields, i.e. no information regarding sources
    and wavelengths.

    Attributes:
        name: Name to identify the simulation space. Must be unique.
        eps_fg: Foreground permittivity.
        eps_bg: Background permittivity.
        mesh: Meshing information. This describes how the simulation region
            should be meshed.
        sim_region: Rectangular prism simulation domain.
        boundary_conditions: Boundary condition for each of the six faces of
            the simulation domain.
        pml_thickness: Six PML thicknesses, one per face (presumably in grid
            cells -- confirm against the solver).
        selection_matrix_type: The type of selection matrix to form. This
            is subject to change.
    """
    type = schema_utils.polymorphic_model_type("simulation_space")
    eps_fg = types.PolyModelType(EpsilonSpec)
    eps_bg = types.PolyModelType(EpsilonSpec)
    mesh = types.PolyModelType(MESH_TYPES)
    sim_region = types.ModelType(optplan.Box3d)
    boundary_conditions = types.ListType(
        types.PolyModelType(BOUNDARY_CONDITION_TYPES), min_size=6, max_size=6)
    pml_thickness = types.ListType(types.IntType(), min_size=6, max_size=6)
    selection_matrix_type = types.StringType(
        default=SelectionMatrixType.DIRECT.value,
        choices=tuple(select_type.value for select_type in SelectionMatrixType),
    )
@optplan.register_node_type()
class WaveguideMode(optplan.ProblemGraphNode):
    """Represents basic information for a waveguide mode.

    This class is not intended to be instantiable.

    Attributes:
        center: Waveguide center.
        extents: Width and height of waveguide mode region.
        normal: Normal direction of the waveguide. Note that this is also the
            mode propagation direction.
        mode_num: Mode number. The mode with largest propagation constant is
            mode 0, the mode with second largest propagation constant is mode 1,
            etc.
        power: The transmission power of the mode.
    """
    type = schema_utils.polymorphic_model_type("em.waveguide_mode")
    center = optplan.vec3d()
    extents = optplan.vec3d()
    normal = optplan.vec3d()
    mode_num = types.IntType()
    power = types.FloatType()
@optplan.register_node_type()
class WaveguideModeSource(optplan.EmSource):
    """Represents a waveguide mode source.

    The waveguide is assumed to be axis-aligned.

    Attributes:
        center: Waveguide center.
        extents: Width and height of waveguide mode region.
        normal: Normal direction of the waveguide. Note that this is also the
            mode propagation direction.
        mode_num: Mode number. The mode with largest propagation constant is
            mode 0, the mode with second largest propagation constant is mode 1,
            etc.
        power: The transmission power of the mode.
    """
    type = schema_utils.polymorphic_model_type("source.waveguide_mode")
    center = optplan.vec3d()
    extents = optplan.vec3d()
    normal = optplan.vec3d()
    mode_num = types.IntType()
    power = types.FloatType()
@optplan.register_node_type()
class WaveguideModeOverlap(optplan.EmOverlap):
    """Represents a waveguide mode overlap.

    The waveguide is assumed to be axis-aligned.

    Attributes:
        center: Waveguide center.
        extents: Width and height of waveguide mode region.
        normal: Normal direction of the waveguide. Note that this is also the
            mode propagation direction.
        mode_num: Mode number. The mode with largest propagation constant is
            mode 0, the mode with second largest propagation constant is mode 1,
            etc.
        power: The transmission power of the mode.
    """
    type = schema_utils.polymorphic_model_type("overlap.waveguide_mode")
    center = optplan.vec3d()
    extents = optplan.vec3d()
    normal = optplan.vec3d()
    mode_num = types.IntType()
    power = types.FloatType()
@optplan.register_node_type()
class ImportOverlap(optplan.EmOverlap):
    """Represents an imported overlap vector.

    Attributes:
        file_name: .mat file containing the overlap vector.
        center: the center coordinate of the overlap, allows for translation
            of the overlap to the specified center.
    """
    type = schema_utils.polymorphic_model_type("overlap.import_field_vector")
    file_name = types.StringType()
    center = optplan.vec3d()
@optplan.register_node_type()
class PlaneWaveSource(optplan.EmSource):
    """Represents a plane wave source.

    Attributes:
        type: Must be "source.plane_wave".
        center: Center of the source region.
        extents: Extents of the source region.
        normal: Normal direction of the source plane.
        theta: Angle of the propagation direction (radians; confirm the exact
            convention against the solver).
        psi: Second angle of the propagation direction (radians; confirm the
            exact convention against the solver).
        polarization_angle: Polarization angle (radians).
        overwrite_bloch_vector: Whether the simulation Bloch vector should be
            overwritten for this source -- confirm semantics in the solver.
        border: Border widths of the source region.
        power: Source power.
        normalize_by_sim: If `True`, normalize the power by running a
            simulation.
    """
    type = schema_utils.polymorphic_model_type("source.plane_wave")
    center = optplan.vec3d()
    extents = optplan.vec3d()
    normal = optplan.vec3d()
    theta = types.FloatType()
    psi = types.FloatType()
    polarization_angle = types.FloatType()
    overwrite_bloch_vector = types.BooleanType()
    border = types.ListType(types.FloatType())
    power = types.FloatType()
    normalize_by_sim = types.BooleanType(default=False)
@optplan.register_node_type()
class GaussianSource(optplan.EmSource):
    """Represents a gaussian source.

    Attributes:
        type: Must be "source.gaussian_beam".
        w0: Beam waist parameter (presumably the Gaussian beam radius w0 --
            confirm against the solver).
        center: Center of the source region.
        beam_center: Center of the beam (where the waist is located --
            confirm).
        extents: Extents of the source region.
        normal: Normal direction of the source plane.
        theta: Angle of the propagation direction (radians; confirm the exact
            convention against the solver).
        psi: Second angle of the propagation direction (radians; confirm the
            exact convention against the solver).
        polarization_angle: Polarization angle (radians).
        overwrite_bloch_vector: Whether the simulation Bloch vector should be
            overwritten for this source -- confirm semantics in the solver.
        power: Source power.
        normalize_by_sim: If `True`, normalize the power by running a
            simulation.
    """
    type = schema_utils.polymorphic_model_type("source.gaussian_beam")
    w0 = types.FloatType()
    center = optplan.vec3d()
    beam_center = optplan.vec3d()
    extents = optplan.vec3d()
    normal = optplan.vec3d()
    theta = types.FloatType()
    psi = types.FloatType()
    polarization_angle = types.FloatType()
    overwrite_bloch_vector = types.BooleanType()
    power = types.FloatType()
    normalize_by_sim = types.BooleanType(default=False)
@optplan.register_node_type()
class DipoleSource(optplan.EmSource):
    """Represents a dipole source.

    Attributes:
        position: Position of the dipole (will snap to grid).
        axis: Direction of the dipole (x:0, y:1, z:2).
        phase: Phase of the dipole source (in radian).
        power: Power assuming uniform dielectric space with the permittivity.
        normalize_by_sim: If `True`, normalize the power by running a
            simulation.
    """
    type = schema_utils.polymorphic_model_type("source.dipole_source")
    position = optplan.vec3d()
    axis = types.IntType()
    phase = types.FloatType()
    power = types.FloatType()
    normalize_by_sim = types.BooleanType(default=False)
@optplan.register_node_type()
class WaveguideModeEigSource(optplan.EmSource):
    """Represents a photonic crystal waveguide mode source.

    The waveguide does NOT have to be axis-aligned. The waveguide mode is
    computed as a 3D eigenmode solve.

    Attributes:
        center: Waveguide center.
        extents: Width and height of waveguide mode region.
        normal: Normal direction of the waveguide. Note that this is also the
            mode propagation direction.
        mode_num: Mode number. The mode with largest propagation constant is
            mode 0, the mode with second largest propagation constant is mode 1,
            etc.
        power: The transmission power of the mode.
    """
    type = schema_utils.polymorphic_model_type("source.waveguide_mode_eig")
    center = optplan.vec3d()
    extents = optplan.vec3d()
    normal = optplan.vec3d()
    mode_num = types.IntType()
    power = types.FloatType()
@optplan.register_node_type()
class WaveguideModeEigOverlap(optplan.EmOverlap):
    """Represents a photonic crystal waveguide mode overlap.

    The waveguide does NOT have to be axis-aligned. The waveguide mode is
    computed as a 3D eigenmode solve.

    Attributes:
        center: Waveguide center.
        extents: Width and height of waveguide mode region.
        normal: Normal direction of the waveguide. Note that this is also the
            mode propagation direction.
        mode_num: Mode number. The mode with largest propagation constant is
            mode 0, the mode with second largest propagation constant is mode 1,
            etc.
        power: The transmission power of the mode.
    """
    type = schema_utils.polymorphic_model_type("overlap.waveguide_mode_eig")
    center = optplan.vec3d()
    extents = optplan.vec3d()
    normal = optplan.vec3d()
    mode_num = types.IntType()
    power = types.FloatType()
@optplan.register_node_type()
class FdfdSimulation(optplan.Function):
    """Defines a FDFD simulation.

    Attributes:
        type: Must be "function.fdfd_simulation".
        name: Name of simulation.
        simulation_space: Simulation space name.
        epsilon: Permittivity function to simulate.
        source: Source name.
        wavelength: Wavelength at which to simulate.
        solver: Name of solver to use.
        bloch_vector: Bloch vector at which to simulate.
    """
    type = schema_utils.polymorphic_model_type("function.fdfd_simulation")
    simulation_space = optplan.ReferenceType(optplan.SimulationSpaceBase)
    epsilon = optplan.ReferenceType(optplan.Function)
    source = optplan.ReferenceType(optplan.EmSource)
    wavelength = types.FloatType()
    solver = types.StringType(choices=("maxwell_bicgstab", "maxwell_cg",
                                       "local_direct"))
    bloch_vector = types.ListType(types.FloatType())
@optplan.register_node_type()
class Epsilon(optplan.Function):
    """Defines a Epsilon Grid.

    Attributes:
        type: Must be "function.epsilon".
        name: Name of epsilon.
        simulation_space: Simulation space name.
        wavelength: Wavelength at which to calculate epsilon.
        structure: Parametrization that defines the structure.
    """
    type = schema_utils.polymorphic_model_type("function.epsilon")
    simulation_space = optplan.ReferenceType(optplan.SimulationSpaceBase)
    wavelength = types.FloatType()
    structure = optplan.ReferenceType(optplan.Parametrization)
@optplan.register_node_type()
class Overlap(optplan.Function):
    """Defines an overlap integral.

    Attributes:
        type: Must be "function.overlap".
        simulation: Simulation from which electric fields are obtained.
        overlap: Overlap type to use.
    """
    type = schema_utils.polymorphic_model_type("function.overlap")
    simulation = optplan.ReferenceType(optplan.Function)
    overlap = optplan.ReferenceType(optplan.EmOverlap)
@optplan.register_node_type()
class DiffEpsilon(optplan.Function):
    """Defines a function that finds the L1 norm between two permittivities.

    Specifically, the function is defined as `sum(|epsilon - epsilon_ref|)`.

    Attributes:
        type: Must be "function.diff_epsilon".
        epsilon: Permittivity.
        epsilon_ref: Base permittivity to compare to.
    """
    type = schema_utils.polymorphic_model_type("function.diff_epsilon")
    epsilon = optplan.ReferenceType(optplan.Function)
    epsilon_ref = types.PolyModelType(EpsilonSpec)
|
stanfordnqp/spins-b
|
spins/invdes/problem_graph/optplan/schema_em.py
|
Python
|
gpl-3.0
| 17,659
|
[
"CRYSTAL",
"Gaussian"
] |
8781ffd65b2f52097d777f4e3db5e2e427de5b194296017fc2896f02851dbb4d
|
"""
Tests for discussion pages
"""
import datetime
from uuid import uuid4
from nose.plugins.attrib import attr
from pytz import UTC
from .helpers import BaseDiscussionTestCase
from ..helpers import UniqueCourseTest
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.discussion import (
DiscussionTabSingleThreadPage,
InlineDiscussionPage,
InlineDiscussionThreadPage,
DiscussionUserProfilePage,
DiscussionTabHomePage,
DiscussionSortPreferencePage,
)
from ...pages.lms.learner_profile import LearnerProfilePage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ...fixtures.discussion import (
SingleThreadViewFixture,
UserProfileViewFixture,
SearchResultFixture,
Thread,
Response,
Comment,
SearchResult,
MultipleThreadFixture)
from .helpers import BaseDiscussionMixin
THREAD_CONTENT_WITH_LATEX = """Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
\n\n----------\n\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur. (b).\n\n
**(a)** $H_1(e^{j\\omega}) = \\sum_{n=-\\infty}^{\\infty}h_1[n]e^{-j\\omega n} =
\\sum_{n=-\\infty} ^{\\infty}h[n]e^{-j\\omega n}+\\delta_2e^{-j\\omega n_0}$
$= H(e^{j\\omega})+\\delta_2e^{-j\\omega n_0}=A_e (e^{j\\omega}) e^{-j\\omega n_0}
+\\delta_2e^{-j\\omega n_0}=e^{-j\\omega n_0} (A_e(e^{j\\omega})+\\delta_2)
$H_3(e^{j\\omega})=A_e(e^{j\\omega})+\\delta_2$. Dummy $A_e(e^{j\\omega})$ dummy post $.
$A_e(e^{j\\omega}) \\ge -\\delta_2$, it follows that $H_3(e^{j\\omega})$ is real and
$H_3(e^{j\\omega})\\ge 0$.\n\n**(b)** Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.\n\n
**Case 1:** If $re^{j\\theta}$ is a Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
\n\n**Case 3:** Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem $H_3(e^{j\\omega}) = P(cos\\omega)(cos\\omega - cos\\theta)^k$,
Lorem Lorem Lorem Lorem Lorem Lorem $P(cos\\omega)$ has no
$(cos\\omega - cos\\theta)$ factor.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
$P(cos\\theta) \\neq 0$. Since $P(cos\\omega)$ this is a dummy data post $\\omega$,
dummy $\\delta > 0$ such that for all $\\omega$ dummy $|\\omega - \\theta|
< \\delta$, $P(cos\\omega)$ Lorem ipsum dolor sit amet, consectetur adipiscing elit,
sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim
veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo
consequat. Duis aute irure dolor in reprehenderit in voluptate velit sse cillum dolore
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
"""
class DiscussionResponsePaginationTestMixin(BaseDiscussionMixin):
    """
    Shared response-pagination tests, used by both the inline discussion
    view and the discussion tab view.
    """

    def assert_response_display_correct(self, response_total, displayed_responses):
        """
        Assert that various aspects of the display of responses are all correct:
        * Text indicating total number of responses
        * Presence of "Add a response" button
        * Number of responses actually displayed
        * Presence and text of indicator of how many responses are shown
        * Presence and text of button to load more responses
        """
        page = self.thread_page
        self.assertEqual(
            page.get_response_total_text(),
            "{} responses".format(response_total)
        )
        self.assertEqual(page.has_add_response_button(), response_total != 0)
        self.assertEqual(page.get_num_displayed_responses(), displayed_responses)

        # Expected "Showing ..." indicator text, or None when there are no responses.
        if response_total == 0:
            expected_shown_text = None
        elif response_total == displayed_responses:
            expected_shown_text = "Showing all responses"
        else:
            expected_shown_text = "Showing first {} responses".format(displayed_responses)
        self.assertEqual(page.get_shown_responses_text(), expected_shown_text)

        # Expected load-more button text, or None when everything is shown.
        remaining = response_total - displayed_responses
        if remaining == 0:
            expected_load_text = None
        elif remaining < 100:
            expected_load_text = "Load all responses"
        else:
            expected_load_text = "Load next 100 responses"
        self.assertEqual(page.get_load_responses_button_text(), expected_load_text)

    def _check_pagination(self, response_total, displayed_counts):
        """
        Set up a thread with *response_total* responses and verify the
        display after the initial load and after each subsequent
        "load more" click, per the expected *displayed_counts* sequence.
        """
        self.setup_thread(response_total)
        self.assert_response_display_correct(response_total, displayed_counts[0])
        for count in displayed_counts[1:]:
            self.thread_page.load_more_responses()
            self.assert_response_display_correct(response_total, count)

    def test_pagination_no_responses(self):
        self._check_pagination(0, [0])

    def test_pagination_few_responses(self):
        self._check_pagination(5, [5])

    def test_pagination_two_response_pages(self):
        self._check_pagination(50, [25, 50])

    def test_pagination_exactly_two_response_pages(self):
        self._check_pagination(125, [25, 125])

    def test_pagination_three_response_pages(self):
        self._check_pagination(150, [25, 125, 150])

    def test_add_response_button(self):
        self.setup_thread(5)
        self.assertTrue(self.thread_page.has_add_response_button())
        self.thread_page.click_add_response_button()

    def test_add_response_button_closed_thread(self):
        self.setup_thread(5, closed=True)
        self.assertFalse(self.thread_page.has_add_response_button())
@attr('shard_2')
class DiscussionHomePageTest(UniqueCourseTest):
    """
    Tests for the discussion home page.
    """

    SEARCHED_USERNAME = "gizmo"

    def setUp(self):
        super(DiscussionHomePageTest, self).setUp()
        # Install a course and auto-authenticate into it before visiting
        # the discussion home page.
        CourseFixture(**self.course_info).install()
        AutoAuthPage(self.browser, course_id=self.course_id).visit()
        home_page = DiscussionTabHomePage(self.browser, self.course_id)
        home_page.visit()
        self.page = home_page

    def test_new_post_button(self):
        """
        Scenario: I can create new posts from the Discussion home page.

        Given that I am on the Discussion home page
        When I click on the 'New Post' button
        Then I should be shown the new post form
        """
        self.assertIsNotNone(self.page.new_post_button)
        self.page.click_new_post_button()
        self.assertIsNotNone(self.page.new_post_form)
@attr('shard_2')
class DiscussionTabSingleThreadTest(BaseDiscussionTestCase, DiscussionResponsePaginationTestMixin):
    """
    Tests for the discussion page displaying a single thread
    """

    def setUp(self):
        super(DiscussionTabSingleThreadTest, self).setUp()
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

    def setup_thread_page(self, thread_id):
        """Create and visit the single-thread page for *thread_id*."""
        self.thread_page = self.create_single_thread_page(thread_id)  # pylint: disable=attribute-defined-outside-init
        self.thread_page.visit()

    def _push_thread(self, body, thread_type="discussion"):
        """Push a thread fixture with the given *body* and return its id."""
        thread_id = "test_thread_{}".format(uuid4().hex)
        SingleThreadViewFixture(
            Thread(
                id=thread_id,
                body=body,
                commentable_id=self.discussion_id,
                thread_type=thread_type
            )
        ).push()
        return thread_id

    def test_mathjax_rendering(self):
        """A LaTeX-heavy thread body is rendered through MathJax."""
        thread_id = self._push_thread(THREAD_CONTENT_WITH_LATEX)
        self.setup_thread_page(thread_id)
        self.assertTrue(self.thread_page.is_discussion_body_visible())
        self.thread_page.verify_mathjax_preview_available()
        self.thread_page.verify_mathjax_rendered()

    def test_markdown_reference_link(self):
        """
        Check markdown editor renders reference link correctly
        and colon(:) in reference link is not converted to %3a
        """
        sample_link = "http://example.com/colon:test"
        thread_content = """[enter link description here][1]\n[1]: http://example.com/colon:test"""
        thread_id = self._push_thread(thread_content)
        self.setup_thread_page(thread_id)
        self.assertEqual(self.thread_page.get_link_href(), sample_link)

    def test_marked_answer_comments(self):
        """Comments under an endorsed answer start collapsed and can be expanded."""
        thread_id = "test_thread_{}".format(uuid4().hex)
        response_id = "test_response_{}".format(uuid4().hex)
        comment_id = "test_comment_{}".format(uuid4().hex)
        fixture = SingleThreadViewFixture(
            Thread(id=thread_id, commentable_id=self.discussion_id, thread_type="question")
        )
        fixture.addResponse(
            Response(id=response_id, endorsed=True),
            [Comment(id=comment_id)]
        )
        fixture.push()
        self.setup_thread_page(thread_id)
        # Collapsed initially: comment hidden, only "show comments" visible.
        self.assertFalse(self.thread_page.is_comment_visible(comment_id))
        self.assertFalse(self.thread_page.is_add_comment_visible(response_id))
        self.assertTrue(self.thread_page.is_show_comments_visible(response_id))
        # After expanding: comment and add-comment form visible.
        self.thread_page.show_comments(response_id)
        self.assertTrue(self.thread_page.is_comment_visible(comment_id))
        self.assertTrue(self.thread_page.is_add_comment_visible(response_id))
        self.assertFalse(self.thread_page.is_show_comments_visible(response_id))
@attr('shard_2')
class DiscussionTabMultipleThreadTest(BaseDiscussionTestCase):
    """
    Tests for the discussion page with multiple threads
    """

    def setUp(self):
        super(DiscussionTabMultipleThreadTest, self).setUp()
        AutoAuthPage(self.browser, course_id=self.course_id).visit()
        self.thread_count = 2
        self.thread_ids = []
        self.setup_multiple_threads(thread_count=self.thread_count)

        # One single-thread page object per pushed thread.
        self.thread_page_1, self.thread_page_2 = [
            DiscussionTabSingleThreadPage(
                self.browser,
                self.course_id,
                self.discussion_id,
                thread_id
            )
            for thread_id in self.thread_ids
        ]
        self.thread_page_1.visit()

    def setup_multiple_threads(self, thread_count):
        """Push *thread_count* threads with long bodies into this discussion."""
        self.thread_ids.extend(
            "test_thread_{}_{}".format(index, uuid4().hex)
            for index in range(thread_count)
        )
        long_body = "Dummy Long text body." * 50
        threads = [
            Thread(id=thread_id, commentable_id=self.discussion_id, body=long_body)
            for thread_id in self.thread_ids
        ]
        MultipleThreadFixture(threads).push()

    def test_page_scroll_on_thread_change_view(self):
        """
        Check switching between threads changes the page focus
        """
        # verify threads are rendered on the page
        self.assertTrue(
            self.thread_page_1.check_threads_rendered_successfully(thread_count=self.thread_count)
        )
        # From the thread_page_1 open & verify next thread
        self.thread_page_1.click_and_open_thread(thread_id=self.thread_ids[1])
        self.assertTrue(self.thread_page_2.is_browser_on_page())
        # Verify that the focus is changed
        self.thread_page_2.check_focus_is_set(selector=".discussion-article")
@attr('shard_2')
class DiscussionOpenClosedThreadTest(BaseDiscussionTestCase):
    """
    Tests for checking the display of attributes on open and closed threads
    """

    def setUp(self):
        super(DiscussionOpenClosedThreadTest, self).setUp()
        self.thread_id = "test_thread_{}".format(uuid4().hex)

    def setup_user(self, roles=None):
        """
        Create and log in a test user with the given discussion *roles*.

        ``roles`` defaults to ``None`` instead of ``[]`` to avoid the
        mutable-default-argument pitfall (a single list shared across calls).
        """
        roles_str = ','.join(roles or [])
        self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()

    def setup_view(self, **thread_kwargs):
        """Push a thread fixture (with one response) built from *thread_kwargs*."""
        thread_kwargs.update({'commentable_id': self.discussion_id})
        view = SingleThreadViewFixture(
            Thread(id=self.thread_id, **thread_kwargs)
        )
        view.addResponse(Response(id="response1"))
        view.push()

    def setup_openclosed_thread_page(self, closed=False):
        """
        Log in as a moderator, push a thread (closed if *closed*), visit it,
        and toggle its open/closed state.  Returns the thread page.
        """
        self.setup_user(roles=['Moderator'])
        if closed:
            self.setup_view(closed=True)
        else:
            self.setup_view()
        page = self.create_single_thread_page(self.thread_id)
        page.visit()
        page.close_open_thread()
        return page

    def test_originally_open_thread_vote_display(self):
        # An open thread that gets closed shows static vote counts only.
        page = self.setup_openclosed_thread_page()
        self.assertFalse(page._is_element_visible('.forum-thread-main-wrapper .action-vote'))
        self.assertTrue(page._is_element_visible('.forum-thread-main-wrapper .display-vote'))
        self.assertFalse(page._is_element_visible('.response_response1 .action-vote'))
        self.assertTrue(page._is_element_visible('.response_response1 .display-vote'))

    def test_originally_closed_thread_vote_display(self):
        # A closed thread that gets reopened shows actionable vote buttons.
        page = self.setup_openclosed_thread_page(True)
        self.assertTrue(page._is_element_visible('.forum-thread-main-wrapper .action-vote'))
        self.assertFalse(page._is_element_visible('.forum-thread-main-wrapper .display-vote'))
        self.assertTrue(page._is_element_visible('.response_response1 .action-vote'))
        self.assertFalse(page._is_element_visible('.response_response1 .display-vote'))
@attr('shard_2')
class DiscussionCommentDeletionTest(BaseDiscussionTestCase):
    """
    Tests for deleting comments displayed beneath responses in the single thread view.
    """

    def setup_user(self, roles=None):
        """
        Create and log in a test user with the given discussion *roles*.

        ``roles`` defaults to ``None`` instead of ``[]`` to avoid the
        mutable-default-argument pitfall (a single list shared across calls).
        """
        roles_str = ','.join(roles or [])
        self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()

    def setup_view(self):
        """Push a thread with one response carrying a comment by another user and one by self."""
        view = SingleThreadViewFixture(Thread(id="comment_deletion_test_thread", commentable_id=self.discussion_id))
        view.addResponse(
            Response(id="response1"), [
                Comment(id="comment_other_author"),
                Comment(id="comment_self_author", user_id=self.user_id, thread_id="comment_deletion_test_thread")
            ]
        )
        view.push()

    def test_comment_deletion_as_student(self):
        # Students may delete only their own comments.
        self.setup_user()
        self.setup_view()
        page = self.create_single_thread_page("comment_deletion_test_thread")
        page.visit()
        self.assertTrue(page.is_comment_deletable("comment_self_author"))
        self.assertTrue(page.is_comment_visible("comment_other_author"))
        self.assertFalse(page.is_comment_deletable("comment_other_author"))
        page.delete_comment("comment_self_author")

    def test_comment_deletion_as_moderator(self):
        # Moderators may delete anyone's comments.
        self.setup_user(roles=['Moderator'])
        self.setup_view()
        page = self.create_single_thread_page("comment_deletion_test_thread")
        page.visit()
        self.assertTrue(page.is_comment_deletable("comment_self_author"))
        self.assertTrue(page.is_comment_deletable("comment_other_author"))
        page.delete_comment("comment_self_author")
        page.delete_comment("comment_other_author")
@attr('shard_2')
class DiscussionResponseEditTest(BaseDiscussionTestCase):
    """
    Tests for editing responses displayed beneath thread in the single thread view.
    """

    def setup_user(self, roles=None):
        """
        Create and log in a test user with the given discussion *roles*.

        ``roles`` defaults to ``None`` instead of ``[]`` to avoid the
        mutable-default-argument pitfall (a single list shared across calls).
        """
        roles_str = ','.join(roles or [])
        self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()

    def setup_view(self):
        """Push a thread with one response by another user and one by the current user."""
        view = SingleThreadViewFixture(Thread(id="response_edit_test_thread", commentable_id=self.discussion_id))
        view.addResponse(
            Response(id="response_other_author", user_id="other", thread_id="response_edit_test_thread"),
        )
        view.addResponse(
            Response(id="response_self_author", user_id=self.user_id, thread_id="response_edit_test_thread"),
        )
        view.push()

    def edit_response(self, page, response_id):
        """Edit *response_id* in place and verify the edited body is rendered."""
        self.assertTrue(page.is_response_editable(response_id))
        page.start_response_edit(response_id)
        new_response = "edited body"
        page.set_response_editor_value(response_id, new_response)
        page.submit_response_edit(response_id, new_response)

    def _assert_response_body_html(self, page, response_id, expected_html):
        """Assert that the rendered HTML body of *response_id* equals *expected_html*."""
        actual_response_html = page.q(
            css=".response_{} .response-body".format(response_id)
        ).html[0]
        self.assertEqual(expected_html, actual_response_html)

    def test_edit_response_add_link(self):
        """
        Scenario: User submits valid input to the 'add link' form
        Given I am editing a response on a discussion page
        When I click the 'add link' icon in the editor toolbar
        And enter a valid url to the URL input field
        And enter a valid string in the Description input field
        And click the 'OK' button
        Then the edited response should contain the new link
        """
        self.setup_user()
        self.setup_view()
        page = self.create_single_thread_page("response_edit_test_thread")
        page.visit()
        response_id = "response_self_author"
        url = "http://example.com"
        description = "example"
        page.start_response_edit(response_id)
        page.set_response_editor_value(response_id, "")
        page.add_content_via_editor_button(
            "link", response_id, url, description)
        page.submit_response_edit(response_id, description)
        self._assert_response_body_html(
            page, response_id,
            '<p><a href="{}">{}</a></p>'.format(url, description)
        )

    def test_edit_response_add_image(self):
        """
        Scenario: User submits valid input to the 'add image' form
        Given I am editing a response on a discussion page
        When I click the 'add image' icon in the editor toolbar
        And enter a valid url to the URL input field
        And enter a valid string in the Description input field
        And click the 'OK' button
        Then the edited response should contain the new image
        """
        self.setup_user()
        self.setup_view()
        page = self.create_single_thread_page("response_edit_test_thread")
        page.visit()
        response_id = "response_self_author"
        url = "http://www.example.com/something.png"
        description = "image from example.com"
        page.start_response_edit(response_id)
        page.set_response_editor_value(response_id, "")
        page.add_content_via_editor_button(
            "image", response_id, url, description)
        # An image alone leaves the editor text empty.
        page.submit_response_edit(response_id, '')
        self._assert_response_body_html(
            page, response_id,
            '<p><img src="{}" alt="{}" title=""></p>'.format(url, description)
        )

    def test_edit_response_add_image_error_msg(self):
        """
        Scenario: User submits invalid input to the 'add image' form
        Given I am editing a response on a discussion page
        When I click the 'add image' icon in the editor toolbar
        And enter an invalid url to the URL input field
        And enter an empty string in the Description input field
        And click the 'OK' button
        Then I should be shown 2 error messages
        """
        self.setup_user()
        self.setup_view()
        page = self.create_single_thread_page("response_edit_test_thread")
        page.visit()
        page.start_response_edit("response_self_author")
        page.add_content_via_editor_button(
            "image", "response_self_author", '', '')
        page.verify_link_editor_error_messages_shown()

    def test_edit_response_add_decorative_image(self):
        """
        Scenario: User submits valid input to the 'add image' form for a decorative image
        Given I am editing a response on a discussion page
        When I click the 'add image' icon in the editor toolbar
        And enter a valid url to the URL input field
        And enter an empty string in the Description input field
        And I check the 'image is decorative' checkbox
        And click the 'OK' button
        Then the edited response should contain the new image
        """
        self.setup_user()
        self.setup_view()
        page = self.create_single_thread_page("response_edit_test_thread")
        page.visit()
        response_id = "response_self_author"
        url = "http://www.example.com/something.png"
        description = ""
        page.start_response_edit(response_id)
        page.set_response_editor_value(response_id, "Some content")
        page.add_content_via_editor_button(
            "image", response_id, url, description, is_decorative=True)
        page.submit_response_edit(response_id, "Some content")
        self._assert_response_body_html(
            page, response_id,
            '<p>Some content<img src="{}" alt="{}" title=""></p>'.format(
                url, description)
        )

    def test_edit_response_add_link_error_msg(self):
        """
        Scenario: User submits invalid input to the 'add link' form
        Given I am editing a response on a discussion page
        When I click the 'add link' icon in the editor toolbar
        And enter an invalid url to the URL input field
        And enter an empty string in the Description input field
        And click the 'OK' button
        Then I should be shown 2 error messages
        """
        self.setup_user()
        self.setup_view()
        page = self.create_single_thread_page("response_edit_test_thread")
        page.visit()
        page.start_response_edit("response_self_author")
        page.add_content_via_editor_button(
            "link", "response_self_author", '', '')
        page.verify_link_editor_error_messages_shown()

    def test_edit_response_as_student(self):
        """
        Scenario: Students should be able to edit the response they created, but not responses of other users
        Given that I am on discussion page with student logged in
        When I try to edit the response created by student
        Then the response should be edited and rendered successfully
        And responses from other users should be shown over there
        And the student should NOT be able to edit the responses of other people
        """
        self.setup_user()
        self.setup_view()
        page = self.create_single_thread_page("response_edit_test_thread")
        page.visit()
        self.assertTrue(page.is_response_visible("response_other_author"))
        self.assertFalse(page.is_response_editable("response_other_author"))
        self.edit_response(page, "response_self_author")

    def test_edit_response_as_moderator(self):
        """
        Scenario: Moderator should be able to edit the response they created and responses of other users
        Given that I am on discussion page with moderator logged in
        When I try to edit the response created by moderator
        Then the response should be edited and rendered successfully
        And I try to edit the response created by other users
        Then the response should be edited and rendered successfully
        """
        self.setup_user(roles=["Moderator"])
        self.setup_view()
        page = self.create_single_thread_page("response_edit_test_thread")
        page.visit()
        self.edit_response(page, "response_self_author")
        self.edit_response(page, "response_other_author")

    def test_vote_report_endorse_after_edit(self):
        """
        Scenario: Moderator should be able to vote, report or endorse after editing the response.
        Given that I am on discussion page with moderator logged in
        When I edit, vote, report, and endorse both the moderator's own
        response and another user's response
        Then each action should complete successfully
        """
        self.setup_user(roles=["Moderator"])
        self.setup_view()
        page = self.create_single_thread_page("response_edit_test_thread")
        page.visit()
        self.edit_response(page, "response_self_author")
        self.edit_response(page, "response_other_author")
        page.vote_response('response_self_author')
        page.vote_response('response_other_author')
        page.report_response('response_self_author')
        page.report_response('response_other_author')
        page.endorse_response('response_self_author')
        page.endorse_response('response_other_author')
@attr('shard_2')
class DiscussionCommentEditTest(BaseDiscussionTestCase):
    """
    Tests for editing comments displayed beneath responses in the single thread view.
    """

    def setup_user(self, roles=None):
        """
        Create and log in a test user with the given discussion *roles*.

        ``roles`` defaults to ``None`` instead of ``[]`` to avoid the
        mutable-default-argument pitfall (a single list shared across calls).
        """
        roles_str = ','.join(roles or [])
        self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()

    def setup_view(self):
        """Push a thread with one response carrying a comment by another user and one by self."""
        view = SingleThreadViewFixture(Thread(id="comment_edit_test_thread", commentable_id=self.discussion_id))
        view.addResponse(
            Response(id="response1"),
            [Comment(id="comment_other_author", user_id="other"), Comment(id="comment_self_author", user_id=self.user_id)])
        view.push()

    def edit_comment(self, page, comment_id):
        """Edit *comment_id* in place and verify the edited body is rendered."""
        page.start_comment_edit(comment_id)
        new_comment = "edited body"
        page.set_comment_editor_value(comment_id, new_comment)
        page.submit_comment_edit(comment_id, new_comment)

    def test_edit_comment_as_student(self):
        # Students may edit only their own comments.
        self.setup_user()
        self.setup_view()
        page = self.create_single_thread_page("comment_edit_test_thread")
        page.visit()
        self.assertTrue(page.is_comment_editable("comment_self_author"))
        self.assertTrue(page.is_comment_visible("comment_other_author"))
        self.assertFalse(page.is_comment_editable("comment_other_author"))
        self.edit_comment(page, "comment_self_author")

    def test_edit_comment_as_moderator(self):
        # Moderators may edit anyone's comments.
        self.setup_user(roles=["Moderator"])
        self.setup_view()
        page = self.create_single_thread_page("comment_edit_test_thread")
        page.visit()
        self.assertTrue(page.is_comment_editable("comment_self_author"))
        self.assertTrue(page.is_comment_editable("comment_other_author"))
        self.edit_comment(page, "comment_self_author")
        self.edit_comment(page, "comment_other_author")

    def test_cancel_comment_edit(self):
        # Cancelling an edit restores the original comment body.
        self.setup_user()
        self.setup_view()
        page = self.create_single_thread_page("comment_edit_test_thread")
        page.visit()
        self.assertTrue(page.is_comment_editable("comment_self_author"))
        original_body = page.get_comment_body("comment_self_author")
        page.start_comment_edit("comment_self_author")
        page.set_comment_editor_value("comment_self_author", "edited body")
        page.cancel_comment_edit("comment_self_author", original_body)

    def test_editor_visibility(self):
        """Only one editor should be visible at a time within a single response"""
        self.setup_user(roles=["Moderator"])
        self.setup_view()
        page = self.create_single_thread_page("comment_edit_test_thread")
        page.visit()
        self.assertTrue(page.is_comment_editable("comment_self_author"))
        self.assertTrue(page.is_comment_editable("comment_other_author"))
        self.assertTrue(page.is_add_comment_visible("response1"))
        original_body = page.get_comment_body("comment_self_author")
        # Opening a comment editor hides the add-comment form.
        page.start_comment_edit("comment_self_author")
        self.assertFalse(page.is_add_comment_visible("response1"))
        self.assertTrue(page.is_comment_editor_visible("comment_self_author"))
        page.set_comment_editor_value("comment_self_author", "edited body")
        # Opening a second comment editor closes the first, discarding its draft.
        page.start_comment_edit("comment_other_author")
        self.assertFalse(page.is_comment_editor_visible("comment_self_author"))
        self.assertTrue(page.is_comment_editor_visible("comment_other_author"))
        self.assertEqual(page.get_comment_body("comment_self_author"), original_body)
        # Opening the response editor closes the comment editor, and vice versa.
        page.start_response_edit("response1")
        self.assertFalse(page.is_comment_editor_visible("comment_other_author"))
        self.assertTrue(page.is_response_editor_visible("response1"))
        original_body = page.get_comment_body("comment_self_author")
        page.start_comment_edit("comment_self_author")
        self.assertFalse(page.is_response_editor_visible("response1"))
        self.assertTrue(page.is_comment_editor_visible("comment_self_author"))
        # Cancelling the edit restores the add-comment form.
        page.cancel_comment_edit("comment_self_author", original_body)
        self.assertFalse(page.is_comment_editor_visible("comment_self_author"))
        self.assertTrue(page.is_add_comment_visible("response1"))
@attr('shard_2')
class InlineDiscussionTest(UniqueCourseTest, DiscussionResponsePaginationTestMixin):
"""
Tests for inline discussions
"""
    def setUp(self):
        """Install a course containing two inline discussion modules in one unit."""
        super(InlineDiscussionTest, self).setUp()
        self.thread_ids = []
        self.discussion_id = "test_discussion_{}".format(uuid4().hex)
        self.additional_discussion_id = "test_discussion_{}".format(uuid4().hex)
        # chapter -> sequential -> vertical holding two discussion XBlocks,
        # each wired to its own discussion id.
        self.course_fix = CourseFixture(**self.course_info).add_children(
            XBlockFixtureDesc("chapter", "Test Section").add_children(
                XBlockFixtureDesc("sequential", "Test Subsection").add_children(
                    XBlockFixtureDesc("vertical", "Test Unit").add_children(
                        XBlockFixtureDesc(
                            "discussion",
                            "Test Discussion",
                            metadata={"discussion_id": self.discussion_id}
                        ),
                        XBlockFixtureDesc(
                            "discussion",
                            "Test Discussion 1",
                            metadata={"discussion_id": self.additional_discussion_id}
                        )
                    )
                )
            )
        ).install()
        # Auto-authenticate and land on the courseware page that hosts both modules.
        self.user_id = AutoAuthPage(self.browser, course_id=self.course_id).visit().get_user_id()
        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        self.courseware_page.visit()
        self.discussion_page = InlineDiscussionPage(self.browser, self.discussion_id)
        self.additional_discussion_page = InlineDiscussionPage(self.browser, self.additional_discussion_id)
def setup_thread_page(self, thread_id):
self.discussion_page.expand_discussion()
self.assertEqual(self.discussion_page.get_num_displayed_threads(), 1)
self.thread_page = InlineDiscussionThreadPage(self.browser, thread_id) # pylint: disable=attribute-defined-outside-init
self.thread_page.expand()
def setup_multiple_inline_threads(self, thread_count):
"""
Set up multiple treads on the page by passing 'thread_count'
"""
threads = []
for i in range(thread_count):
thread_id = "test_thread_{}_{}".format(i, uuid4().hex)
threads.append(
Thread(id=thread_id, commentable_id=self.discussion_id),
)
self.thread_ids.append(thread_id)
thread_fixture = MultipleThreadFixture(threads)
thread_fixture.add_response(
Response(id="response1"),
[Comment(id="comment1", user_id="other"), Comment(id="comment2", user_id=self.user_id)],
threads[0]
)
thread_fixture.push()
def test_page_while_expanding_inline_discussion(self):
"""
Tests for the Inline Discussion page with multiple treads. Page should not focus 'thread-wrapper'
after loading responses.
"""
self.setup_multiple_inline_threads(thread_count=3)
self.discussion_page.expand_discussion()
thread_page = InlineDiscussionThreadPage(self.browser, self.thread_ids[0])
thread_page.expand()
# Check if 'thread-wrapper' is focused after expanding thread
self.assertFalse(thread_page.check_if_selector_is_focused(selector='.thread-wrapper'))
    def test_initial_render(self):
        # The inline discussion module starts out collapsed.
        self.assertFalse(self.discussion_page.is_discussion_expanded())
    def test_expand_discussion_empty(self):
        # Expanding a discussion with no threads shows an empty thread list.
        self.discussion_page.expand_discussion()
        self.assertEqual(self.discussion_page.get_num_displayed_threads(), 0)
def check_anonymous_to_peers(self, is_staff):
    """Create an anonymous-to-peers thread and verify it is displayed as
    anonymous exactly when the viewer is not staff (*is_staff* False)."""
    thread = Thread(id=uuid4().hex, anonymous_to_peers=True, commentable_id=self.discussion_id)
    thread_fixture = SingleThreadViewFixture(thread)
    thread_fixture.push()
    self.setup_thread_page(thread.get("id"))
    # Staff can see the author's identity; peers cannot.
    self.assertEqual(self.thread_page.is_thread_anonymous(), not is_staff)
def test_anonymous_to_peers_threads_as_staff(self):
    """Staff users can see the author of anonymous-to-peers threads."""
    # Re-authenticate with the Administrator role before viewing the thread.
    AutoAuthPage(self.browser, course_id=self.course_id, roles="Administrator").visit()
    self.courseware_page.visit()
    self.check_anonymous_to_peers(True)
def test_anonymous_to_peers_threads_as_peer(self):
    """Ordinary learners see anonymous-to-peers threads as anonymous."""
    self.check_anonymous_to_peers(False)
def test_discussion_blackout_period(self):
    """During an active discussion blackout window, every authoring
    control (new post, respond, edit, comment, delete) must be hidden."""
    now = datetime.datetime.now(UTC)
    # Configure a blackout window that is currently in effect:
    # it started 14 days ago and ends 2 days from now.
    self.course_fix.add_advanced_settings(
        {
            u"discussion_blackouts": {
                "value": [
                    [
                        (now - datetime.timedelta(days=14)).isoformat(),
                        (now + datetime.timedelta(days=2)).isoformat()
                    ]
                ]
            }
        }
    )
    self.course_fix._add_advanced_settings()
    self.browser.refresh()
    thread = Thread(id=uuid4().hex, commentable_id=self.discussion_id)
    thread_fixture = SingleThreadViewFixture(thread)
    thread_fixture.addResponse(
        Response(id="response1"),
        [Comment(id="comment1", user_id="other"), Comment(id="comment2", user_id=self.user_id)])
    thread_fixture.push()
    self.setup_thread_page(thread.get("id"))
    # All authoring affordances should be disabled during the blackout,
    # for the user's own content as well as other users' content.
    self.assertFalse(self.discussion_page.element_exists(".new-post-btn"))
    self.assertFalse(self.thread_page.has_add_response_button())
    self.assertFalse(self.thread_page.is_response_editable("response1"))
    self.assertFalse(self.thread_page.is_add_comment_visible("response1"))
    self.assertFalse(self.thread_page.is_comment_editable("comment1"))
    self.assertFalse(self.thread_page.is_comment_editable("comment2"))
    self.assertFalse(self.thread_page.is_comment_deletable("comment1"))
    self.assertFalse(self.thread_page.is_comment_deletable("comment2"))
def test_dual_discussion_module(self):
    """
    Scenario: Two discussion module in one unit shouldn't override their actions
    Given that I'm on courseware page where there are two inline discussion
    When I click on one discussion module new post button
    Then it should add new post form of that module in DOM
    And I should be shown new post form of that module
    And I shouldn't be shown second discussion module new post form
    And I click on second discussion module new post button
    Then it should add new post form of second module in DOM
    And I should be shown second discussion new post form
    And I shouldn't be shown first discussion module new post form
    And I have two new post form in the DOM
    When I click back on first module new post button
    And I should be shown new post form of that module
    And I shouldn't be shown second discussion module new post form
    """
    self.discussion_page.wait_for_page()
    self.additional_discussion_page.wait_for_page()
    # Open the new-post form on the first module, then cancel it
    # (canceling pops a confirmation alert that must be accepted).
    self.discussion_page.click_new_post_button()
    with self.discussion_page.handle_alert():
        self.discussion_page.click_cancel_new_post()
    # Opening the form on the second module must not reveal the first
    # module's form.
    self.additional_discussion_page.click_new_post_button()
    self.assertFalse(self.discussion_page._is_element_visible(".new-post-article"))
    with self.additional_discussion_page.handle_alert():
        self.additional_discussion_page.click_cancel_new_post()
    # And vice versa for the first module.
    self.discussion_page.click_new_post_button()
    self.assertFalse(self.additional_discussion_page._is_element_visible(".new-post-article"))
@attr('shard_2')
class DiscussionUserProfileTest(UniqueCourseTest):
    """
    Tests for user profile page in discussion tab.
    """

    PAGE_SIZE = 20  # django_comment_client.forum.views.THREADS_PER_PAGE
    PROFILED_USERNAME = "profiled-user"

    def setUp(self):
        super(DiscussionUserProfileTest, self).setUp()
        CourseFixture(**self.course_info).install()
        # The following line creates a user enrolled in our course, whose
        # threads will be viewed, but not the one who will view the page.
        # It isn't necessary to log them in, but using the AutoAuthPage
        # saves a lot of code.
        self.profiled_user_id = AutoAuthPage(
            self.browser,
            username=self.PROFILED_USERNAME,
            course_id=self.course_id
        ).visit().get_user_id()
        # now create a second user who will view the profile.
        self.user_id = AutoAuthPage(
            self.browser,
            course_id=self.course_id
        ).visit().get_user_id()

    def check_pages(self, num_threads):
        """
        Create *num_threads* threads for the profiled user, visit the
        profile page, then walk the pagination all the way up and back
        down, verifying the current page, shown threads, clickable page
        numbers, and prev/next buttons at every step.

        Returns the profile page object for further interaction.
        """
        # set up the stub server to return the desired amount of thread results
        threads = [Thread(id=uuid4().hex) for _ in range(num_threads)]
        UserProfileViewFixture(threads).push()
        # navigate to default view (page 1)
        page = DiscussionUserProfilePage(
            self.browser,
            self.course_id,
            self.profiled_user_id,
            self.PROFILED_USERNAME
        )
        page.visit()
        current_page = 1
        # Integer (floor) division is intended here under Python 2:
        # 20 threads -> 1 page, 21 threads -> 2 pages.
        total_pages = max(num_threads - 1, 1) / self.PAGE_SIZE + 1
        all_pages = range(1, total_pages + 1)

        def _check_page():
            # ensure the page being displayed as "current" is the expected one
            self.assertEqual(page.get_current_page(), current_page)
            # ensure the expected threads are being shown in the right order
            threads_expected = threads[(current_page - 1) * self.PAGE_SIZE:current_page * self.PAGE_SIZE]
            self.assertEqual(page.get_shown_thread_ids(), [t["id"] for t in threads_expected])
            # ensure the clickable page numbers are the expected ones
            self.assertEqual(page.get_clickable_pages(), [
                p for p in all_pages
                if p != current_page
                and p - 2 <= current_page <= p + 2
                or (current_page > 2 and p == 1)
                or (current_page < total_pages and p == total_pages)
            ])
            # ensure the previous button is shown, but only if it should be.
            # when it is shown, make sure it works.
            if current_page > 1:
                self.assertTrue(page.is_prev_button_shown(current_page - 1))
                page.click_prev_page()
                self.assertEqual(page.get_current_page(), current_page - 1)
                page.click_next_page()
                self.assertEqual(page.get_current_page(), current_page)
            else:
                self.assertFalse(page.is_prev_button_shown())
            # ensure the next button is shown, but only if it should be.
            if current_page < total_pages:
                self.assertTrue(page.is_next_button_shown(current_page + 1))
            else:
                self.assertFalse(page.is_next_button_shown())

        # click all the way up through each page
        for i in range(current_page, total_pages):
            _check_page()
            if current_page < total_pages:
                page.click_on_page(current_page + 1)
                current_page += 1
        # click all the way back down
        for i in range(current_page, 0, -1):
            _check_page()
            if current_page > 1:
                page.click_on_page(current_page - 1)
                current_page -= 1
        # BUG FIX: `return page` previously appeared right after computing
        # all_pages, which made _check_page and both pagination loops
        # unreachable -- every pagination assertion was silently skipped.
        # The return now comes last, after the checks have run.
        return page

    def test_0_threads(self):
        self.check_pages(0)

    def test_1_thread(self):
        self.check_pages(1)

    def test_20_threads(self):
        self.check_pages(20)

    def test_21_threads(self):
        self.check_pages(21)

    def test_151_threads(self):
        self.check_pages(151)

    def test_pagination_window_reposition(self):
        """Clicking "next" should scroll the window back to the top."""
        page = self.check_pages(50)
        page.click_next_page()
        page.wait_for_ajax()
        self.assertTrue(page.is_window_on_top())

    def test_redirects_to_learner_profile(self):
        """
        Scenario: Verify that learner-profile link is present on forum discussions page and we can navigate to it.

        Given that I am on discussion forum user's profile page.
        And I can see a username on left sidebar
        When I click on my username.
        Then I will be navigated to Learner Profile page.
        And I can my username on Learner Profile page
        """
        learner_profile_page = LearnerProfilePage(self.browser, self.PROFILED_USERNAME)
        page = self.check_pages(1)
        page.click_on_sidebar_username()
        learner_profile_page.wait_for_page()
        self.assertTrue(learner_profile_page.field_is_visible('username'))
@attr('shard_2')
class DiscussionSearchAlertTest(UniqueCourseTest):
    """
    Tests for spawning and dismissing alerts related to user search actions and their results.
    """

    SEARCHED_USERNAME = "gizmo"

    def setUp(self):
        super(DiscussionSearchAlertTest, self).setUp()
        CourseFixture(**self.course_info).install()
        # first auto auth call sets up a user that we will search for in some tests
        self.searched_user_id = AutoAuthPage(
            self.browser,
            username=self.SEARCHED_USERNAME,
            course_id=self.course_id
        ).visit().get_user_id()
        # this auto auth call creates the actual session user
        AutoAuthPage(self.browser, course_id=self.course_id).visit()
        self.page = DiscussionTabHomePage(self.browser, self.course_id)
        self.page.visit()

    def setup_corrected_text(self, text):
        """Stub the search endpoint to return *text* as the corrected query."""
        SearchResultFixture(SearchResult(corrected_text=text)).push()

    def check_search_alert_messages(self, expected):
        """
        Assert that exactly one alert is displayed per entry of *expected*
        and that each alert contains its entry, case-insensitively.
        """
        actual = self.page.get_search_alert_messages()
        # BUG FIX: the previous two-sequence map() padded the shorter list
        # with None under Python 2, so a count mismatch crashed on
        # None.lower() instead of failing with a clear assertion. Compare
        # lengths explicitly, then pair messages with expected substrings.
        self.assertEqual(len(actual), len(expected))
        for msg, sub in zip(actual, expected):
            self.assertIn(sub.lower(), msg.lower())

    def test_no_rewrite(self):
        """Without a spelling correction, only the "no threads" alert shows."""
        self.setup_corrected_text(None)
        self.page.perform_search()
        self.check_search_alert_messages(["no threads"])

    def test_rewrite_dismiss(self):
        """A corrected-query alert can be dismissed."""
        self.setup_corrected_text("foo")
        self.page.perform_search()
        self.check_search_alert_messages(["foo"])
        self.page.dismiss_alert_message("foo")
        self.check_search_alert_messages([])

    def test_new_search(self):
        """Each new search replaces the previous search's alerts."""
        self.setup_corrected_text("foo")
        self.page.perform_search()
        self.check_search_alert_messages(["foo"])
        self.setup_corrected_text("bar")
        self.page.perform_search()
        self.check_search_alert_messages(["bar"])
        self.setup_corrected_text(None)
        self.page.perform_search()
        self.check_search_alert_messages(["no threads"])

    def test_rewrite_and_user(self):
        """Searching a username with a correction shows both alerts."""
        self.setup_corrected_text("foo")
        self.page.perform_search(self.SEARCHED_USERNAME)
        self.check_search_alert_messages(["foo", self.SEARCHED_USERNAME])

    def test_user_only(self):
        """A username-only match alerts with a link to the user's profile."""
        self.setup_corrected_text(None)
        self.page.perform_search(self.SEARCHED_USERNAME)
        self.check_search_alert_messages(["no threads", self.SEARCHED_USERNAME])
        # make sure clicking the link leads to the user profile page
        UserProfileViewFixture([]).push()
        self.page.get_search_alert_links().first.click()
        DiscussionUserProfilePage(
            self.browser,
            self.course_id,
            self.searched_user_id,
            self.SEARCHED_USERNAME
        ).wait_for_page()
@attr('shard_2')
class DiscussionSortPreferenceTest(UniqueCourseTest):
    """
    Tests for the user's discussion sort preference, which should take
    effect immediately and persist across page refreshes.
    """

    def setUp(self):
        super(DiscussionSortPreferenceTest, self).setUp()
        # Create a course to register for.
        CourseFixture(**self.course_info).install()
        AutoAuthPage(self.browser, course_id=self.course_id).visit()
        self.sort_page = DiscussionSortPreferencePage(self.browser, self.course_id)
        self.sort_page.visit()

    def _cycle_sort_preferences(self):
        """Switch through each sort preference, verifying each one takes
        effect, and return the last value selected ("activity")."""
        selected_sort = ""
        for sort_type in ["votes", "comments", "activity"]:
            # Guard against a no-op: the new value must differ from the
            # currently selected one before we change it.
            self.assertNotEqual(selected_sort, sort_type)
            self.sort_page.change_sort_preference(sort_type)
            selected_sort = self.sort_page.get_selected_sort_preference()
            self.assertEqual(selected_sort, sort_type)
        return selected_sort

    def test_default_sort_preference(self):
        """
        Test the default sorting preference of a new user ("activity").
        """
        selected_sort = self.sort_page.get_selected_sort_preference()
        self.assertEqual(selected_sort, "activity")

    def test_change_sort_preference(self):
        """
        Test that the user's sorting preference changes properly.
        """
        self._cycle_sort_preferences()

    def test_last_preference_saved(self):
        """
        Test that the user's last sort preference survives a page refresh.
        """
        sort_type = self._cycle_sort_preferences()
        self.sort_page.refresh_page()
        selected_sort = self.sort_page.get_selected_sort_preference()
        self.assertEqual(selected_sort, sort_type)
|
10clouds/edx-platform
|
common/test/acceptance/tests/discussion/test_discussion.py
|
Python
|
agpl-3.0
| 52,662
|
[
"VisIt"
] |
e473f4f94ea9bc2f237f4fca04aef21525e8d5b638dad1212b556026e914da27
|
# -*- coding: utf-8 -*-
"""cdk - course development toolkit. Convert asciidoc input to deck.js slidedecks with many
code/development oriented features.
Usage:
cdk [-vbo] [--toc] [--notransition] [--logo=<logo>] [--theme=<theme>] [--custom-css=<cssfile>] FILE
cdk --install-theme=<theme>
cdk --default-theme=<theme>
cdk --generate=<name>
Arguments:
FILE asciidoc source file
Options:
--install-theme <theme> http path to zipfile containing theme. Will be unzipped in
theme directory.
--default-theme <theme> Theme to be the default used theme when creating slide decks
--theme <theme> Theme to be used to create slide deck
--logo <logo> Logo file to be used in theme. Logo should be ~200x200px image.
Guaranteed support only in "plain" theme.
--custom-css <cssfile> Additional style rules to be added to the slide deck. You'll be
responsible for packing any external resources (images, fonts, etc).
--generate <name> Generate sample slide source in file name. Try "slides.asc"
-v --verbose Verbose output from underlying commands
-b --bare Simple html output, no slideshow.
-o --open Open the compiled slide presentation automatically
--toc Add Table of Contents to output. Typically used with -b. When used
on normal slide deck adds section numbers to pop-up TOC.
--notransition Don't use transitions between slides.
-h --help Show this screen.
"""
from __future__ import print_function
import os
import subprocess
import webbrowser
import zipfile
from os.path import (dirname, basename, join, abspath, isfile, isdir,
expanduser, splitext)
from os import mkdir, unlink, listdir
from shutil import copy
# Python version compat checks/fixes
try:
import ConfigParser as cp # Python 2
except ImportError:
import configparser as cp # Python 3
try:
subprocess.check_output
except AttributeError:
import to6
subprocess.check_output = to6.check_output
from docopt import docopt
# Package install location and per-user preference/theme paths.
LOCATION = abspath(dirname(__file__))
PREFS_DIR = expanduser("~/.cdk")
PREFS_FILE = join(PREFS_DIR, "prefs")
THEMES_DIR = join(LOCATION, "custom", "deck.js", "themes")
def set_default_theme(theme):
    """
    Record *theme* as the default theme in the user's prefs file.
    """
    pref_init()  # make sure config files exist
    parser = cp.ConfigParser()
    parser.read(PREFS_FILE)
    # Do we need to create a section?
    if not parser.has_section("theme"):
        parser.add_section("theme")
    parser.set("theme", "default", theme)
    # Write to a sidecar file first and copy it over the real prefs file,
    # so an interrupted write cannot truncate the existing prefs.
    sidecar = "%s.2" % PREFS_FILE
    with open(sidecar, "w") as fp:
        parser.write(fp)
    copy(sidecar, PREFS_FILE)
    unlink(sidecar)
def pick_theme(manual):
    """
    Return the theme name to use: *manual* when given, otherwise the
    configured default from the prefs file, falling back to "plain".
    """
    if manual:
        return manual
    pref_init()
    parser = cp.ConfigParser()
    parser.read(PREFS_FILE)
    try:
        return parser.get("theme", "default")
    except (cp.NoSectionError, cp.NoOptionError):
        return "plain"
def pref_init():
    """Can be called without penalty. Create ~/.cdk dir if it doesn't
    exist. Copy the default pref file if it doesn't exist.

    Idempotent: safe to call before every prefs read/write.
    """
    # make sure we have a ~/.cdk dir
    if not isdir(PREFS_DIR):
        mkdir(PREFS_DIR)
    # make sure we have a default prefs file
    if not isfile(PREFS_FILE):
        copy(join(LOCATION, "custom", "prefs"), PREFS_DIR)
def install_theme(path_to_theme):
    """
    Install a theme by copying the given .zip file into the themes
    directory, extracting it there, and removing the copied archive.
    """
    pref_init()
    # cp the file
    filename = basename(path_to_theme)
    dest = join(THEMES_DIR, filename)
    copy(path_to_theme, dest)
    # unzip
    # should make sure zipfile contains only themename folder which doesn't conflict
    # with existing themename. Or some kind of sanity check
    zf = zipfile.ZipFile(dest)
    try:
        zf.extractall(THEMES_DIR)  # plus this is a potential security flaw pre 2.7.4
    finally:
        # BUG FIX: the archive was never closed, leaking the file handle;
        # on platforms that forbid deleting open files (e.g. Windows) the
        # unlink below would then fail.
        zf.close()
    # remove the copied zipfile
    unlink(dest)
def create_command(theme, bare=False, toc=False, notransition=False, logo=None, filters_list=None):
    """
    Build the asciidoc command line for the given options and return it
    as a list of argv tokens.

    Note: the assembled string is interpolated with ``% locals()``, which
    resolves the ``%(CUSTOM_DIR)s``-style placeholders embedded in the
    fragments below, and is then split on whitespace -- so paths (or a
    logo path) containing spaces are not supported.
    """
    # default filters
    if not filters_list:
        filters_list = ["source/source-highlight-filter.conf",
                        "graphviz/graphviz-filter.conf"]
    # vars for locations
    DATA_DIR = join(LOCATION, "data")
    CUSTOM_DIR = join(LOCATION, "custom")
    ASCIIDOC_DIR = join(DATA_DIR, "asciidoc-8.6.8")
    # Setup asciidoc command we want to run with backend
    if bare:
        backend = "--conf-file=%(CUSTOM_DIR)s/html5.conf "
    else:
        backend = "--conf-file=%(CUSTOM_DIR)s/deckjs.conf "
    toc_directive = ''
    if toc:
        toc_directive = '-a toc -a numbered'
    transition = ''
    if notransition:
        transition = '-a notransition'
    logo_directive = ''
    if logo:
        # %(logo)s is resolved by the trailing `% locals()` below.
        logo_directive = '-a logo=%(logo)s'
    filters = ["--conf-file=%(ASCIIDOC_DIR)s/filters/{0}".format(f) for f in filters_list]
    filters = " ".join(filters)
    cmd = " ".join(["python %(ASCIIDOC_DIR)s/asciidoc.py",
                    "--no-conf --conf-file=%(CUSTOM_DIR)s/asciidoc.conf",
                    backend,
                    filters,
                    transition,
                    "-b deckjs",
                    "-a deckjs_theme=%(theme)s -a data-uri",
                    "-a backend-confdir=%(CUSTOM_DIR)s",
                    logo_directive,
                    toc_directive,
                    "-a iconsdir=%(DATA_DIR)s/asciidoc-8.6.8/images/icons -a icons"]) % locals()
    return cmd.split()
def run_command(cmd, args):
    """Append the verbosity flag and input FILE from *args* to *cmd*,
    run it, and exit with asciidoc's output if it fails."""
    if args['--verbose']:
        cmd.append('-v')
    # NOTE(review): joining argv with newlines looks like leftover debug
    # output (" ".join would read more naturally) -- left as-is to
    # preserve behavior.
    print("\n".join(cmd) + args['FILE'])
    cmd.append(args['FILE'])
    try:
        print(subprocess.check_output(cmd))
    except subprocess.CalledProcessError as e:
        # Surface asciidoc's error output as this process's exit message.
        exit(e.output)
# Low-level functionality, exposed for easier testing.
def add_css_to_stream(out, css):
    """Insert a ``<style>`` block containing *css* immediately before the
    closing ``</body></html>`` of the HTML document held in *out*.

    *out* must be a readable+writable text stream; it is rewritten in
    place. Asserts that the document actually ends with the expected
    closing tags.

    BUG FIX: the previous implementation used ``seek(-n, SEEK_END)``,
    which text-mode streams reject under Python 3 (only zero offsets are
    allowed relative to SEEK_END). Reading, truncating, and rewriting
    keeps the function portable across Python 2 and 3.
    """
    end = "</body>\n</html>\n"
    out.seek(0)
    content = out.read()
    # check that we're looking at the right thing.
    assert content.endswith(end)
    # Rewrite: document body, then the style block, then the closing tags.
    out.seek(0)
    out.truncate()
    out.write(content[:-len(end)])
    out.write('<style type="text/css">\n')
    out.write(css)
    out.write("\n</style>\n")
    out.write(end)
def add_css(out_file, css):
    """Insert *css* into the HTML file at path *out_file*, in place."""
    with open(out_file, "r+") as out:
        add_css_to_stream(out, css)
def add_css_file(out, css_file):
    """Read *css_file* and insert its contents into the HTML file *out*."""
    with open(css_file) as css_fp:
        add_css(out, css_fp.read())
def output_file(source_file):
    """Return the output .html path for *source_file* (extension swapped)."""
    root, _ext = splitext(source_file)
    return root + ".html"
def main():
    """
    Entry point for choosing what subcommand to run. Really should be using asciidocapi
    """
    # Try parsing command line args and flags with docopt
    args = docopt(__doc__, version="cdk")
    # Am I going to need validation? No Schema for the moment...
    if args['FILE']:
        out = output_file(args['FILE'])
        # Great! Run asciidoc with appropriate flags
        theme = pick_theme(args['--theme'])
        if theme not in listdir(THEMES_DIR):
            exit('Selected theme "%s" not found. Check ~/.cdk/prefs' % theme)
        cmd = create_command(theme, args['--bare'], args['--toc'], args['--notransition'],
                             args['--logo'])
        run_command(cmd, args)
        if args['--toc']:
            # Grey out the section numbers in the pop-up TOC.
            add_css(out, '.deck-container .deck-toc li a span{color: #888;display:inline;}')
        if args['--custom-css']:
            add_css_file(out, args['--custom-css'])
        if args['--open']:
            webbrowser.open("file://" + abspath(out))
    # other commands
    elif args['--generate']:
        if isfile(args['--generate']):
            exit("%s already exists!" % args['--generate'])
        with open(args['--generate'], "w") as fp:
            sample = join(LOCATION, "custom", "sample.asc")
            # NOTE(review): the sample file handle opened here is never
            # explicitly closed (harmless for a short-lived CLI).
            fp.write(open(sample).read())
        print("Created sample slide deck in %s..." % args['--generate'])
        exit()
    elif args['--install-theme']:
        path = args['--install-theme']
        if not isfile(path):
            exit("Theme file not found.")
        if not path.endswith(".zip"):
            exit("Theme installation currently only supports theme install from "
                 ".zip files.")
        install_theme(path)
    elif args['--default-theme']:
        set_default_theme(args['--default-theme'])
# Script entry point.
if __name__ == '__main__':
    main()
|
twitter/cdk
|
cdk/__init__.py
|
Python
|
apache-2.0
| 8,826
|
[
"CDK"
] |
d666340f529a3d1fdf82e0192af44aa72a63ccbd803672e158332c7d8213655b
|
#!/usr/bin/env python
'''
GOAL:
merge various catalogs to make one catalog for all spirals in LCS
- this will enable a much quicker analysis of results than using LCSanalyzeNSA.py
USAGE:
from w/in ipython
%run ~/Dropbox/pythonCode/LCSmergespiralcats.py -r
mergedata()
s=spirals() # this will append new radius data onto tables (combine upper limits, etc)
analysis is then done with LCSanalyzespirals.py
OUTPUT:
outfile=homedir+'research/LocalClusters/NSAmastertables/LCS_Spirals_all.fits'
'''
#try:
# import pyfits
#except ImportError:
# from astropy.io import fits
from LCScommon import *
from pylab import *
import os
import mystuff as my
from astropy.io import fits
from astropy.table import Table
from astropy.table import Column
import sys
#import aplpy
#import ds9
#from astropy.io import ascii
#from LCSReadmasterBaseWithProfileFits import *
from LCSReadmasterBaseNSA import *
import argparse
# Command-line options: -r to (re)read the per-cluster master tables at
# import time, -s to restrict the merged output to spirals.
parser=argparse.ArgumentParser()
parser.add_argument('-r',"--readtables",help='read in mastertables',action='store_true')
parser.add_argument('-s',"--spirals",help='create LCS_spirals_all.fits (otherwise it will make LCS_all.fits)',default=False,action='store_true')
args = parser.parse_args()
loadclusters=args.readtables
# Lower stellar-mass cut (Msun) applied to the GALFIT samples below.
minstellarmass=1.e9
class cluster(baseClusterNSA):
    """
    One Local Cluster Survey cluster: loads SNR/GALFIT auxiliary tables on
    top of the base master table, derives size/magnitude quantities and
    sample-selection flags, and builds the derived-quantity table (self.dq)
    consumed by mergedata().
    """
    def __init__(self,clustername):
        baseClusterNSA.__init__(self,clustername)
        # Pick the master-table path based on which machine we are on
        # (laptop vs. the "coma" server).
        mypath=os.getcwd()
        if mypath.find('Users') > -1:
            print "Running on Rose's mac pro"
            infile='/Users/rfinn/research/LocalClusters/MasterTables/'+clustername+'mastertable.WithProfileFits.fits'
        elif mypath.find('home') > -1:
            print "Running on coma"
            infile=homedir+'research/LocalClusters/MasterTables/'+clustername+'mastertable.WithProfileFits.fits'
        self.mipssnrflag = self.mipssnr > 6.
        # The auxiliary tables may be missing for some clusters; warn and
        # keep going rather than aborting the whole merge.
        try:
            self.readsnr24NSA()
        except:
            print self.prefix,": couldn't read SNR24 file"
        try:
            self.readGalfitSersicResults()
        except:
            print self.prefix,": couln't read galfit sersic results"
        try:
            self.readGalfitResults()
        except:
            print self.prefix,": couldn't read galfit results"
        #self.size24=self.sex24.FWHM_DEG*3600.
        # 24um half-flux radius in arcsec, and the ratio of the GALFIT 24um
        # effective radius to the NSA optical effective radius.
        self.size24=self.sex24.FLUX_RADIUS1*mipspixelscale
        self.sizeratio=self.galfit24.cre1*mipspixelscale/self.n.SERSIC_TH50#(self.gim2d.Rhlr_2/self.gim2d.Scale_2)
        self.mingalaxymass=5.e9
        # Apparent r-band limit scaled from the Hercules cluster distance.
        self.rmagcut=20.+5.*log10(self.cdMpc/(clusterbiweightcenter['Hercules']/H0))
        self.rmag=22.5-2.5*log10(self.n.SERSICFLUX[:,2])
        # Sample selection: on the 24um image, above the 24um SNR cut,
        # resolved (Re larger than a MIPS pixel), spiral, IR-luminous, and
        # above the stellar-mass cut. galfitsample additionally excludes AGN.
        self.galfitflag=self.On24ImageFlag & (self.snrse>snr24cut) & (self.n.SERSIC_TH50 > mipspixelscale) & self.spiralflag & (self.ce.LIR > 5.1e8) & (self.stellarmass > minstellarmass)# & ~self.agnflag & (self.rmag < self.rmagcut) #& ~self.galfit24.numerical_error_flag24
        self.galfitsample=self.On24ImageFlag & (self.snrse>snr24cut) & (self.n.SERSIC_TH50 > mipspixelscale) & self.spiralflag& ~self.agnflag & (self.ce.LIR > 5.1e8) & (self.stellarmass > minstellarmass)
        # exclude objects that had issues w/galfit fit, like nearby neighbor
        gal_ids=visual_cut[self.prefix]
        for id in gal_ids:
            try:
                if self.spiralflag[self.nsadict[id]]:
                    print 'Check out fit for this spiral ',self.prefix,' NSAID=',id
                    self.galfitflag[self.nsadict[id]]=0
            except:
                print 'ERROR: problem resetting galfitflag with ',self.prefix,' NSAID=',id
        '''
        self.member=(self.dvflag) & (self.drR200 < 1.3)
        self.nearfield=(self.dvflag) & (self.drR200 > 1.3) & (self.drR200 < 2.)
        self.field=((self.dvflag) & (self.drR200 > 2.)) | ~self.dvflag
        #self.member=self.dvflag
        self.sample24flag=self.galfitflag & self.spiralflag# self.On24ImageFlag & (self.snr24>3) & ~self.agnflag & (self.n.SERSIC_TH50 > Remin) & self.spiralflag# & (log10(self.stellarmass) > 9.5) & (log10(self.stellarmass) < 12)
        self.blueclustersample=self.member & self.blueflag & self.sample24flag
        self.bluefieldsample=~self.member & self.blueflag & self.sample24flag
        self.greenclustersample=self.member & self.greenflag & self.sample24flag
        self.greenfieldsample=~self.member & self.greenflag & self.sample24flag
        self.redclustersample=self.member & self.redflag & self.sample24flag
        self.redfieldsample=~self.member & self.redflag & self.sample24flag
        self.varlookup={'stellarmass':log10(self.stellarmass),'Re':self.n.SERSIC_TH50,'R24':self.galfit24.re1*mipspixelscale,'NUV':self.n.ABSMAG[:,1],'r':self.n.ABSMAG[:,4],'m24':self.sex24.MAG_BEST,'redshift':self.n.ZDIST,'NUVr':(self.n.ABSMAG[:,1]-self.n.ABSMAG[:,4]),'NUV24':(self.n.ABSMAG[:,1]-self.sex24.MAG_BEST),'24mass':(self.sex24.MAG_BEST-log10(self.stellarmass)),'ratioR':self.sex24.FLUX_RADIUS1*mipspixelscale/self.n.SERSIC_TH50,'BT':self.gim2d.B_T_r}
        '''
        self.makedqtable()
    def makedqtable(self):
        """Build self.dq, an astropy Table of derived quantities for this
        cluster (one row per galaxy), ending with a CLUSTER name column."""
        # derived quantities
        #my_columns=['SIZE_RATIO','STELLARMASS','SNR_SE','RMAG','DELTA_DEC','DELTA_RA', 'DELTA_V','DR_R200','CLUSTER_PHI','HIflag','HIDef','HImass','NUVr_color','agnflag','galfitflag','CLUSTER_SIGMA','CLUSTER_REDSHIFT','CLUSTER_LX','CLUSTER']
        # removing SIZE_RATIO and replacing using upper limits, etc
        my_columns=['SIZERATIO','STELLARMASS','SNR_SE','RMAG','DELTA_DEC','DELTA_RA', 'DELTA_V','DR_R200','CLUSTER_PHI','HIflag','HIDef','HImass','NUVr_color','agnflag','galfitflag','CLUSTER_SIGMA','CLUSTER_REDSHIFT','CLUSTER_LX','APEXFLUX','APEXFLUXERR','APEX_SNR','CLUSTER']
        arrays=[self.sizeratio,self.stellarmass,self.snrse,self.rmag,self.delta_dec,self.delta_ra,self.dv,self.drR200,self.cluster_phi,self.HIflag,self.HIDef,self.HImass,self.NUVr_color,self.agnflag,self.galfitflag,self.clustersigma*ones(len(self.sizeratio),'f'),self.cz*ones(len(self.sizeratio),'f'),self.cLx*ones(len(self.sizeratio),'f'),self.mipsflux,self.mipsfluxerr,self.mipssnr]
        datatypes=['d','d','d','d','d','d','d','d','d','i','d','d','d','i','i','f','f','f','d','d','d']
        allcolumns=[]
        self.dq=Table()
        # The final entry ('CLUSTER') is a string column handled separately
        # below, hence len(my_columns) - 1 here.
        for i in range(len(my_columns) - 1):
            newcol=Column(data=np.array(arrays[i],datatypes[i]),name=my_columns[i])
            self.dq.add_column(newcol)
            if my_columns[i].find('CLUSTER') > -1:
                print newcol
        # add column containing cluster name
        clustername=[]
        for i in range(len(self.sizeratio)):
            clustername.append(self.prefix)
        newcol=Column(data=np.array(clustername,'S8'),name='CLUSTER')
        self.dq.add_column(newcol)
        self.allclustername=clustername
# Instantiate all nine clusters up front when -r/--readtables was given;
# reading the master tables is slow, so it is opt-in.
if loadclusters:
    mkw11=cluster('MKW11')
    ngc=cluster('NGC6107')
    coma=cluster('Coma')
    mkw8=cluster('MKW8')
    awm4=cluster('AWM4')
    a2052=cluster('A2052')
    a2063=cluster('A2063')
    herc=cluster('Hercules')
    a1367=cluster('A1367')
    # Clusters ordered by X-ray luminosity; this ordering determines the
    # row order of the concatenated tables built in mergedata().
    clustersbylx=[mkw11,ngc,mkw8,awm4,herc,a1367,a2063,a2052,coma]
    #clustersbylx=[mkw11]
    mylocalclusters=clustersbylx
    #clustersbymass=[mkw11,awm4,mkw8,ngc,a2052,a2063,herc,a1367,coma]
    #clustersbydistance=[a1367,mkw11,coma,mkw8,ngc,awm4,a2052,a2063,herc]
def mergedata():
    """
    Concatenate the per-cluster tables (NSA, GALFIT 24um, GIM2D, Galaxy
    Zoo, Chary-Elbaz, local density, WISE, SExtractor 24um, stellar mass,
    and derived quantities) across all clusters in ``mylocalclusters``
    into one FITS binary table.

    Rows are restricted to galaxies on the 24um image; with -s/--spirals
    also to spirals. Output goes to LCS_Spirals_all.fits or LCS_all.fits.
    """
    # Column names to pull from each source table. Which source table a
    # list maps to is determined in the loop below by a sentinel column
    # name that is unique to that list.
    nsa_columns=['NSAID','IAUNAME','SUBDIR','RA','DEC','ZDIST','SERSIC_TH50','SERSIC_N','SERSIC_BA','SERSIC_PHI','PETROTH50','PETROTH90','D4000','HAEW','VDISP','FA','HAFLUX','N2FLUX','HBFLUX','O3FLUX','AHDEW','AV','ISDSS','IALFALFA','NMGY','NMGY_IVAR','ABSMAG','SERSICFLUX','CLUMPY','ASYMMETRY','RUN','CAMCOL','FIELD','RERUN']
    nsa_format=['J','S','S','E','E','E','E','E','E','E','E','E','E','E','E','E','E']
    n_columns=['HIMASS','AGNKAUFF','AGNKEWLEY','AGNSTASIN','AGCNUMBER']
    n_format=['E']
    #galfit24_columns=['mag1','mag1err','nsersic1','nsersic1err','re1','re1err','axisratio1','axisratio1err','pa1','pa1err','xc1','yc1','numerical_error_flag24','chi2nu','cmag1','cmag1err','cnsersic1','cnsersic1err','cre1','cre1err','caxisratio1','caxisratio1err','cpa1','cpa1err','cxc1','cyc1','cnumerical_error_flag24','cchi2nu','fcmag1','fcmag1err','fcnsersic1','fcnsersic1err','fcre1','fcre1err','fcaxisratio1','fcaxisratio1err','fcpa1','fcpa1err','fcxc1','fcyc1','fcnumerical_error_flag24','fcchi2nu']
    galfit24_columns=['fmag1','fmag1err','fnsersic1','fnsersic1err','fre1','fre1err','faxisratio1','faxisratio1err','fpa1','fpa1err','fxc1','fyc1','fnumerical_error_flag24','fchi2nu','fcmag1','fcmag1err','fcnsersic1','fcnsersic1err','fcre1','fcre1err','fcaxisratio1','fcaxisratio1err','fcpa1','fcpa1err','fcxc1','fcyc1','fcnumerical_error_flag24','fcchi2nu']
    galfit24_format=['E','E','E','E']
    gim2d_columns=['matchflag','B_T_r','e__B_T_r','S2g_1','Re','e_Re','Rd','e_Rd','Rhlr_2','ng','e_ng']
    gim2d_format=['L','E','E','E','E','E','E','E']
    zoo_columns=['p_elliptical','p_spiral','p_el','p_cs','p_uncertain','p_mg','p_edge','p_dk','match_flag']
    zoo_format=['E','E','E','E','E','E','E','E','L']
    ce_columns=['LIR_ZDIST','SFR_ZDIST','FLUX24','FLUX24ERR','SE_FLUX_AUTO','LIR_ZCLUST','SFR_ZCLUST','MATCHFLAG24','MIPS_WEIGHT']
    # NOTE(review): this second assignment overwrites zoo_format and was
    # probably meant to be ce_format; the *_format lists are unused below,
    # so behavior is unaffected.
    zoo_format=['E','E']
    se_columns=['FLUX_BEST','FLUX_AUTO','PETRO_RADIUS','FLAGS','FLUX_RADIUS1','FLUX_RADIUS2']
    ld_columns=['SIGMA_NN','SIGMA_5','SIGMA_10','RHOMASS']
    wise_columns=['W1MAG_3','W1FLG_3','W2MAG_3','W2FLG_3','W3MAG_3','W3FLG_3','W4MAG_3','W4FLG_3']
    mstar_columns=['MSTAR_AVG','MSTAR_50','SFR_AVG','SFR100_AVG','SFRAGE','TAU']
    ld_format=['E','E','E','E']
    my_columns=['STELLARMASS','SNR_SE','RMAG','DELTA_DEC','DELTA_RA', 'DELTA_V','DR_R200','CLUSTER_PHI','HIflag','HIDef','NUVr_color','agnflag','galfitflag','CLUSTER_SIGMA','CLUSTER_REDSHIFT','CLUSTER_LX','CLUSTER','APEXFLUX','APEXFLUXERR','APEX_SNR']
    allcolumns=[nsa_columns,n_columns,galfit24_columns,gim2d_columns,zoo_columns,ce_columns,ld_columns,my_columns,wise_columns,se_columns,mstar_columns]
    integer_columns=['NSAID','ISDSS','IALFALFA','AGNKAUFF','AGNKEWLEY','AGNSTASIN','matchflag','match_flag','FLG','FLAGS']
    nsa_multicolumn=['NMGY','NMGY_IVAR','ABSMAG','SERSICFLUX','CLUMPY','ASYMMETRY']
    #print allcolumns
    #ldat=Table()
    ldat=[]
    for collist in allcolumns:
        for col in collist:
            print col,str(col)
            newcol=[]
            # Concatenate this column across all clusters, in the order
            # set by mylocalclusters.
            for cl in mylocalclusters:
                # Select the source table for this column list via a
                # sentinel column unique to each list.
                if 'NSAID' in collist:
                    tabdat=cl.nsa
                elif 'HIMASS' in collist:
                    tabdat=cl.n
                elif 'fcmag1' in collist:
                    tabdat=cl.galfit24
                elif 'B_T_r' in collist:
                    tabdat=cl.gim2d
                elif 'p_mg' in collist:
                    tabdat=cl.zoo
                elif 'LIR_ZDIST' in collist:
                    tabdat=cl.ce
                elif 'SIGMA_NN' in collist:
                    tabdat=cl.ld
                elif 'STELLARMASS' in collist:
                    tabdat=cl.dq
                elif 'W1MAG_3' in collist:
                    tabdat=cl.wise
                elif 'FLUX_RADIUS1' in collist:
                    tabdat=cl.sex24
                elif 'MSTAR_50' in collist:
                    tabdat=cl.jmass
                # Keep only galaxies on the 24um image (and spirals, with -s).
                if args.spirals:
                    newcol = newcol + tabdat[col][cl.On24ImageFlag & cl.spiralflag].tolist()
                else:
                    newcol = newcol + tabdat[col][cl.On24ImageFlag].tolist()
            # Wrap the concatenated values in a fits.Column with a FITS
            # format code chosen by the column's name/type.
            if col.find('NSAID') > -1:
                newcol=fits.Column(array=np.array(newcol,'i'),name=col, format='J')
            elif (col.find('flag') > -1) | (col.find('AGN') > -1):
                newcol=fits.Column(array=np.array(newcol,dtype='bool'),name=col,format='L')
            elif col in integer_columns:#(col.find('flag') > -1) | (col.find('AGN') > -1) | (col.find('ISDSS') > -1):
                newcol=fits.Column(array=np.array(newcol,dtype='i'),name=col,format='J')
                #newcol=Column(data=np.array(newcol),name=col,dtype='i')
                #print newcol
            elif col == 'CLUSTER':
                newcol=fits.Column(array=np.array(newcol,'S8'),name=col,format='8A')
            elif col == 'IAUNAME':
                newcol=fits.Column(array=np.array(newcol,'S19'),name=col,format='19A')
            elif col == 'SUBDIR':
                newcol=fits.Column(array=np.array(newcol,'S27'),name=col,format='27A')
            elif col in nsa_multicolumn:
                # 7-element vector columns from the NSA catalog.
                newcol=fits.Column(array=newcol,name=col, format='7D')
            else:
                newcol=fits.Column(array=np.array(newcol,'d'),name=col, format='E')
            #ldat.add_column(newcol)
            ldat=ldat+[newcol]
            #col0a = fits.Column(name=col,array=newcol,format=aformat)
    #print homedir
    #hdu=fits.BinTableHDU.from_columns(ldat)
    print len(ldat)
    #for l in ldat:
    #    print l.size,len(l)
    #cols=fits.ColDefs(ldat)
    tbhdu=fits.BinTableHDU.from_columns(ldat)
    #thdulist=fits.HDUList([hdu,tbhdu
    if args.spirals:
        outfile=homedir+'research/LocalClusters/NSAmastertables/LCS_Spirals_all.fits'
    else:
        outfile=homedir+'research/LocalClusters/NSAmastertables/LCS_all.fits'
    tbhdu.writeto(outfile,clobber='yes')
    #if os.path.exists(outfile):
    #    os.remove(outfile)
    #allcols=fits.ColDefs([col0a,col0b,col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col11,col12,col13,col14,col15,col16,col17,col18,col19,col20,col21,col22,col23,col24,col25,col26,col27,col28,col29,col30,col31,col32,col33,col34,col35,col36])
    #ldat.write(outfile)
    #tbhdu=fits.new_table(allcols)
    #tbhdu.writeto(outfile)
    # for col in my_columns:
    #     newcol=[]
    #     for cl in clustersbymass:
    #         newcol = newcol + cl.col[cl.galfitflag].tolist()
    #     newcol=np.array(newcol,'d')
    #     ldat.add_column(col,newcol)
class spirals():
    '''
    read in spiral table and append info on combined radius measurements

    Reads the merged LCS table written earlier in this script, builds
    combined 24um effective-radius columns (SUPER_RE1 etc.) and size-ratio
    columns, and writes the result to a new *_size.fits table.
    '''
    def __init__(self):
        # The --spirals flag selects the spiral-only merged table.
        if args.spirals:
            outfile=homedir+'research/LocalClusters/NSAmastertables/LCS_Spirals_all.fits'
        else:
            outfile=homedir+'research/LocalClusters/NSAmastertables/LCS_all.fits'
        hdq=fits.open(outfile)
        self.s=hdq[1].data
        orig_cols=self.s.columns
        # NSAID -> row index lookup for the replacement loops below
        self.nsadict=dict((a,b) for a,b in zip(self.s.NSAID,arange(len(self.s.NSAID))))
        # define new columns
        # NSA IDs of galaxies that are unresolved at 24um (point sources)
        point_sources=[166113,166083,72768,72633,69673,166129,79608,113092,72625,79540,70657,79545,69593,113065,166682,78100,103773,166167,72461,72490,79531,79591,166064,166051,166083]
        bad_24_fits=[72722, 80769, 68339, 68305, 82198, 78100, 70634] # can't get 24um to converge, but not point sources; use unconvolved radius as upper limit (includes ring galaxy)
        #bad_fit=[82198] # don't know what
        use_free_param_model=[113107]
        # create a new re1_24 array that uses fixed BA, free model, or unconvolved model
        # point sources - set Re to 1 pixel and mark as upper limit
        # bad fits but not point sources - used model without convolution as upper limit
        # one case where freely fit BA/PA is warranted (113107) b/c NSA PA is aligned with bar whereas 24um emission is aligned with disk
        print len(self.nsadict),len(self.s.RA)
        # Flags default to False; the per-galaxy replacement loops below are
        # currently commented out, so these stay all-False as written.
        self.re_upperlimit=zeros(len(self.s.RA),'bool')
        self.pointsource=zeros(len(self.s.RA),'bool')
        # Start from the fixed-BA convolved GALFIT radii (fcre1).
        self.super_re1=ones(len(self.s.fcre1))*self.s.fcre1
        self.super_re1err=ones(len(self.s.fcre1))*self.s.fcre1err
        ## for id in point_sources:
        ##     try:
        ##         i=self.nsadict[int(id)]
        ##     except KeyError:
        ##         continue
        ##     print 'replacing radius measurement!',self.s.fcre1[i],self.super_re1[i],1
        ##     self.super_re1[i]=1.
        ##     self.super_re1err[i]=1.
        ##     self.re_upperlimit[i]=1
        ##     self.pointsource[i]=1
        ##     print 'take 2: replacing radius measurement!',self.s.fcre1[i],self.super_re1[i],1
        ## for id in bad_24_fits:
        ##     i=self.nsadict[id]
        ##     self.super_re1[i]=self.s.fre1[i]
        ##     self.super_re1err[i]=self.s.fre1err[i]
        ##     self.re_upperlimit[i]=1
        ## for id in use_free_param_model:
        ##     i=self.nsadict[id]
        ##     self.super_re1[i]=self.s.fre1[i]
        ##     self.super_re1err[i]=self.s.fre1err[i]
        # 24um Re relative to NSA r-band Re; mipspixelscale converts pixels
        # to arcsec to match SERSIC_TH50 — presumably both in arcsec, TODO confirm
        self.SIZE_RATIO=self.super_re1*mipspixelscale/self.s.SERSIC_TH50
        self.SIZE_RATIOERR=self.super_re1err*mipspixelscale/self.s.SERSIC_TH50
        # add columns to table and rewrite
        # self.pointsource
        # self.re_upperlimit
        # self.super_re1
        # self.SIZE_RATIO
        c1=fits.Column(name='POINTSOURCE', format='L',array=self.pointsource)
        c2=fits.Column(name='RE_UPPERLIMIT', format='L',array=self.re_upperlimit)
        c3=fits.Column(name='SUPER_RE1', format='E',array=self.super_re1)
        c3a=fits.Column(name='SUPER_RE1ERR', format='E',array=self.super_re1err)
        c4=fits.Column(name='SIZE_RATIO', format='E',array=self.SIZE_RATIO)
        c4a=fits.Column(name='SIZE_RATIOERR', format='E',array=self.SIZE_RATIOERR)
        new_cols=fits.ColDefs([c1,c2,c3,c3a,c4,c4a])
        hdu=fits.BinTableHDU.from_columns(orig_cols+new_cols)
        if args.spirals:
            outfile=homedir+'research/LocalClusters/NSAmastertables/LCS_Spirals_all_size.fits'
        else:
            outfile=homedir+'research/LocalClusters/NSAmastertables/LCS_all_size.fits'
        hdu.writeto(outfile,clobber='yes')
def readtable():
    """Read back the merged LCS table and return it as an astropy Table."""
    # The --spirals flag selects the spiral-only merged table.
    outfile = (homedir+'research/LocalClusters/NSAmastertables/LCS_Spirals_all.fits'
               if args.spirals
               else homedir+'research/LocalClusters/NSAmastertables/LCS_all.fits')
    return Table.read(outfile)
|
rfinn/LCS
|
paper1code/LCSmergespiralcats.py
|
Python
|
gpl-3.0
| 18,319
|
[
"Galaxy"
] |
704a063e114e71be3c135171e99afdd863ed36adf16335fa103d5b94d0dfc7b2
|
import re
import logging
import pybel.constants as pc
from collections import defaultdict
from pybel.struct import has_protein_modification
from pybel.canonicalize import edge_to_bel
from pybel.resources import get_bel_resource
from indra.statements import *
from indra.sources.bel.rdf_processor import bel_to_indra, chebi_name_id
from indra.databases import hgnc_client, uniprot_client
from indra.assemblers.pybel.assembler import _pybel_indra_act_map
__all__ = [
'PybelProcessor',
'get_agent',
]
logger = logging.getLogger(__name__)
# Mapping from BEL default-namespace pmod() names to INDRA modification types.
_pybel_indra_pmod_map = {
    'Ph': 'phosphorylation',
    'Hy': 'hydroxylation',
    'Sumo': 'sumoylation',
    'Ac': 'acetylation',
    'Glyco': 'glycosylation',
    'ADPRib': 'ribosylation',
    'Ub': 'ubiquitination',
    'Farn': 'farnesylation',
    'Gerger': 'geranylgeranylation',
    'Palm': 'palmitoylation',
    'Myr': 'myristoylation',
    'Me': 'methylation',
}

#: A mapping from the BEL text location annotation to the INDRA ones at
# :py:data:`indra.reach.processor._section_list`
#: see https://arty.scai.fraunhofer.de/artifactory/bel/annotation/text-location/text-location-1.0.0.belanno
_pybel_text_location_map = {
    "Abstract": 'abstract',
    "Results": 'results',
    "Legend": 'figure',
    "Review": None,
    'Introduction': 'introduction',
    'Methods': 'methods',
    'Discussion': 'discussion',
    'Conclusion': 'conclusion'
}
class PybelProcessor(object):
    """Extract INDRA Statements from a PyBEL Graph.

    Currently does not handle non-causal relationships (positiveCorrelation,
    negativeCorrelation, hasVariant, etc.)

    Parameters
    ----------
    graph : pybel.BELGraph
        PyBEL graph containing the BEL content.

    Attributes
    ----------
    statements : list[indra.statements.Statement]
        A list of extracted INDRA Statements representing BEL Statements.
    unhandled : list[tuple]
        (u_data, v_data, key, edge_data) tuples for edges that could not be
        converted into Statements.
    """
    def __init__(self, graph):
        self.graph = graph
        self.statements = []
        self.unhandled = []
        self.annot_manager = AnnotationManager(self.graph.annotation_url)

    # FIXME: Handle reactions
    def get_statements(self):
        """Convert the graph's causal edges into INDRA Statements.

        Populates ``self.statements``; edges that cannot be interpreted are
        collected in ``self.unhandled``.
        """
        for u_data, v_data, k, d in self.graph.edges(keys=True, data=True):
            # We only interpret causal relations, not correlations
            if d[pc.RELATION] not in pc.CAUSAL_RELATIONS:
                self.unhandled.append((u_data, v_data, k, d))
                continue
            # If the left or right-hand sides involve complex abundances,
            # add them as statements
            for node_ix, node_data in enumerate((u_data, v_data)):
                if node_data[pc.FUNCTION] == pc.COMPLEX:
                    self._get_complex(u_data, v_data, k, d, node_ix)
            subj_activity = _get_activity_condition(d.get(pc.SUBJECT))
            obj_activity = _get_activity_condition(d.get(pc.OBJECT))
            obj_to_loc = _get_translocation_target(d.get(pc.OBJECT))
            # If the object is a translocation, this represents a controlled
            # translocation, which we currently do not represent
            if obj_to_loc:
                self.unhandled.append((u_data, v_data, k, d))
                logger.info("Controlled translocations are currently not "
                            "handled: %s)", edge_to_bel(u_data, v_data, d))
                continue
            v_func = v_data[pc.FUNCTION]
            # Modification, e.g.
            #   x(Foo) -> p(Bar, pmod(Ph))
            #   act(x(Foo)) -> p(Bar, pmod(Ph))
            if v_func == pc.PROTEIN and \
                    has_protein_modification(v_data):
                if obj_activity:
                    logger.info("Ignoring object activity modifier in "
                                "modification statement: %s, %s, %s, %s",
                                u_data, v_data, k, d)
                else:
                    self._get_modification(u_data, v_data, k, d)
            elif obj_activity:
                # If the agents on the left and right hand sides are the same,
                # then get an active form:
                # ActiveForm
                #   p(Foo, {variants}) ->/-| act(p(Foo))
                # Also Composite active forms:
                #   compositeAbundance(p(Foo, pmod('Ph', 'T')),
                #                      p(Foo, pmod('Ph', 'Y'))) ->/-|
                #                      act(p(Foo))
                if not subj_activity and _proteins_match(u_data, v_data):
                    self._get_active_form(u_data, v_data, k, d)
                # Gef
                #   act(p(Foo)) => gtp(p(Foo))
                # Gap
                #   act(p(Foo)) =| gtp(p(Foo))
                elif subj_activity and _rel_is_direct(d) and \
                        obj_activity.activity_type == 'gtpbound':
                    self._get_gef_gap(u_data, v_data, k, d)
                # Activation/Inhibition
                #   x(Foo) -> act(x(Foo))
                #   act(x(Foo)) -> act(x(Foo))
                # GtpActivation
                #   gtp(p(Foo)) => act(p(Foo))
                else:
                    self._get_regulate_activity(u_data, v_data, k, d)
            # Activations involving biological processes or pathologies
            #   x(Foo) -> bp(Bar)
            elif v_func in (pc.BIOPROCESS, pc.PATHOLOGY):
                self._get_regulate_activity(u_data, v_data, k, d)
            # Regulate amount
            #   x(Foo) -> p(Bar)
            #   x(Foo) -> r(Bar)
            #   act(x(Foo)) -> p(Bar):
            #   x(Foo) -> deg(p(Bar))
            #   act(x(Foo)) ->/-| deg(p(Bar))
            elif v_data.function in (pc.PROTEIN, pc.RNA, pc.ABUNDANCE,
                                     pc.COMPLEX, pc.MIRNA) and not obj_activity:
                self._get_regulate_amount(u_data, v_data, k, d)
            # Controlled conversions
            #   x(Foo) -> rxn(reactants(r1,...,rn), products(p1,...pn))
            #   act(x(Foo)) -> rxn(reactants(r1,...,rn), products(p1,...pn))
            # Note that we can't really handle statements where the relation
            # is decreases, as inhibition of a reaction match the semantics
            # of a controlled conversion
            elif v_data.function == pc.REACTION and \
                    d[pc.RELATION] in pc.CAUSAL_INCREASE_RELATIONS:
                self._get_conversion(u_data, v_data, k, d)
            # UNHANDLED
            # rxn(reactants(r1,...,rn), products(p1,...pn))
            # Complex(a,b)
            # p(A, pmod('ph')) -> Complex(A, B)
            # Complex(A-Ph, B)
            # Complexes
            #   complex(x(Foo), x(Bar), ...)
            else:
                self.unhandled.append((u_data, v_data, k, d))

    def _get_complex(self, u_data, v_data, k, edge_data, node_ix):
        """Extract a Complex Statement from a complex abundance node."""
        # Get an agent with bound conditions from the Complex
        assert node_ix in (0, 1)
        node_data = [u_data, v_data][node_ix]
        cplx_agent = get_agent(node_data, None)
        if cplx_agent is None:
            return
        # Flatten the main agent and its bound members into a flat member list
        agents = [bc.agent for bc in cplx_agent.bound_conditions]
        cplx_agent.bound_conditions = []
        agents.append(cplx_agent)
        ev = self._get_evidence(u_data, v_data, k, edge_data)
        stmt = Complex(agents, evidence=[ev])
        self.statements.append(stmt)

    def _get_regulate_amount(self, u_data, v_data, k, edge_data):
        """Extract an IncreaseAmount or DecreaseAmount Statement."""
        subj_agent = get_agent(u_data, edge_data.get(pc.SUBJECT))
        obj_agent = get_agent(v_data, edge_data.get(pc.OBJECT))
        if subj_agent is None or obj_agent is None:
            # Fix: include the edge key for consistency with other handlers
            self.unhandled.append((u_data, v_data, k, edge_data))
            return
        obj_mod = edge_data.get(pc.OBJECT)
        deg_polarity = (
            -1
            if obj_mod and obj_mod.get(pc.MODIFIER) == pc.DEGRADATION else
            1
        )
        rel_polarity = (1 if edge_data[pc.RELATION] in
                        pc.CAUSAL_INCREASE_RELATIONS else -1)
        # Set polarity accordingly based on the relation type and whether
        # the object is a degradation node
        if deg_polarity * rel_polarity > 0:
            stmt_class = IncreaseAmount
        else:
            stmt_class = DecreaseAmount
        ev = self._get_evidence(u_data, v_data, k, edge_data)
        stmt = stmt_class(subj_agent, obj_agent, evidence=[ev])
        self.statements.append(stmt)

    def _get_modification(self, u_data, v_data, k, edge_data):
        """Extract Modification Statements (one per pmod on the object)."""
        subj_agent = get_agent(u_data, edge_data.get(pc.SUBJECT))
        mods, muts = _get_all_pmods(v_data)
        # The object agent is built from the unmodified parent node
        v_data_no_mods = v_data.get_parent()
        obj_agent = get_agent(v_data_no_mods, edge_data.get(pc.OBJECT))
        if subj_agent is None or obj_agent is None:
            self.unhandled.append((u_data, v_data, k, edge_data))
            return
        for mod in mods:
            modclass = modtype_to_modclass[mod.mod_type]
            ev = self._get_evidence(u_data, v_data, k, edge_data)
            stmt = modclass(subj_agent, obj_agent, mod.residue, mod.position,
                            evidence=[ev])
            self.statements.append(stmt)

    def _get_regulate_activity(self, u_data, v_data, k, edge_data):
        """Extract an Activation, Inhibition, or GtpActivation Statement."""
        # Subject info
        subj_agent = get_agent(u_data, edge_data.get(pc.SUBJECT))
        subj_activity = _get_activity_condition(edge_data.get(pc.SUBJECT))
        subj_function = u_data.function
        # Object info
        # Note: Don't pass the object modifier data because we don't want to
        # put an activity on the agent
        obj_agent = get_agent(v_data, None)
        obj_function = v_data.function
        # If it's a bioprocess object, we won't have an activity in the edge
        if obj_function in (pc.BIOPROCESS, pc.PATHOLOGY):
            activity_type = 'activity'
        else:
            obj_activity_condition = \
                _get_activity_condition(edge_data.get(pc.OBJECT))
            activity_type = obj_activity_condition.activity_type
            assert obj_activity_condition.is_active is True
        # Check for valid subject/object
        if subj_agent is None or obj_agent is None:
            # Fix: include the edge key for consistency with other handlers
            self.unhandled.append((u_data, v_data, k, edge_data))
            return
        # Check which kind of statement we need to make
        # GtpActivation
        if subj_activity and subj_activity.activity_type == 'gtpbound' and \
                subj_function == pc.PROTEIN and obj_function == pc.PROTEIN and \
                edge_data[pc.RELATION] == pc.DIRECTLY_INCREASES:
            stmt_class = GtpActivation
        elif edge_data[pc.RELATION] in pc.CAUSAL_INCREASE_RELATIONS:
            stmt_class = Activation
        else:
            stmt_class = Inhibition
        ev = self._get_evidence(u_data, v_data, k, edge_data)
        stmt = stmt_class(subj_agent, obj_agent, activity_type, evidence=[ev])
        self.statements.append(stmt)

    def _get_active_form(self, u_data, v_data, k, edge_data):
        """Extract an ActiveForm Statement."""
        subj_agent = get_agent(u_data, edge_data.get(pc.SUBJECT))
        # Don't pass the object modifier info because we don't want an activity
        # condition applied to the agent
        obj_agent = get_agent(v_data)
        if subj_agent is None or obj_agent is None:
            # Fix: include the edge key for consistency with other handlers
            self.unhandled.append((u_data, v_data, k, edge_data))
            return
        obj_activity_condition = \
            _get_activity_condition(edge_data.get(pc.OBJECT))
        activity_type = obj_activity_condition.activity_type
        # If the relation is DECREASES, this means that this agent state
        # is inactivating
        is_active = edge_data[pc.RELATION] in pc.CAUSAL_INCREASE_RELATIONS
        ev = self._get_evidence(u_data, v_data, k, edge_data)
        stmt = ActiveForm(subj_agent, activity_type, is_active, evidence=[ev])
        self.statements.append(stmt)

    def _get_gef_gap(self, u_data, v_data, k, edge_data):
        """Extract a Gef (increase) or Gap (decrease) Statement."""
        subj_agent = get_agent(u_data, edge_data.get(pc.SUBJECT))
        obj_agent = get_agent(v_data)
        if subj_agent is None or obj_agent is None:
            self.unhandled.append((u_data, v_data, k, edge_data))
            return
        ev = self._get_evidence(u_data, v_data, k, edge_data)
        if edge_data[pc.RELATION] in pc.CAUSAL_INCREASE_RELATIONS:
            stmt_class = Gef
        else:
            stmt_class = Gap
        stmt = stmt_class(subj_agent, obj_agent, evidence=[ev])
        self.statements.append(stmt)

    def _get_conversion(self, u_data, v_data, k, edge_data):
        """Extract a Conversion Statement from a controlled reaction."""
        subj_agent = get_agent(u_data, edge_data.get(pc.SUBJECT))
        # Get the nodes for the reactants and products
        reactant_agents = [get_agent(r) for r in v_data[pc.REACTANTS]]
        product_agents = [get_agent(p) for p in v_data[pc.PRODUCTS]]
        if subj_agent is None or \
                any([r is None for r in reactant_agents]) or \
                any([p is None for p in product_agents]):
            self.unhandled.append((u_data, v_data, k, edge_data))
            return
        ev = self._get_evidence(u_data, v_data, k, edge_data)
        # Fix: wrap the Evidence in a list for consistency with all other
        # handlers, which pass evidence=[ev]
        stmt = Conversion(subj_agent, obj_from=reactant_agents,
                          obj_to=product_agents, evidence=[ev])
        self.statements.append(stmt)

    def _get_evidence(self, u_data, v_data, k, edge_data):
        """Build an INDRA Evidence object from a PyBEL edge's metadata."""
        ev_text = edge_data.get(pc.EVIDENCE)
        ev_citation = edge_data.get(pc.CITATION)
        ev_pmid = None
        # Fix: ev_ref was previously left unbound when the edge had no
        # citation, raising NameError at the `if ev_ref:` check below
        # (this was the existing "FIXME what if ev_citation is Falsy?").
        ev_ref = None
        if ev_citation:
            cit_type = ev_citation[pc.CITATION_TYPE]
            cit_ref = ev_citation[pc.CITATION_REFERENCE]
            if cit_type == pc.CITATION_TYPE_PUBMED:
                ev_pmid = cit_ref
            else:
                ev_ref = '%s: %s' % (cit_type, cit_ref)
        epistemics = {'direct': _rel_is_direct(edge_data)}
        annotations = edge_data.get(pc.ANNOTATIONS, {})
        annotations['bel'] = edge_to_bel(u_data, v_data, edge_data)
        if ev_ref:
            annotations['citation_ref'] = ev_ref
        context = extract_context(annotations, self.annot_manager)
        text_location = annotations.pop('TextLocation', None)
        if text_location:
            # Handle dictionary text_location like {'Abstract': True}
            if isinstance(text_location, dict):
                # FIXME: INDRA's section_type entry is meant to contain
                # a single section string like "abstract" but in principle
                # pybel could have a list of entries in the TextLocation dict.
                # Here we just take the first one.
                text_location = list(text_location.keys())[0]
            epistemics['section_type'] = \
                _pybel_text_location_map.get(text_location)
        ev = Evidence(text=ev_text, pmid=ev_pmid, source_api='bel',
                      source_id=k, epistemics=epistemics,
                      annotations=annotations, context=context)
        return ev
def get_agent(node_data, node_modifier_data=None):
    """Return an INDRA Agent for a PyBEL node, or None if unhandled.

    Parameters
    ----------
    node_data : dict
        PyBEL node data dictionary.
    node_modifier_data : Optional[dict]
        Edge-level modifier data (activity, translocation) applied to the
        resulting agent.

    Returns
    -------
    Optional[indra.statements.Agent]
        The corresponding Agent, or None if the node's function, namespace,
        identifiers, or modifiers could not be handled.
    """
    # FIXME: Handle translocations on the agent for ActiveForms, turn into
    # location conditions
    # Check the node type/function
    node_func = node_data[pc.FUNCTION]
    if node_func not in (pc.PROTEIN, pc.RNA, pc.BIOPROCESS, pc.COMPLEX,
                         pc.PATHOLOGY, pc.ABUNDANCE, pc.MIRNA):
        mod_data = node_modifier_data or 'No node data'
        logger.info("Nodes of type %s not handled: %s",
                    node_func, mod_data)
        return None
    # Skip gene/protein fusions
    if pc.FUSION in node_data:
        logger.info("Gene and protein fusions not handled: %s" % str(node_data))
        return None
    # COMPLEXES ------------
    # First, handle complexes, which will consist recursively of other agents
    if node_func == pc.COMPLEX:
        # First, check for members: if there are no members, we assume this
        # is a named complex
        members = node_data.get(pc.MEMBERS)
        if members is None:
            return None
        # Otherwise, get the "main" agent, to which the other members will be
        # attached as bound conditions
        main_agent = get_agent(members[0])
        # If we can't get the main agent, return None
        if main_agent is None:
            return None
        bound_conditions = [BoundCondition(get_agent(m), True)
                            for m in members[1:]]
        # Check the bound_conditions for any None agents
        if any([bc.agent is None for bc in bound_conditions]):
            return None
        main_agent.bound_conditions = bound_conditions
        # Get activity of main agent
        ac = _get_activity_condition(node_modifier_data)
        main_agent.activity = ac
        return main_agent
    # OTHER NODE TYPES -----
    # Get node identifier information
    name = node_data.get(pc.NAME)
    ns = node_data[pc.NAMESPACE]
    ident = node_data.get(pc.IDENTIFIER)
    # No ID present, get identifier using the name, namespace
    db_refs = None
    if not ident:
        assert name, "Node must have a name if lacking an identifier."
        if ns == 'HGNC':
            hgnc_id = hgnc_client.get_hgnc_id(name)
            if not hgnc_id:
                logger.info("Invalid HGNC name: %s (%s)" % (name, node_data))
                return None
            db_refs = {'HGNC': hgnc_id}
            up_id = _get_up_id(hgnc_id)
            if up_id:
                db_refs['UP'] = up_id
        # FIXME: Look up go ID in ontology lookup service
        # FIXME: Look up MESH IDs from name
        # FIXME: For now, just use node name
        elif ns in ('GOBP', 'MESHPP', 'MESHD'):
            db_refs = {}
        # For now, handle MGI/RGD but putting the name into the db_refs so
        # it's clear what namespace the name belongs to
        # FIXME: Full implementation would look up MGI/RGD identifiers from
        # the names, and obtain corresponding Uniprot IDs
        elif ns in ('MGI', 'RGD'):
            db_refs = {ns: name}
        # Map Selventa families to FamPlexes
        elif ns == 'SFAM':
            db_refs = {'SFAM': name}
            indra_name = bel_to_indra.get(name)
            if indra_name is None:
                logger.info('Could not find mapping for BEL/SFAM family: '
                            '%s (%s)' % (name, node_data))
            else:
                db_refs['FPLX'] = indra_name
                name = indra_name
        # Map Entrez genes to HGNC/UP
        elif ns == 'EGID':
            hgnc_id = hgnc_client.get_hgnc_from_entrez(name)
            db_refs = {'EGID': name}
            if hgnc_id is not None:
                db_refs['HGNC'] = hgnc_id
                name = hgnc_client.get_hgnc_name(hgnc_id)
                up_id = hgnc_client.get_uniprot_id(hgnc_id)
                if up_id:
                    db_refs['UP'] = up_id
                else:
                    logger.info('HGNC entity %s with HGNC ID %s has no '
                                'corresponding Uniprot ID.',
                                name, hgnc_id)
            else:
                logger.info('Could not map EGID%s to HGNC.' % name)
                name = 'E%s' % name
        # CHEBI
        elif ns == 'CHEBI':
            chebi_id = chebi_name_id.get(name)
            if chebi_id:
                db_refs = {'CHEBI': chebi_id}
            else:
                logger.info('CHEBI name %s not found in map.' % name)
        # SDIS, SCHEM: Include the name as the ID for the namespace
        elif ns in ('SDIS', 'SCHEM'):
            db_refs = {ns: name}
        else:
            # Fix: use the module logger instead of print, consistent with
            # every other unhandled-case branch in this function
            logger.info("Unhandled namespace: %s: %s (%s)",
                        ns, name, node_data)
    # We've already got an identifier, look up other identifiers if necessary
    else:
        # Get the name, overwriting existing name if necessary
        if ns == 'HGNC':
            name = hgnc_client.get_hgnc_name(ident)
            db_refs = {'HGNC': ident}
            up_id = _get_up_id(ident)
            if up_id:
                db_refs['UP'] = up_id
        elif ns == 'UP':
            db_refs = {'UP': ident}
            name = uniprot_client.get_gene_name(ident)
            assert name
            if uniprot_client.is_human(ident):
                hgnc_id = hgnc_client.get_hgnc_id(name)
                if not hgnc_id:
                    logger.info('Uniprot ID linked to invalid human gene '
                                'name %s' % name)
                else:
                    db_refs['HGNC'] = hgnc_id
        elif ns in ('MGI', 'RGD'):
            raise ValueError('Identifiers for MGI and RGD databases are not '
                             'currently handled: %s' % node_data)
        else:
            # Fix: use the module logger instead of print (see above)
            logger.info("Unhandled namespace with identifier: %s: %s (%s)",
                        ns, name, node_data)
    if db_refs is None:
        logger.info('Unable to get identifier information for node: %s',
                    node_data)
        return None
    # Get modification conditions
    mods, muts = _get_all_pmods(node_data)
    # Get activity condition
    ac = _get_activity_condition(node_modifier_data)
    to_loc = _get_translocation_target(node_modifier_data)
    # Check for unhandled node modifiers, skip if so
    if _has_unhandled_modifiers(node_modifier_data):
        return None
    # Make the agent
    ag = Agent(name, db_refs=db_refs, mods=mods, mutations=muts, activity=ac,
               location=to_loc)
    return ag
def extract_context(annotations, annot_manager):
    """Return a BioContext object extracted from the annotations.

    The entries that are extracted into the BioContext are popped from the
    annotations.

    Parameters
    ----------
    annotations : dict
        PyBEL annotations dict
    annot_manager : AnnotationManager
        An annotation manager to get name/db reference mappings for each of
        the annotation types.

    Returns
    -------
    bc : BioContext
        An INDRA BioContext object
    """
    def get_annot(annotations, key):
        """Return a specific annotation given a key."""
        # NOTE: pops the key, mutating the caller's annotations dict.
        val = annotations.pop(key, None)
        if val:
            # PyBEL stores annotation values as {value: bool}; keep only
            # the values flagged True.
            val_list = [v for v, tf in val.items() if tf]
            if len(val_list) > 1:
                logger.warning('More than one "%s" in annotations' % key)
            elif not val_list:
                return None
            return val_list[0]
        return None

    bc = BioContext()
    species = get_annot(annotations, 'Species')
    if species:
        name = annot_manager.get_mapping('Species', species)
        bc.species = RefContext(name=name, db_refs={'TAXONOMY': species})
    # (BEL annotation name, BioContext attribute, fixed namespace or None)
    mappings = (('CellLine', 'cell_line', None),
                ('Disease', 'disease', None),
                ('Anatomy', 'organ', None),
                ('Cell', 'cell_type', None),
                ('CellStructure', 'location', 'MESH'))
    for bel_name, indra_name, ns in mappings:
        ann = get_annot(annotations, bel_name)
        if ann:
            ref = annot_manager.get_mapping(bel_name, ann)
            if ref is None:
                continue
            if not ns:
                # presumably refs look like 'NS_identifier' here — TODO confirm
                db_ns, db_id = ref.split('_', 1)
            else:
                db_ns, db_id = ns, ref
            setattr(bc, indra_name,
                    RefContext(name=ann, db_refs={db_ns: db_id}))
    # Overwrite blank BioContext
    if not bc:
        bc = None
    return bc
def _rel_is_direct(d):
    """Return True if the edge's relation is a direct BEL relation."""
    direct_relations = (pc.DIRECTLY_INCREASES, pc.DIRECTLY_DECREASES)
    return d[pc.RELATION] in direct_relations
def _get_up_id(hgnc_id):
    """Look up the Uniprot ID for an HGNC ID, logging when none exists."""
    uniprot_id = hgnc_client.get_uniprot_id(hgnc_id)
    if uniprot_id:
        return uniprot_id
    logger.info("No Uniprot ID for HGNC ID %s" % hgnc_id)
    return uniprot_id
class AnnotationManager(object):
    """Caches BEL annotation resources and resolves annotation values."""

    def __init__(self, annotation_urls):
        # Fetch each annotation resource once up front, keyed by its type.
        self.resources = {key: get_bel_resource(url)
                          for key, url in annotation_urls.items()}
        # Values we failed to resolve, grouped by annotation type.
        self.failures = defaultdict(set)

    def get_mapping(self, key, value):
        """Return the term mapped to an annotation value, or None."""
        resource = self.resources.get(key)
        if resource is not None:
            term = resource['Values'].get(value)
            if term is not None:
                return term
            logger.warning('unhandled annotation: %s:%s', key, value)
            self.failures[key].add(value)
def _get_all_pmods(node_data):
    """Collect modification and mutation conditions from a node's variants.

    Returns a pair (mods, muts) of lists of ModCondition and MutCondition.
    """
    mods, muts = [], []
    variants = node_data.get(pc.VARIANTS)
    if not variants:
        return mods, muts
    for variant in variants:
        kind = variant[pc.KIND]
        if kind == pc.HGVS:
            hgvs_str = variant[pc.IDENTIFIER]
            position, res_from, res_to = _parse_mutation(hgvs_str)
            if (position, res_from, res_to) == (None, None, None):
                logger.info("Could not parse HGVS string %s" % hgvs_str)
            else:
                muts.append(MutCondition(position, res_from, res_to))
        elif kind == pc.PMOD:
            id_dict = variant[pc.IDENTIFIER]
            # Only default-namespace pmod names are mapped to INDRA types.
            if id_dict[pc.NAMESPACE] == pc.BEL_DEFAULT_NAMESPACE:
                bel_mod_name = id_dict[pc.NAME]
                mod_type = _pybel_indra_pmod_map.get(bel_mod_name)
                if mod_type is None:
                    logger.info("Unhandled modification type %s (%s)" %
                                (bel_mod_name, node_data))
                else:
                    mods.append(ModCondition(mod_type,
                                             variant.get(pc.PMOD_CODE),
                                             variant.get(pc.PMOD_POSITION)))
        # FIXME These unhandled mod types should result in throwing out
        # the node (raise, or return None)
        elif kind == pc.GMOD:
            logger.debug('Unhandled node variant GMOD: %s' % node_data)
        elif kind == pc.FRAGMENT:
            logger.debug('Unhandled node variant FRAG: %s' % node_data)
        else:
            logger.debug('Unknown node variant type: %s' % node_data)
    return mods, muts
def _get_activity_condition(node_modifier_data):
    """Translate a PyBEL activity modifier into an INDRA ActivityCondition."""
    if node_modifier_data is None or node_modifier_data == {}:
        return None
    if node_modifier_data.get(pc.MODIFIER) != pc.ACTIVITY:
        return None
    effect = node_modifier_data.get(pc.EFFECT)
    # No specific effect: generic activity
    if not effect:
        return ActivityCondition('activity', True)
    if effect[pc.NAMESPACE] == pc.BEL_DEFAULT_NAMESPACE:
        mapped_type = _pybel_indra_act_map.get(effect[pc.NAME])
        # Activity types not implemented in INDRA fall back to generic
        if mapped_type is not None:
            return ActivityCondition(mapped_type, True)
        return ActivityCondition('activity', True)
    # Unsupported namespace: fall back to generic activity
    return ActivityCondition('activity', True)
def _get_translocation_target(node_modifier_data):
    """Return the INDRA location a translocation modifier points to, if any."""
    # Must be a translocation modifier
    if node_modifier_data is None or node_modifier_data == {}:
        return None
    if node_modifier_data.get(pc.MODIFIER) != pc.TRANSLOCATION:
        return None
    # Must have information on the translocation target
    transloc_data = node_modifier_data.get(pc.EFFECT)
    if transloc_data is None:
        return None
    to_loc_info = transloc_data.get(pc.TO_LOC)
    if not to_loc_info:
        return None
    # Only GO Cellular Component location names are accepted
    if to_loc_info.get(pc.NAMESPACE) != 'GOCC':
        return None
    to_loc_name = to_loc_info.get(pc.NAME)
    if not to_loc_name:
        return None
    try:
        return get_valid_location(to_loc_name)
    except InvalidLocationError:
        return None
def _has_unhandled_modifiers(node_modifier_data):
    """Return True if the node modifier cannot be represented in INDRA.

    Cell secretion and cell surface expression modifiers are logged and
    flagged as unhandled; anything else is considered handled.
    """
    # First check if there is a translocation modifier
    if node_modifier_data is None or node_modifier_data == {}:
        return False
    mod = node_modifier_data.get(pc.MODIFIER)
    if mod is None:
        return False
    if mod in (pc.CELL_SECRETION, pc.CELL_SURFACE_EXPRESSION):
        logger.info("Unhandled node modifier data: %s" % node_modifier_data)
        return True
    # Fix: previously fell off the end and returned None here; make the
    # falsy result an explicit bool so the contract is consistent.
    return False
def _proteins_match(u_data, v_data):
    """True if both nodes are proteins with the same namespace and name."""
    for node in (u_data, v_data):
        if node[pc.FUNCTION] != pc.PROTEIN:
            return False
        if pc.NAMESPACE not in node or pc.NAME not in node:
            return False
    return (u_data[pc.NAMESPACE] == v_data[pc.NAMESPACE] and
            u_data[pc.NAME] == v_data[pc.NAME])
_hgvs_protein_mutation = re.compile('^p.([A-Z][a-z]{2})(\d+)([A-Z][a-z]{2})')
def _parse_mutation(s):
m = _hgvs_protein_mutation.match(s)
if not m:
return None, None, None
from_aa, position, to_aa = m.groups()
return position, from_aa, to_aa
|
pvtodorov/indra
|
indra/sources/bel/processor.py
|
Python
|
bsd-2-clause
| 28,892
|
[
"Pybel"
] |
54b5f0d09faf8570d68e8045c84f32522030e639bb87a2ad1df60e051299b227
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for kernelized.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras import backend as keras_backend
from tensorflow.python.keras import initializers
from tensorflow.python.keras.layers import kernelized as kernel_layers
from tensorflow.python.keras.utils import kernelized_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
def _exact_gaussian(stddev):
  """Return the exact Gaussian kernel with the given stddev bound in."""
  kernel_fn = kernelized_utils.exact_gaussian_kernel
  return functools.partial(kernel_fn, stddev=stddev)
def _exact_laplacian(stddev):
  """Return the exact Laplacian kernel with the given stddev bound in."""
  kernel_fn = kernelized_utils.exact_laplacian_kernel
  return functools.partial(kernel_fn, stddev=stddev)
class RandomFourierFeaturesTest(test.TestCase, parameterized.TestCase):
def _assert_all_close(self, expected, actual, atol=0.001):
if not context.executing_eagerly():
with self.cached_session() as sess:
keras_backend._initialize_variables(sess)
self.assertAllClose(expected, actual, atol=atol)
else:
self.assertAllClose(expected, actual, atol=atol)
  @test_util.run_in_graph_and_eager_modes()
  def test_invalid_output_dim(self):
    """Constructing the layer with a non-positive output_dim raises."""
    with self.assertRaisesRegexp(
        ValueError, r'`output_dim` should be a positive integer. Given: -3.'):
      _ = kernel_layers.RandomFourierFeatures(output_dim=-3, scale=2.0)
  @test_util.run_in_graph_and_eager_modes()
  def test_unsupported_kernel_type(self):
    """An unknown kernel-initializer string raises ValueError."""
    with self.assertRaisesRegexp(
        ValueError, r'Unsupported kernel type: \'unsupported_kernel\'.'):
      _ = kernel_layers.RandomFourierFeatures(
          3, 'unsupported_kernel', stddev=2.0)
  @test_util.run_in_graph_and_eager_modes()
  def test_invalid_scale(self):
    """A non-positive scale raises ValueError."""
    with self.assertRaisesRegexp(
        ValueError,
        r'When provided, `scale` should be a positive float. Given: 0.0.'):
      _ = kernel_layers.RandomFourierFeatures(output_dim=10, scale=0.0)
  @test_util.run_in_graph_and_eager_modes()
  def test_invalid_input_shape(self):
    """Inputs with rank other than 2 are rejected at call time."""
    inputs = random_ops.random_uniform((3, 2, 4), seed=1)
    rff_layer = kernel_layers.RandomFourierFeatures(output_dim=10, scale=3.0)
    with self.assertRaisesRegexp(
        ValueError,
        r'The rank of the input tensor should be 2. Got 3 instead.'):
      _ = rff_layer(inputs)
  @parameterized.named_parameters(
      ('gaussian', 'gaussian', 10.0, False),
      ('random', init_ops.random_uniform_initializer, 1.0, True))
  @test_util.run_in_graph_and_eager_modes()
  def test_random_features_properties(self, initializer, scale, trainable):
    """Constructor arguments are exposed unchanged as layer attributes."""
    rff_layer = kernel_layers.RandomFourierFeatures(
        output_dim=10,
        kernel_initializer=initializer,
        scale=scale,
        trainable=trainable)
    self.assertEqual(rff_layer.output_dim, 10)
    self.assertEqual(rff_layer.kernel_initializer, initializer)
    self.assertEqual(rff_layer.scale, scale)
    self.assertEqual(rff_layer.trainable, trainable)
  @parameterized.named_parameters(('gaussian', 'gaussian', False),
                                  ('laplacian', 'laplacian', True),
                                  ('other', init_ops.ones_initializer, True))
  @test_util.run_in_graph_and_eager_modes()
  def test_call(self, initializer, trainable):
    """Calling the layer yields (batch, output_dim) outputs and the
    expected trainable/non-trainable variable split."""
    rff_layer = kernel_layers.RandomFourierFeatures(
        output_dim=10,
        kernel_initializer=initializer,
        scale=1.0,
        trainable=trainable,
        name='random_fourier_features')
    inputs = random_ops.random_uniform((3, 2), seed=1)
    outputs = rff_layer(inputs)
    self.assertListEqual([3, 10], outputs.shape.as_list())
    # The layer creates 3 variables in total; when trainable=True one of
    # them becomes trainable — presumably the kernel, TODO confirm against
    # kernelized.py.
    num_trainable_vars = 1 if trainable else 0
    self.assertLen(rff_layer.non_trainable_variables, 3 - num_trainable_vars)
  @test_util.assert_no_new_pyobjects_executing_eagerly
  def test_no_eager_Leak(self):
    # Tests that repeatedly constructing and building a Layer does not leak
    # Python objects.
    # NOTE(review): method name breaks snake_case ('Leak'); renaming would
    # change the reported test id, so it is left as is.
    inputs = random_ops.random_uniform((5, 4), seed=1)
    kernel_layers.RandomFourierFeatures(output_dim=4, name='rff')(inputs)
    kernel_layers.RandomFourierFeatures(output_dim=10, scale=2.0)(inputs)
  @test_util.run_in_graph_and_eager_modes()
  def test_output_shape(self):
    """The output's static shape is (batch, output_dim)."""
    inputs = random_ops.random_uniform((3, 2), seed=1)
    rff_layer = kernel_layers.RandomFourierFeatures(
        output_dim=7, name='random_fourier_features', trainable=True)
    outputs = rff_layer(inputs)
    self.assertEqual([3, 7], outputs.shape.as_list())
  @parameterized.named_parameters(
      ('gaussian', 'gaussian'), ('laplacian', 'laplacian'),
      ('other', init_ops.random_uniform_initializer))
  @test_util.run_deprecated_v1
  def test_call_on_placeholder(self, initializer):
    """The innermost input dimension must be statically known at call time."""
    # Fully-unknown shape: rejected.
    inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None])
    rff_layer = kernel_layers.RandomFourierFeatures(
        output_dim=5,
        kernel_initializer=initializer,
        name='random_fourier_features')
    with self.assertRaisesRegexp(
        ValueError, r'The last dimension of the inputs to '
        '`RandomFourierFeatures` should be defined. Found `None`.'):
      rff_layer(inputs)
    # Known batch but unknown feature dimension: also rejected.
    inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[2, None])
    rff_layer = kernel_layers.RandomFourierFeatures(
        output_dim=5,
        kernel_initializer=initializer,
        name='random_fourier_features')
    with self.assertRaisesRegexp(
        ValueError, r'The last dimension of the inputs to '
        '`RandomFourierFeatures` should be defined. Found `None`.'):
      rff_layer(inputs)
    # Unknown batch with a known feature dimension is accepted.
    inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 3])
    rff_layer = kernel_layers.RandomFourierFeatures(
        output_dim=5, name='random_fourier_features')
    rff_layer(inputs)
  @parameterized.named_parameters(('gaussian', 10, 'gaussian', 2.0),
                                  ('laplacian', 5, 'laplacian', None),
                                  ('other', 10, init_ops.ones_initializer, 1.0))
  @test_util.run_in_graph_and_eager_modes()
  def test_compute_output_shape(self, output_dim, initializer, scale):
    """compute_output_shape validates rank and the innermost dimension."""
    rff_layer = kernel_layers.RandomFourierFeatures(
        output_dim, initializer, scale=scale, name='rff')
    # Unknown, scalar, rank-1 and rank-3 shapes are all invalid.
    with self.assertRaises(ValueError):
      rff_layer.compute_output_shape(tensor_shape.TensorShape(None))
    with self.assertRaises(ValueError):
      rff_layer.compute_output_shape(tensor_shape.TensorShape([]))
    with self.assertRaises(ValueError):
      rff_layer.compute_output_shape(tensor_shape.TensorShape([3]))
    with self.assertRaises(ValueError):
      rff_layer.compute_output_shape(tensor_shape.TensorShape([3, 2, 3]))
    # Rank 2 with an undefined innermost dimension is invalid too.
    with self.assertRaisesRegexp(
        ValueError, r'The innermost dimension of input shape must be defined.'):
      rff_layer.compute_output_shape(tensor_shape.TensorShape([3, None]))
    # Valid rank-2 shapes map (batch, features) -> (batch, output_dim).
    self.assertEqual([None, output_dim],
                     rff_layer.compute_output_shape((None, 3)).as_list())
    self.assertEqual([None, output_dim],
                     rff_layer.compute_output_shape(
                         tensor_shape.TensorShape([None, 2])).as_list())
    self.assertEqual([4, output_dim],
                     rff_layer.compute_output_shape((4, 1)).as_list())
  @parameterized.named_parameters(
      ('gaussian', 10, 'gaussian', 3.0, False),
      ('laplacian', 5, 'laplacian', 5.5, True),
      ('other', 7, init_ops.random_uniform_initializer(), None, True))
  @test_util.run_in_graph_and_eager_modes()
  def test_get_config(self, output_dim, initializer, scale, trainable):
    """get_config reflects every constructor argument."""
    rff_layer = kernel_layers.RandomFourierFeatures(
        output_dim,
        initializer,
        scale=scale,
        trainable=trainable,
        name='random_fourier_features',
    )
    expected_initializer = initializer
    # Initializer *instances* appear serialized in the config; string
    # identifiers ('gaussian', 'laplacian') are stored verbatim.
    if isinstance(initializer, init_ops.Initializer):
      expected_initializer = initializers.serialize(initializer)
    expected_config = {
        'output_dim': output_dim,
        'kernel_initializer': expected_initializer,
        'scale': scale,
        'name': 'random_fourier_features',
        'trainable': trainable,
        'dtype': None,
    }
    # Compare sizes and unordered items since dict ordering is irrelevant.
    self.assertLen(expected_config, len(rff_layer.get_config()))
    self.assertSameElements(
        list(expected_config.items()), list(rff_layer.get_config().items()))
  @parameterized.named_parameters(
      ('gaussian', 5, 'gaussian', None, True),
      ('laplacian', 5, 'laplacian', 5.5, False),
      ('other', 7, init_ops.ones_initializer(), 2.0, True))
  @test_util.run_in_graph_and_eager_modes()
  def test_from_config(self, output_dim, initializer, scale, trainable):
    """A layer rebuilt via from_config matches the original arguments."""
    model_config = {
        'output_dim': output_dim,
        'kernel_initializer': initializer,
        'scale': scale,
        'trainable': trainable,
        'name': 'random_fourier_features',
    }
    rff_layer = kernel_layers.RandomFourierFeatures.from_config(model_config)
    # All constructor attributes survive the round-trip.
    self.assertEqual(rff_layer.output_dim, output_dim)
    self.assertEqual(rff_layer.kernel_initializer, initializer)
    self.assertEqual(rff_layer.scale, scale)
    self.assertEqual(rff_layer.trainable, trainable)
    # The rebuilt layer is functional and produces the expected shape.
    inputs = random_ops.random_uniform((3, 2), seed=1)
    outputs = rff_layer(inputs)
    self.assertListEqual([3, output_dim], outputs.shape.as_list())
    # The layer owns three variables in total; at most one (the scale)
    # is trainable, depending on the `trainable` flag.
    num_trainable_vars = 1 if trainable else 0
    self.assertLen(rff_layer.trainable_variables, num_trainable_vars)
    if trainable:
      self.assertEqual('random_fourier_features/random_features_scale:0',
                       rff_layer.trainable_variables[0].name)
    self.assertLen(rff_layer.non_trainable_variables, 3 - num_trainable_vars)
@parameterized.named_parameters(
('gaussian', 10, 'gaussian', 3.0, True),
('laplacian', 5, 'laplacian', 5.5, False),
('other', 10, init_ops.random_uniform_initializer(), None, True))
@test_util.run_in_graph_and_eager_modes()
def test_same_random_features_params_reused(self, output_dim, initializer,
scale, trainable):
"""Applying the layer on the same input twice gives the same output."""
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=output_dim,
kernel_initializer=initializer,
scale=scale,
trainable=trainable,
name='random_fourier_features')
inputs = constant_op.constant(
np.random.uniform(low=-1.0, high=1.0, size=(2, 4)))
output1 = rff_layer(inputs)
output2 = rff_layer(inputs)
self._assert_all_close(output1, output2)
  @parameterized.named_parameters(
      ('gaussian', 'gaussian', 5.0), ('laplacian', 'laplacian', 3.0),
      ('other', init_ops.random_uniform_initializer(), 5.0))
  @test_util.run_in_graph_and_eager_modes()
  def test_different_params_similar_approximation(self, initializer, scale):
    """Two wide, independently-drawn layers approximate similar kernel values."""
    random_seed.set_random_seed(12345)
    rff_layer1 = kernel_layers.RandomFourierFeatures(
        output_dim=3000,
        kernel_initializer=initializer,
        scale=scale,
        name='rff1')
    rff_layer2 = kernel_layers.RandomFourierFeatures(
        output_dim=2000,
        kernel_initializer=initializer,
        scale=scale,
        name='rff2')
    # Two distinct inputs.
    x = constant_op.constant([[1.0, -1.0, 0.5]])
    y = constant_op.constant([[-1.0, 1.0, 1.0]])
    # Apply both layers to both inputs, normalizing by sqrt(2 / output_dim).
    output_x1 = math.sqrt(2.0 / 3000.0) * rff_layer1(x)
    output_y1 = math.sqrt(2.0 / 3000.0) * rff_layer1(y)
    output_x2 = math.sqrt(2.0 / 2000.0) * rff_layer2(x)
    output_y2 = math.sqrt(2.0 / 2000.0) * rff_layer2(y)
    # Compute the inner products of the outputs (on inputs x and y) for both
    # layers. For any fixed random features layer rff_layer, and inputs x, y,
    # rff_layer(x)^T * rff_layer(y) ~= K(x,y) up to a normalization factor.
    approx_kernel1 = kernelized_utils.inner_product(output_x1, output_y1)
    approx_kernel2 = kernelized_utils.inner_product(output_x2, output_y2)
    self._assert_all_close(approx_kernel1, approx_kernel2, atol=0.08)
  @parameterized.named_parameters(
      ('gaussian', 'gaussian', 5.0, _exact_gaussian(stddev=5.0)),
      ('laplacian', 'laplacian', 20.0, _exact_laplacian(stddev=20.0)))
  @test_util.run_in_graph_and_eager_modes()
  def test_bad_kernel_approximation(self, initializer, scale, exact_kernel_fn):
    """Approximation is bad when output dimension is small."""
    # Two distinct inputs.
    x = constant_op.constant([[1.0, -1.0, 0.5]])
    y = constant_op.constant([[-1.0, 1.0, 1.0]])
    small_output_dim = 10
    random_seed.set_random_seed(1234)
    # Initialize layer.
    rff_layer = kernel_layers.RandomFourierFeatures(
        output_dim=small_output_dim,
        kernel_initializer=initializer,
        scale=scale,
        name='random_fourier_features')
    # Apply layer to both inputs.
    output_x = math.sqrt(2.0 / small_output_dim) * rff_layer(x)
    output_y = math.sqrt(2.0 / small_output_dim) * rff_layer(y)
    # The inner products of the outputs (on inputs x and y) approximates the
    # real value of the RBF kernel but poorly since the output dimension of the
    # layer is small.
    exact_kernel_value = exact_kernel_fn(x, y)
    approx_kernel_value = kernelized_utils.inner_product(output_x, output_y)
    abs_error = math_ops.abs(exact_kernel_value - approx_kernel_value)
    if not context.executing_eagerly():
      # Graph mode: variables must be initialized before evaluating tensors.
      with self.cached_session() as sess:
        keras_backend._initialize_variables(sess)
        abs_error_eval = sess.run([abs_error])
        # The error should be noticeable (> 0.05) but still bounded (< 0.5).
        self.assertGreater(abs_error_eval[0][0], 0.05)
        self.assertLess(abs_error_eval[0][0], 0.5)
    else:
      self.assertGreater(abs_error, 0.05)
      self.assertLess(abs_error, 0.5)
  @parameterized.named_parameters(
      ('gaussian', 'gaussian', 5.0, _exact_gaussian(stddev=5.0)),
      ('laplacian', 'laplacian', 10.0, _exact_laplacian(stddev=10.0)))
  @test_util.run_in_graph_and_eager_modes()
  def test_good_kernel_approximation_multiple_inputs(self, initializer, scale,
                                                     exact_kernel_fn):
    """With a wide layer, the approximate kernel matrix is close to exact."""
    # Parameters.
    input_dim = 5
    output_dim = 2000
    x_rows = 20
    y_rows = 30
    x = constant_op.constant(
        np.random.uniform(size=(x_rows, input_dim)), dtype=dtypes.float32)
    y = constant_op.constant(
        np.random.uniform(size=(y_rows, input_dim)), dtype=dtypes.float32)
    random_seed.set_random_seed(1234)
    rff_layer = kernel_layers.RandomFourierFeatures(
        output_dim=output_dim,
        kernel_initializer=initializer,
        scale=scale,
        name='random_fourier_features')
    # The shapes of output_x and output_y are (x_rows, output_dim) and
    # (y_rows, output_dim) respectively.
    output_x = math.sqrt(2.0 / output_dim) * rff_layer(x)
    output_y = math.sqrt(2.0 / output_dim) * rff_layer(y)
    approx_kernel_matrix = kernelized_utils.inner_product(output_x, output_y)
    exact_kernel_matrix = exact_kernel_fn(x, y)
    self._assert_all_close(approx_kernel_matrix, exact_kernel_matrix, atol=0.05)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
alsrgv/tensorflow
|
tensorflow/python/keras/layers/kernelized_test.py
|
Python
|
apache-2.0
| 16,217
|
[
"Gaussian"
] |
92a3359a2f185e64ab4acd81179fdea6201da82b995b35be2124abfb255eb411
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import espressomd
import numpy as np
import math
import espressomd.interactions
import espressomd.electrostatics
# Non-trivial Coulomb prefactor, used both for the P3M actor and for the
# analytical reference force/energy expressions in the test below.
COULOMB_PREFACTOR = 4.01
@utx.skipIfMissingFeatures(["THOLE", "EXTERNAL_FORCES", "P3M"])
class TestThole(ut.TestCase):
    """
    This testcase takes a large box to minimize periodic effects and tests the
    thole damping nonbonded interaction forces against the analytical result.
    """
    # Two opposite charges, slightly off +/-1 to avoid accidentally
    # symmetric values masking sign errors.
    q1 = 1.01
    q2 = -1.01
    # Thole damping (scaling) coefficient, shared between the simulated
    # interaction and the analytical reference below.
    thole_s = 1.99
    box_l = 500.0
    system = espressomd.System(box_l=[box_l] * 3)
    def setUp(self):
        """Place two fixed point charges, enable P3M and Thole damping."""
        self.system.time_step = 0.01
        self.system.cell_system.skin = 0.4
        self.p0 = self.system.part.add(
            pos=[
                0, 0, 0], type=0, fix=[
                1, 1, 1], q=self.q1)
        self.p1 = self.system.part.add(
            pos=[
                2, 0, 0], type=0, fix=[
                1, 1, 1], q=self.q2)
        p3m = espressomd.electrostatics.P3M(
            prefactor=COULOMB_PREFACTOR, accuracy=1e-6, mesh=3 * [52], cao=4)
        self.system.actors.add(p3m)
        self.system.non_bonded_inter[0, 0].thole.set_params(
            scaling_coeff=self.thole_s, q1q2=self.q1 * self.q2)
    def test(self):
        """Scan the separation and compare forces/energies with analytics."""
        res_dForce = []
        res_dEnergy = []
        ns = 100
        for i in range(1, ns):
            # Move the second charge along x through distances (0, 20).
            x = 20.0 * i / ns
            self.p1.pos = [x, 0, 0]
            self.system.integrator.run(0)
            sd = x * self.thole_s
            # Force is exact
            F_calc = COULOMB_PREFACTOR * self.q1 * self.q2 / x**2 * \
                0.5 * (2.0 - (np.exp(-sd) * (sd * (sd + 2.0) + 2.0)))
            # Energy is slightly off due to self-energy.
            # Error is approximated with erfc for given system parameters
            E_calc = COULOMB_PREFACTOR * self.q1 * self.q2 / x * \
                (1.0 - np.exp(-sd) * (1.0 + sd / 2.0)) - \
                0.250088 * math.erfc(0.741426 * x)
            E = self.system.analysis.energy()
            E_tot = E["total"]
            res_dForce.append(self.p1.f[0] - F_calc)
            res_dEnergy.append(E_tot - E_calc)
        # All sampled deviations must stay below the tolerances.
        for f in res_dForce:
            self.assertLess(
                abs(f), 1e-3, msg="Deviation of thole interaction force (damped coulomb) from analytical result too large")
        for e in res_dEnergy:
            self.assertLess(
                abs(e), 0.012, msg="Deviation of thole interaction energy (damped coulomb) from analytical result too large")
# Run the test suite when executed as a script.
if __name__ == "__main__":
    ut.main()
|
pkreissl/espresso
|
testsuite/python/thole.py
|
Python
|
gpl-3.0
| 3,279
|
[
"ESPResSo"
] |
0be351882f919f94b2a0261acb164bbad5b41f444390a0c9437a55a7daed41a2
|
""" Test functions for stats module
"""
from __future__ import division, print_function, absolute_import
import warnings
import re
import sys
from numpy.testing import (TestCase, run_module_suite, assert_equal,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_allclose, assert_, assert_raises, rand, dec)
from nose import SkipTest
import numpy
import numpy as np
from numpy import typecodes, array
from scipy._lib._version import NumpyVersion
from scipy import special
import scipy.stats as stats
from scipy.stats._distn_infrastructure import argsreduce
import scipy.stats.distributions
from scipy.special import xlogy
# python -OO strips docstrings; sys.flags.optimize > 1 detects that mode.
# NOTE(review): presumably consulted to skip docstring-dependent tests
# elsewhere in this file -- confirm.
DOCSTRINGS_STRIPPED = sys.flags.optimize > 1
# Generate test cases to test cdf and distribution consistency.
# Note that this list does not include all distributions.
# Each name must resolve to a distribution via getattr(stats, name);
# test_all_distributions below iterates over this list.
dists = ['uniform','norm','lognorm','expon','beta',
         'powerlaw','bradford','burr','fisk','cauchy','halfcauchy',
         'foldcauchy','gamma','gengamma','loggamma',
         'alpha','anglit','arcsine','betaprime','dgamma',
         'exponnorm', 'exponweib','exponpow','frechet_l','frechet_r',
         'gilbrat','f','ncf','chi2','chi','nakagami','genpareto',
         'genextreme','genhalflogistic','pareto','lomax','halfnorm',
         'halflogistic','fatiguelife','foldnorm','ncx2','t','nct',
         'weibull_min','weibull_max','dweibull','maxwell','rayleigh',
         'genlogistic', 'logistic','gumbel_l','gumbel_r','gompertz',
         'hypsecant', 'laplace', 'reciprocal','triang','tukeylambda',
         'vonmises', 'vonmises_line', 'pearson3', 'gennorm', 'halfgennorm',
         'rice']
def _assert_hasattr(a, b, msg=None):
if msg is None:
msg = '%s does not have attribute %s' % (a, b)
assert_(hasattr(a, b), msg=msg)
def test_api_regression():
    """The distribution generator class f_gen must remain importable."""
    # Regression check for https://github.com/scipy/scipy/issues/3802.
    dist_module = scipy.stats.distributions
    assert_(hasattr(dist_module, 'f_gen'),
            msg='%s does not have attribute %s' % (dist_module, 'f_gen'))
# check function for test generator
def check_distribution(dist, args, alpha):
    """Kolmogorov-Smirnov check: samples of ``dist`` match its own cdf."""
    # An empty cdf string makes kstest compare dist's rvs against its cdf.
    D,pval = stats.kstest(dist,'', args=args, N=1000)
    if (pval < alpha):
        # The check is statistical; retry once before declaring failure.
        D,pval = stats.kstest(dist,'',args=args, N=1000)
        # if (pval < alpha):
        #    D,pval = stats.kstest(dist,'',args=args, N=1000)
    assert_(pval > alpha, msg="D = " + str(D) + "; pval = " + str(pval) +
            "; alpha = " + str(alpha) + "\nargs = " + str(args))
# nose test generator
def test_all_distributions():
    """Yield a KS consistency check for each distribution in ``dists``."""
    for dist in dists:
        distfunc = getattr(stats, dist)
        nargs = distfunc.numargs
        alpha = 0.01
        if dist == 'fatiguelife':
            # Known to be statistically marginal; use a looser threshold.
            alpha = 0.001
        if dist == 'frechet':
            # NOTE(review): 'frechet' is not in ``dists`` above, so this
            # branch looks unreachable -- confirm before removing.
            args = tuple(2*rand(1))+(0,)+tuple(2*rand(2))
        elif dist == 'triang':
            args = tuple(rand(nargs))
        elif dist == 'reciprocal':
            # reciprocal requires lower bound < upper bound.
            vals = rand(nargs)
            vals[1] = vals[0] + 1.0
            args = tuple(vals)
        elif dist == 'vonmises':
            # Also exercise fixed concentrations besides the random one.
            yield check_distribution, dist, (10,), alpha
            yield check_distribution, dist, (101,), alpha
            args = tuple(1.0+rand(nargs))
        else:
            args = tuple(1.0+rand(nargs))
        yield check_distribution, dist, args, alpha
def check_vonmises_pdf_periodic(k, l, s, x):
    """The vonmises pdf is periodic with period 2*pi*scale."""
    frozen = stats.vonmises(k, loc=l, scale=s)
    period = 2 * numpy.pi * s
    assert_almost_equal(frozen.pdf(x), frozen.pdf(x % period))
def check_vonmises_cdf_periodic(k, l, s, x):
    """The vonmises cdf is periodic (modulo 1) with period 2*pi*scale."""
    frozen = stats.vonmises(k, loc=l, scale=s)
    period = 2 * numpy.pi * s
    assert_almost_equal(frozen.cdf(x) % 1, frozen.cdf(x % period) % 1)
def test_vonmises_pdf_periodic():
    """Generator: pdf/cdf periodicity for several shapes and points."""
    for k in [0.1, 1, 101]:
        for x in [0, 1, numpy.pi, 10, 100]:
            # Exercise the same (loc, scale) combinations for both checks.
            for loc, scale in [(0, 1), (1, 1), (0, 10)]:
                yield check_vonmises_pdf_periodic, k, loc, scale, x
            for loc, scale in [(0, 1), (1, 1), (0, 10)]:
                yield check_vonmises_cdf_periodic, k, loc, scale, x
def test_vonmises_line_support():
    """vonmises_line is supported on the closed interval [-pi, pi]."""
    support = (stats.vonmises_line.a, stats.vonmises_line.b)
    assert_equal(support, (-np.pi, np.pi))
def test_vonmises_numerical():
    """cdf at the center must stay 0.5 for very large concentration."""
    # Large kappa is numerically delicate; check the symmetric midpoint.
    frozen = stats.vonmises(800)
    assert_almost_equal(frozen.cdf(0), 0.5)
class TestRandInt(TestCase):
    """Sampling, pmf and cdf checks for the discrete uniform ``randint``."""
    def test_rvs(self):
        # Samples from randint(5, 30) lie in [5, 30) with integer dtype.
        vals = stats.randint.rvs(5,30,size=100)
        assert_(numpy.all(vals < 30) & numpy.all(vals >= 5))
        assert_(len(vals) == 100)
        vals = stats.randint.rvs(5,30,size=(2,50))
        assert_(numpy.shape(vals) == (2,50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.randint.rvs(15,46)
        assert_((val >= 15) & (val < 46))
        assert_(isinstance(val, numpy.ScalarType), msg=repr(type(val)))
        val = stats.randint(15,46).rvs(3)
        assert_(val.dtype.char in typecodes['AllInteger'])
    def test_pdf(self):
        # Despite the name this checks the pmf: constant 1/(30-5) on [5, 30).
        k = numpy.r_[0:36]
        out = numpy.where((k >= 5) & (k < 30), 1.0/(30-5), 0)
        vals = stats.randint.pmf(k,5,30)
        assert_array_almost_equal(vals,out)
    def test_cdf(self):
        # The cdf is a staircase matching the cumulative pmf.
        x = numpy.r_[0:36:100j]
        k = numpy.floor(x)
        out = numpy.select([k >= 30,k >= 5],[1.0,(k-5.0+1)/(30-5.0)],0)
        vals = stats.randint.cdf(x,5,30)
        assert_array_almost_equal(vals, out, decimal=12)
class TestBinom(TestCase):
    """Sampling, pmf corner cases and entropy of the binomial."""
    def test_rvs(self):
        # Array draws lie in [0, n], right shape, integer dtype.
        vals = stats.binom.rvs(10, 0.75, size=(2, 50))
        assert_(numpy.all(vals >= 0) & numpy.all(vals <= 10))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.binom.rvs(10, 0.75)
        assert_(isinstance(val, int))
        val = stats.binom(10, 0.75).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
    def test_pmf(self):
        # regression test for Ticket #1842: pmf at the support edge with
        # degenerate p (0 or 1) must be exactly 1.
        vals1 = stats.binom.pmf(100, 100,1)
        vals2 = stats.binom.pmf(0, 100,0)
        assert_allclose(vals1, 1.0, rtol=1e-15, atol=0)
        assert_allclose(vals2, 1.0, rtol=1e-15, atol=0)
    def test_entropy(self):
        # Basic entropy tests against a hand-computed pmf.
        b = stats.binom(2, 0.5)
        expected_p = np.array([0.25, 0.5, 0.25])
        expected_h = -sum(xlogy(expected_p, expected_p))
        h = b.entropy()
        assert_allclose(h, expected_h)
        # Degenerate distributions (p=0 or p=1) have zero entropy.
        b = stats.binom(2, 0.0)
        h = b.entropy()
        assert_equal(h, 0.0)
        b = stats.binom(2, 1.0)
        h = b.entropy()
        assert_equal(h, 0.0)
    def test_warns_p0(self):
        # no spurious warnings are generated for p=0; gh-3817
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)
            assert_equal(stats.binom(n=2, p=0).mean(), 0)
            assert_equal(stats.binom(n=2, p=0).std(), 0)
class TestBernoulli(TestCase):
    """Sampling and entropy checks for the Bernoulli distribution."""
    def test_rvs(self):
        # Array draws are 0/1 valued, right shape, integer dtype.
        vals = stats.bernoulli.rvs(0.75, size=(2, 50))
        assert_(numpy.all(vals >= 0) & numpy.all(vals <= 1))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.bernoulli.rvs(0.75)
        assert_(isinstance(val, int))
        val = stats.bernoulli(0.75).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
    def test_entropy(self):
        # Simple tests of entropy.
        b = stats.bernoulli(0.25)
        expected_h = -0.25*np.log(0.25) - 0.75*np.log(0.75)
        h = b.entropy()
        assert_allclose(h, expected_h)
        # Degenerate cases (p=0 and p=1) carry zero entropy.
        b = stats.bernoulli(0.0)
        h = b.entropy()
        assert_equal(h, 0.0)
        b = stats.bernoulli(1.0)
        h = b.entropy()
        assert_equal(h, 0.0)
class TestNBinom(TestCase):
    """Sampling and logpmf regressions for the negative binomial."""
    def test_rvs(self):
        # Array draws: nonnegative, right shape, integer dtype.
        vals = stats.nbinom.rvs(10, 0.75, size=(2, 50))
        assert_(numpy.all(vals >= 0))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.nbinom.rvs(10, 0.75)
        assert_(isinstance(val, int))
        val = stats.nbinom(10, 0.75).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
    def test_pmf(self):
        # regression test for ticket 1779: exp(logpmf) must equal pmf
        assert_allclose(np.exp(stats.nbinom.logpmf(700, 721, 0.52)),
                        stats.nbinom.pmf(700, 721, 0.52))
        # logpmf(0,1,1) shouldn't return nan (regression test for gh-4029)
        val = scipy.stats.nbinom.logpmf(0,1,1)
        assert_equal(val,0)
class TestGeom(TestCase):
    """Sampling, pmf/cdf/sf and ppf checks for the geometric distribution."""
    def test_rvs(self):
        # Array draws: nonnegative, right shape, integer dtype.
        vals = stats.geom.rvs(0.75, size=(2, 50))
        assert_(numpy.all(vals >= 0))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.geom.rvs(0.75)
        assert_(isinstance(val, int))
        val = stats.geom(0.75).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
    def test_pmf(self):
        # pmf(k) = p * (1-p)**(k-1) for k = 1, 2, 3 with p = 0.5.
        vals = stats.geom.pmf([1,2,3],0.5)
        assert_array_almost_equal(vals,[0.5,0.25,0.125])
    def test_logpmf(self):
        # regression test for ticket 1793: logpmf agrees with log(pmf)
        vals1 = np.log(stats.geom.pmf([1,2,3], 0.5))
        vals2 = stats.geom.logpmf([1,2,3], 0.5)
        assert_allclose(vals1, vals2, rtol=1e-15, atol=0)
        # regression test for gh-4028
        val = stats.geom.logpmf(1, 1)
        assert_equal(val, 0.0)
    def test_cdf_sf(self):
        # sf must be the exact complement of cdf.
        vals = stats.geom.cdf([1, 2, 3], 0.5)
        vals_sf = stats.geom.sf([1, 2, 3], 0.5)
        expected = array([0.5, 0.75, 0.875])
        assert_array_almost_equal(vals, expected)
        assert_array_almost_equal(vals_sf, 1-expected)
    def test_logcdf_logsf(self):
        # Log-space versions agree with the log of plain cdf/sf.
        vals = stats.geom.logcdf([1, 2, 3], 0.5)
        vals_sf = stats.geom.logsf([1, 2, 3], 0.5)
        expected = array([0.5, 0.75, 0.875])
        assert_array_almost_equal(vals, np.log(expected))
        assert_array_almost_equal(vals_sf, np.log1p(-expected))
    def test_ppf(self):
        # ppf inverts the cdf values checked above.
        vals = stats.geom.ppf([0.5, 0.75, 0.875], 0.5)
        expected = array([1.0, 2.0, 3.0])
        assert_array_almost_equal(vals, expected)
class TestGennorm(TestCase):
    """The generalized normal reduces to known laws at special betas."""
    def test_laplace(self):
        # beta=1 collapses gennorm onto the Laplace distribution.
        xs = [1, 2, 3]
        assert_almost_equal(stats.gennorm.pdf(xs, 1),
                            stats.laplace.pdf(xs))
    def test_norm(self):
        # beta=2 collapses gennorm onto a normal with scale 1/sqrt(2).
        xs = [1, 2, 3]
        assert_almost_equal(stats.gennorm.pdf(xs, 2),
                            stats.norm.pdf(xs, scale=2**-.5))
class TestHalfgennorm(TestCase):
    """halfgennorm reduces to known one-sided laws at special betas."""
    def test_expon(self):
        # beta=1 gives the exponential distribution.
        xs = [1, 2, 3]
        assert_almost_equal(stats.halfgennorm.pdf(xs, 1),
                            stats.expon.pdf(xs))
    def test_halfnorm(self):
        # beta=2 gives a half-normal with scale 1/sqrt(2).
        xs = [1, 2, 3]
        assert_almost_equal(stats.halfgennorm.pdf(xs, 2),
                            stats.halfnorm.pdf(xs, scale=2**-.5))
    def test_gennorm(self):
        # On the positive axis halfgennorm is twice the gennorm pdf.
        xs = [1, 2, 3]
        assert_almost_equal(stats.halfgennorm.pdf(xs, .497324),
                            2*stats.gennorm.pdf(xs, .497324))
class TestTruncnorm(TestCase):
    """ppf/isf regressions and tail sampling of the truncated normal."""
    def test_ppf_ticket1131(self):
        # Out-of-range quantiles yield nan; 0 and 1 map to support edges.
        vals = stats.truncnorm.ppf([-0.5,0,1e-4,0.5, 1-1e-4,1,2], -1., 1.,
                               loc=[3]*7, scale=2)
        expected = np.array([np.nan, 1, 1.00056419, 3, 4.99943581, 5, np.nan])
        assert_array_almost_equal(vals, expected)
    def test_isf_ticket1131(self):
        # isf mirrors ppf (same regression, ticket 1131).
        vals = stats.truncnorm.isf([-0.5,0,1e-4,0.5, 1-1e-4,1,2], -1., 1.,
                                   loc=[3]*7, scale=2)
        expected = np.array([np.nan, 5, 4.99943581, 3, 1.00056419, 1, np.nan])
        assert_array_almost_equal(vals, expected)
    def test_gh_2477_small_values(self):
        # Check a case that worked in the original issue.
        low, high = -11, -10
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low < x.min() < x.max() < high)
        # Check a case that failed in the original issue.
        low, high = 10, 11
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low < x.min() < x.max() < high)
    def test_gh_2477_large_values(self):
        # Check a case that fails because of extreme tailness.
        # Deliberately skipped: sampling this far in the tail is a known
        # limitation; the code below is kept for when it gets fixed.
        raise SkipTest('truncnorm rvs is know to fail at extreme tails')
        low, high = 100, 101
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low < x.min() < x.max() < high)
    def test_gh_1489_trac_962_rvs(self):
        # Check the original example.
        low, high = 10, 15
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low < x.min() < x.max() < high)
class TestHypergeom(TestCase):
    """Sampling, precision and corner-case checks for hypergeom."""
    def test_rvs(self):
        # Draws lie in [0, N] with an integer dtype.
        vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50))
        assert_(numpy.all(vals >= 0) &
                numpy.all(vals <= 3))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.hypergeom.rvs(20, 3, 10)
        assert_(isinstance(val, int))
        val = stats.hypergeom(20, 3, 10).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
    def test_precision(self):
        # comparison number from mpmath
        M = 2500
        n = 50
        N = 500
        tot = M
        good = n
        hgpmf = stats.hypergeom.pmf(2, tot, good, N)
        assert_almost_equal(hgpmf, 0.0010114963068932233, 11)
    def test_args(self):
        # test correct output for corner cases of arguments
        # see gh-2325
        assert_almost_equal(stats.hypergeom.pmf(0, 2, 1, 0), 1.0, 11)
        assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
        assert_almost_equal(stats.hypergeom.pmf(0, 2, 0, 2), 1.0, 11)
        # NOTE(review): the next line repeats the second check above;
        # possibly intended to be pmf(1, 2, 0, 2) -- confirm.
        assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
    def test_cdf_above_one(self):
        # for some values of parameters, hypergeom cdf was >1, see gh-2238
        assert_(0 <= stats.hypergeom.cdf(30, 13397950, 4363, 12390) <= 1.0)
    def test_precision2(self):
        # Test hypergeom precision for large numbers. See #1218.
        # Results compared with those from R.
        oranges = 9.9e4
        pears = 1.1e5
        fruits_eaten = np.array([3, 3.8, 3.9, 4, 4.1, 4.2, 5]) * 1e4
        quantile = 2e4
        res = []
        for eaten in fruits_eaten:
            res.append(stats.hypergeom.sf(quantile, oranges + pears, oranges, eaten))
        expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32,
                             8.265601e-11, 0.1237904, 1])
        assert_allclose(res, expected, atol=0, rtol=5e-7)
        # Test with array_like first argument
        quantiles = [1.9e4, 2e4, 2.1e4, 2.15e4]
        res2 = stats.hypergeom.sf(quantiles, oranges + pears, oranges, 4.2e4)
        expected2 = [1, 0.1237904, 6.511452e-34, 3.277667e-69]
        assert_allclose(res2, expected2, atol=0, rtol=5e-7)
    def test_entropy(self):
        # Simple tests of entropy against a hand-computed pmf.
        hg = stats.hypergeom(4, 1, 1)
        h = hg.entropy()
        expected_p = np.array([0.75, 0.25])
        expected_h = -np.sum(xlogy(expected_p, expected_p))
        assert_allclose(h, expected_h)
        # A deterministic draw has zero entropy.
        hg = stats.hypergeom(1, 1, 1)
        h = hg.entropy()
        assert_equal(h, 0.0)
class TestLoggamma(TestCase):
    """Moments of the log-gamma distribution against published values."""
    def test_stats(self):
        # The following precomputed values are from the table in section 2.2
        # of "A Statistical Study of Log-Gamma Distribution", by Ping Shing
        # Chan (thesis, McMaster University, 1993).
        table = np.array([
            # c, mean, var, skew, exc. kurt.
            0.5, -1.9635, 4.9348, -1.5351, 4.0000,
            1.0, -0.5772, 1.6449, -1.1395, 2.4000,
            12.0, 2.4427, 0.0869, -0.2946, 0.1735,
            ]).reshape(-1, 5)
        for c, mean, var, skew, kurt in table:
            # Bug fix: the moments string must be 'mvsk' -- rv_continuous
            # returns the moments in the order of the string, so the previous
            # 'msvk' yielded (mean, skew, var, kurt) and compared the skew
            # against the table's variance column (and vice versa).
            computed = stats.loggamma.stats(c, moments='mvsk')
            assert_array_almost_equal(computed, [mean, var, skew, kurt],
                                      decimal=4)
class TestLogser(TestCase):
    """Sampling sanity checks for the log-series distribution."""
    def test_rvs(self):
        # Draws are >= 1 (logser support), right shape, integer dtype.
        vals = stats.logser.rvs(0.75, size=(2, 50))
        assert_(numpy.all(vals >= 1))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.logser.rvs(0.75)
        assert_(isinstance(val, int))
        val = stats.logser(0.75).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
class TestPareto(TestCase):
    """Moment formulas of the Pareto distribution across shape regimes.

    For shape b, the n-th moment is finite only when b > n, so the mean,
    variance, skewness and kurtosis become finite one by one as b crosses
    1, 2, 3 and 4.
    """
    def test_stats(self):
        # Check the stats() method with some simple values. Also check
        # that the calculations do not trigger RuntimeWarnings.
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)
            # b <= 1: nothing is finite.
            m, v, s, k = stats.pareto.stats(0.5, moments='mvsk')
            assert_equal(m, np.inf)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)
            m, v, s, k = stats.pareto.stats(1.0, moments='mvsk')
            assert_equal(m, np.inf)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)
            # 1 < b <= 2: only the mean is finite.
            m, v, s, k = stats.pareto.stats(1.5, moments='mvsk')
            assert_equal(m, 3.0)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)
            m, v, s, k = stats.pareto.stats(2.0, moments='mvsk')
            assert_equal(m, 2.0)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)
            # 2 < b <= 3: mean and variance are finite.
            m, v, s, k = stats.pareto.stats(2.5, moments='mvsk')
            assert_allclose(m, 2.5 / 1.5)
            assert_allclose(v, 2.5 / (1.5*1.5*0.5))
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)
            m, v, s, k = stats.pareto.stats(3.0, moments='mvsk')
            assert_allclose(m, 1.5)
            assert_allclose(v, 0.75)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)
            # 3 < b <= 4: skewness also becomes finite.
            m, v, s, k = stats.pareto.stats(3.5, moments='mvsk')
            assert_allclose(m, 3.5 / 2.5)
            assert_allclose(v, 3.5 / (2.5*2.5*1.5))
            assert_allclose(s, (2*4.5/0.5)*np.sqrt(1.5/3.5))
            assert_equal(k, np.nan)
            m, v, s, k = stats.pareto.stats(4.0, moments='mvsk')
            assert_allclose(m, 4.0 / 3.0)
            assert_allclose(v, 4.0 / 18.0)
            assert_allclose(s, 2*(1+4.0)/(4.0-3) * np.sqrt((4.0-2)/4.0))
            assert_equal(k, np.nan)
            # b > 4: all four moments are finite.
            m, v, s, k = stats.pareto.stats(4.5, moments='mvsk')
            assert_allclose(m, 4.5 / 3.5)
            assert_allclose(v, 4.5 / (3.5*3.5*2.5))
            assert_allclose(s, (2*5.5/1.5) * np.sqrt(2.5/4.5))
            assert_allclose(k, 6*(4.5**3 + 4.5**2 - 6*4.5 - 2)/(4.5*1.5*0.5))
class TestGenpareto(TestCase):
def test_ab(self):
# c >= 0: a, b = [0, inf]
for c in [1., 0.]:
c = np.asarray(c)
stats.genpareto._argcheck(c) # ugh
assert_equal(stats.genpareto.a, 0.)
assert_(np.isposinf(stats.genpareto.b))
# c < 0: a=0, b=1/|c|
c = np.asarray(-2.)
stats.genpareto._argcheck(c)
assert_allclose([stats.genpareto.a, stats.genpareto.b], [0., 0.5])
def test_c0(self):
# with c=0, genpareto reduces to the exponential distribution
rv = stats.genpareto(c=0.)
x = np.linspace(0, 10., 30)
assert_allclose(rv.pdf(x), stats.expon.pdf(x))
assert_allclose(rv.cdf(x), stats.expon.cdf(x))
assert_allclose(rv.sf(x), stats.expon.sf(x))
q = np.linspace(0., 1., 10)
assert_allclose(rv.ppf(q), stats.expon.ppf(q))
def test_cm1(self):
# with c=-1, genpareto reduces to the uniform distr on [0, 1]
rv = stats.genpareto(c=-1.)
x = np.linspace(0, 10., 30)
assert_allclose(rv.pdf(x), stats.uniform.pdf(x))
assert_allclose(rv.cdf(x), stats.uniform.cdf(x))
assert_allclose(rv.sf(x), stats.uniform.sf(x))
q = np.linspace(0., 1., 10)
assert_allclose(rv.ppf(q), stats.uniform.ppf(q))
# logpdf(1., c=-1) should be zero
assert_allclose(rv.logpdf(1), 0)
def test_x_inf(self):
# make sure x=inf is handled gracefully
rv = stats.genpareto(c=0.1)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
rv = stats.genpareto(c=0.)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
rv = stats.genpareto(c=-1.)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
def test_c_continuity(self):
# pdf is continuous at c=0, -1
x = np.linspace(0, 10, 30)
for c in [0, -1]:
pdf0 = stats.genpareto.pdf(x, c)
for dc in [1e-14, -1e-14]:
pdfc = stats.genpareto.pdf(x, c + dc)
assert_allclose(pdf0, pdfc, atol=1e-12)
cdf0 = stats.genpareto.cdf(x, c)
for dc in [1e-14, 1e-14]:
cdfc = stats.genpareto.cdf(x, c + dc)
assert_allclose(cdf0, cdfc, atol=1e-12)
def test_c_continuity_ppf(self):
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [0., -1.]:
ppf0 = stats.genpareto.ppf(q, c)
for dc in [1e-14, -1e-14]:
ppfc = stats.genpareto.ppf(q, c + dc)
assert_allclose(ppf0, ppfc, atol=1e-12)
def test_c_continuity_isf(self):
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [0., -1.]:
isf0 = stats.genpareto.isf(q, c)
for dc in [1e-14, -1e-14]:
isfc = stats.genpareto.isf(q, c + dc)
assert_allclose(isf0, isfc, atol=1e-12)
def test_cdf_ppf_roundtrip(self):
# this should pass with machine precision. hat tip @pbrod
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [1e-8, -1e-18, 1e-15, -1e-15]:
assert_allclose(stats.genpareto.cdf(stats.genpareto.ppf(q, c), c),
q, atol=1e-15)
def test_logsf(self):
logp = stats.genpareto.logsf(1e10, .01, 0, 1)
assert_allclose(logp, -1842.0680753952365)
class TestPearson3(TestCase):
    """Sampling, pdf and cdf regression pins for the Pearson III."""
    def test_rvs(self):
        # Array draws are floats of the right shape.
        vals = stats.pearson3.rvs(0.1, size=(2, 50))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllFloat'])
        val = stats.pearson3.rvs(0.5)
        assert_(isinstance(val, float))
        val = stats.pearson3(0.5).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllFloat'])
        assert_(len(val) == 3)
    def test_pdf(self):
        # Regression values for scalar, array-skew and array-x inputs.
        vals = stats.pearson3.pdf(2, [0.0, 0.1, 0.2])
        assert_allclose(vals, np.array([0.05399097, 0.05555481, 0.05670246]),
                        atol=1e-6)
        vals = stats.pearson3.pdf(-3, 0.1)
        assert_allclose(vals, np.array([0.00313791]), atol=1e-6)
        vals = stats.pearson3.pdf([-3,-2,-1,0,1], 0.1)
        assert_allclose(vals, np.array([0.00313791, 0.05192304, 0.25028092,
                                        0.39885918, 0.23413173]), atol=1e-6)
    def test_cdf(self):
        # Regression values matching the pdf checks above.
        vals = stats.pearson3.cdf(2, [0.0, 0.1, 0.2])
        assert_allclose(vals, np.array([0.97724987, 0.97462004, 0.97213626]),
                        atol=1e-6)
        vals = stats.pearson3.cdf(-3, 0.1)
        assert_allclose(vals, [0.00082256], atol=1e-6)
        vals = stats.pearson3.cdf([-3,-2,-1,0,1], 0.1)
        assert_allclose(vals, [8.22563821e-04, 1.99860448e-02, 1.58550710e-01,
                               5.06649130e-01, 8.41442111e-01], atol=1e-6)
class TestPoisson(TestCase):
    """Sampling and moment checks for the Poisson distribution."""
    def test_rvs(self):
        # Array draws: nonnegative, right shape, integer dtype.
        sample = stats.poisson.rvs(0.5, size=(2, 50))
        assert_(numpy.all(sample >= 0))
        assert_(numpy.shape(sample) == (2, 50))
        assert_(sample.dtype.char in typecodes['AllInteger'])
        # A scalar draw is a plain Python int.
        scalar = stats.poisson.rvs(0.5)
        assert_(isinstance(scalar, int))
        # A frozen distribution keeps the integer dtype.
        frozen_sample = stats.poisson(0.5).rvs(3)
        assert_(isinstance(frozen_sample, numpy.ndarray))
        assert_(frozen_sample.dtype.char in typecodes['AllInteger'])
    def test_stats(self):
        # For Poisson(mu): mean = var = mu, skew = 1/sqrt(mu), kurt = 1/mu.
        mu = 16.0
        mvsk = stats.poisson.stats(mu, moments='mvsk')
        assert_allclose(mvsk, [mu, mu, np.sqrt(1.0/mu), 1.0/mu])
class TestZipf(TestCase):
    """Sampling and moment-finiteness checks for the Zipf distribution."""
    def test_rvs(self):
        # Array draws: all >= 1, right shape, integer dtype.
        draws = stats.zipf.rvs(1.5, size=(2, 50))
        assert_(numpy.all(draws >= 1))
        assert_(numpy.shape(draws) == (2, 50))
        assert_(draws.dtype.char in typecodes['AllInteger'])
        # Scalar draw is a plain Python int.
        single = stats.zipf.rvs(1.5)
        assert_(isinstance(single, int))
        # Frozen distribution keeps an integer dtype.
        frozen_draws = stats.zipf(1.5).rvs(3)
        assert_(isinstance(frozen_draws, numpy.ndarray))
        assert_(frozen_draws.dtype.char in typecodes['AllInteger'])
    def test_moments(self):
        # The n-th moment is finite iff a > n + 1.
        mean, var = stats.zipf.stats(a=2.8)
        assert_(np.isfinite(mean))
        assert_equal(var, np.inf)
        # At a=4.8 the skewness exists but the kurtosis does not.
        skew, kurt = stats.zipf.stats(a=4.8, moments='sk')
        assert_(not np.isfinite([skew, kurt]).all())
class TestDLaplace(TestCase):
    """Tests for the discrete Laplace distribution."""

    def test_rvs(self):
        sample = stats.dlaplace.rvs(1.5, size=(2, 50))
        assert_(numpy.shape(sample) == (2, 50))
        assert_(sample.dtype.char in typecodes['AllInteger'])
        single = stats.dlaplace.rvs(1.5)
        assert_(isinstance(single, int))
        frozen_draws = stats.dlaplace(1.5).rvs(3)
        assert_(isinstance(frozen_draws, numpy.ndarray))
        assert_(frozen_draws.dtype.char in typecodes['AllInteger'])
        assert_(stats.dlaplace.rvs(0.8) is not None)

    def test_stats(self):
        # Compare the explicit moment formulas with a direct summation
        # over the pmf on a wide truncated support.
        a = 1.
        dl = stats.dlaplace(a)
        m, v, s, k = dl.stats('mvsk')

        N = 37
        support = np.arange(-N, N + 1)
        weights = dl.pmf(support)
        m2 = np.sum(weights * support**2)
        m4 = np.sum(weights * support**4)
        assert_equal((m, s), (0, 0))
        assert_allclose((v, k), (m2, m4 / m2**2 - 3.), atol=1e-14, rtol=1e-8)

    def test_stats2(self):
        a = np.log(2.)
        dl = stats.dlaplace(a)
        m, v, s, k = dl.stats('mvsk')
        assert_equal((m, s), (0., 0.))
        assert_allclose((v, k), (4., 3.25))
class TestInvGamma(TestCase):
    """Tests for the inverted-gamma distribution."""

    @dec.skipif(NumpyVersion(np.__version__) < '1.7.0',
                "assert_* funcs broken with inf/nan")
    def test_invgamma_inf_gh_1866(self):
        # invgamma's moments are only finite for a>n
        # specific numbers checked w/ boost 1.54
        # Warnings are promoted to errors: computing the finite moments
        # must not emit any RuntimeWarning.
        with warnings.catch_warnings():
            warnings.simplefilter('error', RuntimeWarning)
            mvsk = stats.invgamma.stats(a=19.31, moments='mvsk')
            assert_allclose(mvsk,
                [0.05461496450, 0.0001723162534, 1.020362676, 2.055616582])

            # Vector of shape params straddling the a>n existence thresholds:
            # nonexistent moments come back as inf/nan, per row below.
            a = [1.1, 3.1, 5.6]
            mvsk = stats.invgamma.stats(a=a, moments='mvsk')
            expected = ([10., 0.476190476, 0.2173913043],  # mmm
                        [np.inf, 0.2061430632, 0.01312749422],  # vvv
                        [np.nan, 41.95235392, 2.919025532],  # sss
                        [np.nan, np.nan, 24.51923076])  # kkk
            for x, y in zip(mvsk, expected):
                assert_almost_equal(x, y)
class TestF(TestCase):
    """Tests for the F distribution."""

    def test_f_moments(self):
        # n-th moment of F distributions is only finite for n < dfd / 2
        # With dfd=6.5 the first three moments exist but the kurtosis
        # (fourth moment) does not.
        m, v, s, k = stats.f.stats(11, 6.5, moments='mvsk')
        assert_(np.isfinite(m))
        assert_(np.isfinite(v))
        assert_(np.isfinite(s))
        assert_(not np.isfinite(k))

    def test_moments_warnings(self):
        # no warnings should be generated for dfd = 2, 4, 6, 8 (div by zero)
        with warnings.catch_warnings():
            warnings.simplefilter('error', RuntimeWarning)
            stats.f.stats(dfn=[11]*4, dfd=[2, 4, 6, 8], moments='mvsk')

    @dec.knownfailureif(True, 'f stats does not properly broadcast')
    def test_stats_broadcast(self):
        # stats do not fully broadcast just yet
        mv = stats.f.stats(dfn=11, dfd=[11, 12])
def test_rvgeneric_std():
    """Regression test for #1191: std must broadcast over shape params."""
    expected = [1.29099445, 1.22474487]
    assert_array_almost_equal(stats.t.std([5, 6]), expected)
class TestRvDiscrete(TestCase):
def test_rvs(self):
states = [-1,0,1,2,3,4]
probability = [0.0,0.3,0.4,0.0,0.3,0.0]
samples = 1000
r = stats.rv_discrete(name='sample',values=(states,probability))
x = r.rvs(size=samples)
assert_(isinstance(x, numpy.ndarray))
for s,p in zip(states,probability):
assert_(abs(sum(x == s)/float(samples) - p) < 0.05)
x = r.rvs()
assert_(isinstance(x, int))
def test_entropy(self):
# Basic tests of entropy.
pvals = np.array([0.25, 0.45, 0.3])
p = stats.rv_discrete(values=([0, 1, 2], pvals))
expected_h = -sum(xlogy(pvals, pvals))
h = p.entropy()
assert_allclose(h, expected_h)
p = stats.rv_discrete(values=([0, 1, 2], [1.0, 0, 0]))
h = p.entropy()
assert_equal(h, 0.0)
class TestExpon(TestCase):
    """Tests for the exponential distribution."""

    def test_zero(self):
        # At the left endpoint of the support, pdf(0) = exp(-0) = 1.
        assert_equal(stats.expon.pdf(0), 1)

    def test_tail(self):
        # Regression test for ticket 807: accuracy deep in the tails.
        assert_equal(stats.expon.cdf(1e-18), 1e-18)
        assert_equal(stats.expon.isf(stats.expon.sf(40)), 40)
class TestExponNorm(TestCase):
    """Tests for the exponentially modified normal distribution."""

    def test_moments(self):
        # Some moment test cases based on the non-loc/scaled formula.
        def reference_moments(lam, sig, mu):
            # See wikipedia for these formulae, where the distribution is
            # listed as an exponentially modified gaussian.
            opK2 = 1.0 + 1 / (lam * sig)**2
            exp_skew = 2 / (lam * sig)**3 * opK2**(-1.5)
            exp_kurt = 6.0 * (1 + (lam * sig)**2)**(-2)
            return [mu + 1 / lam, sig * sig + 1.0 / (lam * lam),
                    exp_skew, exp_kurt]

        # Same four (mu, sig, lam) cases as the original formulation.
        for mu, sig, lam in [(0, 1, 1), (-3, 2, 0.1), (0, 3, 1),
                             (-5, 11, 3.5)]:
            K = 1.0 / (lam * sig)
            sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
            assert_almost_equal(sts, reference_moments(lam, sig, mu))

    def test_extremes_x(self):
        # The pdf must underflow to 0 (not overflow) far in the tails.
        assert_almost_equal(stats.exponnorm.pdf(-900, 1), 0.0)
        assert_almost_equal(stats.exponnorm.pdf(+900, 1), 0.0)
class TestGenExpon(TestCase):
    """Tests for the generalized exponential distribution."""

    def test_pdf_unity_area(self):
        from scipy.integrate import simps
        # The pdf should integrate to one on a fine grid over [0, 10).
        pdf_values = stats.genexpon.pdf(numpy.arange(0, 10, 0.01),
                                        0.5, 0.5, 2.0)
        assert_almost_equal(simps(pdf_values, dx=0.01), 1, 1)

    def test_cdf_bounds(self):
        # The cdf is a probability and must stay within [0, 1].
        cdf = stats.genexpon.cdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
        assert_(numpy.all((0 <= cdf) & (cdf <= 1)))
class TestExponpow(TestCase):
    """Tests for the exponential power distribution."""

    def test_tail(self):
        # cdf accuracy near zero, and isf/sf round-trip accuracy.
        assert_almost_equal(stats.exponpow.cdf(1e-10, 2.), 1e-20)
        assert_almost_equal(
            stats.exponpow.isf(stats.exponpow.sf(5, .8), .8), 5)
class TestSkellam(TestCase):
    """Tests for the Skellam distribution (difference of two Poissons).

    Reference values were generated with R; see the inline comments.
    """

    def test_pmf(self):
        # comparison to R
        k = numpy.arange(-10, 15)
        mu1, mu2 = 10, 5
        skpmfR = numpy.array(
                   [4.2254582961926893e-005, 1.1404838449648488e-004,
                    2.8979625801752660e-004, 6.9177078182101231e-004,
                    1.5480716105844708e-003, 3.2412274963433889e-003,
                    6.3373707175123292e-003, 1.1552351566696643e-002,
                    1.9606152375042644e-002, 3.0947164083410337e-002,
                    4.5401737566767360e-002, 6.1894328166820688e-002,
                    7.8424609500170578e-002, 9.2418812533573133e-002,
                    1.0139793148019728e-001, 1.0371927988298846e-001,
                    9.9076583077406091e-002, 8.8546660073089561e-002,
                    7.4187842052486810e-002, 5.8392772862200251e-002,
                    4.3268692953013159e-002, 3.0248159818374226e-002,
                    1.9991434305603021e-002, 1.2516877303301180e-002,
                    7.4389876226229707e-003])

        assert_almost_equal(stats.skellam.pmf(k, mu1, mu2), skpmfR, decimal=15)

    def test_cdf(self):
        # comparison to R, only 5 decimals
        k = numpy.arange(-10, 15)
        mu1, mu2 = 10, 5
        skcdfR = numpy.array(
                   [6.4061475386192104e-005, 1.7810985988267694e-004,
                    4.6790611790020336e-004, 1.1596768997212152e-003,
                    2.7077485103056847e-003, 5.9489760066490718e-003,
                    1.2286346724161398e-002, 2.3838698290858034e-002,
                    4.3444850665900668e-002, 7.4392014749310995e-002,
                    1.1979375231607835e-001, 1.8168808048289900e-001,
                    2.6011268998306952e-001, 3.5253150251664261e-001,
                    4.5392943399683988e-001, 5.5764871387982828e-001,
                    6.5672529695723436e-001, 7.4527195703032389e-001,
                    8.1945979908281064e-001, 8.7785257194501087e-001,
                    9.2112126489802404e-001, 9.5136942471639818e-001,
                    9.7136085902200120e-001, 9.8387773632530240e-001,
                    9.9131672394792536e-001])

        assert_almost_equal(stats.skellam.cdf(k, mu1, mu2), skcdfR, decimal=5)
class TestLognorm(TestCase):
    """Tests for the lognormal distribution."""

    def test_pdf(self):
        # Regression test for Ticket #1471: the 0/0 at x=0 must yield
        # pdf=0, not nan.
        with np.errstate(divide='ignore'):
            pdf = stats.lognorm.pdf([0, 0.5, 1], 1)
        assert_array_almost_equal(pdf, [0.0, 0.62749608, 0.39894228])
class TestBeta(TestCase):
    """Tests for the beta distribution."""

    def test_logpdf(self):
        # Regression test for Ticket #1326: avoid nan with 0*log(0).
        assert_almost_equal(stats.beta.logpdf(0, 1, 0.5), -0.69314718056)
        assert_almost_equal(stats.beta.logpdf(0, 0.5, 1), np.inf)

    def test_logpdf_ticket_1866(self):
        alpha, beta = 267, 1472
        x = np.array([0.2, 0.5, 0.6])
        frozen = stats.beta(alpha, beta)
        assert_allclose(frozen.logpdf(x).sum(), -1201.699061824062)
        # pdf and logpdf must be mutually consistent.
        assert_allclose(frozen.pdf(x), np.exp(frozen.logpdf(x)))
class TestBetaPrime(TestCase):
    """Tests for the beta-prime distribution."""

    def test_logpdf(self):
        alpha, beta = 267, 1472
        x = np.array([0.2, 0.5, 0.6])
        frozen = stats.betaprime(alpha, beta)
        assert_(np.isfinite(frozen.logpdf(x)).all())
        assert_allclose(frozen.pdf(x), np.exp(frozen.logpdf(x)))

    def test_cdf(self):
        # Regression test for gh-4030: a dedicated implementation of
        # scipy.stats.betaprime.cdf().
        assert_equal(stats.betaprime.cdf(0, 0.2, 0.3), 0.0)

        alpha, beta = 267, 1472
        x = np.array([0.2, 0.5, 0.6])
        cdfs = stats.betaprime.cdf(x, alpha, beta)
        assert_(np.isfinite(cdfs).all())

        # Compare the dedicated cdf against the generic integration path.
        gen_cdf = stats.rv_continuous._cdf_single
        cdfs_g = [gen_cdf(stats.betaprime, val, alpha, beta) for val in x]
        assert_allclose(cdfs, cdfs_g, atol=0, rtol=2e-12)
class TestGamma(TestCase):
    """Tests for the gamma distribution."""

    def test_pdf(self):
        # A few pdf values compared with R.
        assert_almost_equal(stats.gamma.pdf(90, 394, scale=1./5),
                            0.002312341)
        assert_almost_equal(stats.gamma.pdf(3, 10, scale=1./5), 0.1620358)

    def test_logpdf(self):
        # Regression test for Ticket #1326: avoid nan with 0*log(0) at x=0.
        assert_almost_equal(stats.gamma.logpdf(0, 1), 0)
class TestChi2(TestCase):
    """Regression tests after precision improvements (ticket:1041).

    Reference values not independently verified.
    """

    def test_precision(self):
        assert_almost_equal(stats.chi2.pdf(1000, 1000),
                            8.919133934753128e-003, 14)
        assert_almost_equal(stats.chi2.pdf(100, 100),
                            0.028162503162596778, 14)
class TestArrayArgument(TestCase):
    """Regression test for ticket:992 (array-valued loc/scale)."""

    def test_noexception(self):
        sample = stats.norm.rvs(loc=(np.arange(5)), scale=np.ones(5),
                                size=(10, 5))
        assert_equal(sample.shape, (10, 5))
class TestDocstring(TestCase):
    """Checks on generated distribution docstrings."""

    def test_docstrings(self):
        # See ticket #761: the distribution name should appear in its docs.
        # Guard on __doc__ being present: it is None under python -OO.
        if stats.rayleigh.__doc__ is not None:
            self.assertTrue("rayleigh" in stats.rayleigh.__doc__.lower())
        if stats.bernoulli.__doc__ is not None:
            self.assertTrue("bernoulli" in stats.bernoulli.__doc__.lower())

    def test_no_name_arg(self):
        # If name is not given, construction shouldn't fail.  See #1508.
        stats.rv_continuous()
        stats.rv_discrete()
class TestEntropy(TestCase):
    """Tests for stats.entropy (Shannon entropy / KL divergence)."""

    def test_entropy_positive(self):
        # See ticket #497
        pk = [0.5,0.2,0.3]
        qk = [0.1,0.25,0.65]
        eself = stats.entropy(pk,pk)
        edouble = stats.entropy(pk,qk)
        # KL(p, p) is exactly zero; KL(p, q) is non-negative.
        assert_(0.0 == eself)
        assert_(edouble >= 0.0)

    def test_entropy_base(self):
        # Uniform over 16 outcomes has entropy log2(16) = 4 bits.
        pk = np.ones(16, float)
        S = stats.entropy(pk, base=2.)
        assert_(abs(S - 4.) < 1.e-5)

        # Changing the base rescales the result by log(2).
        qk = np.ones(16, float)
        qk[:8] = 2.
        S = stats.entropy(pk, qk)
        S2 = stats.entropy(pk, qk, base=2.)
        assert_(abs(S/S2 - np.log(2.)) < 1.e-5)

    def test_entropy_zero(self):
        # Test for PR-479: zero-probability entries must not contribute nan.
        assert_almost_equal(stats.entropy([0, 1, 2]), 0.63651416829481278,
                            decimal=12)

    def test_entropy_2d(self):
        # 2-D input: entropy is computed column-wise.
        pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
        qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]
        assert_array_almost_equal(stats.entropy(pk, qk),
                                  [0.1933259, 0.18609809])

    @dec.skipif(NumpyVersion(np.__version__) < '1.7.0',
                "assert_* funcs broken with inf/nan")
    def test_entropy_2d_zero(self):
        # A zero in qk where pk is nonzero gives infinite KL divergence.
        pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
        qk = [[0.0, 0.1], [0.3, 0.6], [0.5, 0.3]]
        assert_array_almost_equal(stats.entropy(pk, qk),
                                  [np.inf, 0.18609809])

        # Once pk is zero at the same position, the column is finite again.
        pk[0][0] = 0.0
        assert_array_almost_equal(stats.entropy(pk, qk),
                                  [0.17403988, 0.18609809])
def test_argsreduce():
    """Check argsreduce: filters array args by a boolean condition and
    broadcasts scalar args to the matching length.

    BUG FIX: renamed from ``TestArgsreduce`` — it is a plain function, and
    the capitalized name prevented collectors that require a lowercase
    ``test_`` prefix (e.g. pytest) from ever running it.
    """
    a = array([1,3,2,1,2,3,3])
    # Array condition: keep entries where a > 1; the scalar 2 broadcasts.
    b,c = argsreduce(a > 1, a, 2)

    assert_array_equal(b, [3,2,2,3,3])
    assert_array_equal(c, [2,2,2,2,2])

    # Scalar (all-true) condition.
    b,c = argsreduce(2 > 1, a, 2)

    assert_array_equal(b, a[0])
    assert_array_equal(c, [2])

    # All-true array condition: everything is kept.
    b,c = argsreduce(a > 0, a, 2)

    assert_array_equal(b, a)
    assert_array_equal(c, [2] * numpy.size(a))
class TestFitMethod(object):
    """Generator-style (nose ``yield``) tests of the generic ``fit`` method.

    NOTE(review): ``test_fit`` and ``test_fix_fit`` rely on the module-level
    ``test_all_distributions`` fixture and on yield-based test collection —
    presumably nose-era semantics; confirm before porting to pytest.
    """

    skip = ['ncf']  # distributions whose fit is known to fail

    @dec.slow
    def test_fit(self):
        def check(func, dist, args, alpha):
            if dist in self.skip:
                raise SkipTest("%s fit known to fail" % dist)
            distfunc = getattr(stats, dist)
            with np.errstate(all='ignore'):
                res = distfunc.rvs(*args, **{'size':200})
                vals = distfunc.fit(res)
                vals2 = distfunc.fit(res, optimizer='powell')
            # Only check the length of the return
            # FIXME: should check the actual results to see if we are 'close'
            #   to what was created --- but what is 'close' enough
            if dist == 'frechet':
                assert_(len(vals) == len(args))
                assert_(len(vals2) == len(args))
            else:
                assert_(len(vals) == 2+len(args))
                assert_(len(vals2) == 2+len(args))

        for func, dist, args, alpha in test_all_distributions():
            yield check, func, dist, args, alpha

    @dec.slow
    def test_fix_fit(self):
        def check(func, dist, args, alpha):
            # Not sure why 'ncf', and 'beta' are failing
            # frechet has different len(args) than distfunc.numargs
            if dist in self.skip + ['frechet']:
                raise SkipTest("%s fit known to fail" % dist)
            distfunc = getattr(stats, dist)
            with np.errstate(all='ignore'):
                res = distfunc.rvs(*args, **{'size':200})
                # Fixing loc (floc) / scale (fscale) / shape (f0, f1, f2)
                # must pin that parameter exactly in the fit result.
                vals = distfunc.fit(res,floc=0)
                vals2 = distfunc.fit(res,fscale=1)
                assert_(len(vals) == 2+len(args))
                assert_(vals[-2] == 0)
                assert_(vals2[-1] == 1)
                assert_(len(vals2) == 2+len(args))
                if len(args) > 0:
                    vals3 = distfunc.fit(res, f0=args[0])
                    assert_(len(vals3) == 2+len(args))
                    assert_(vals3[0] == args[0])
                if len(args) > 1:
                    vals4 = distfunc.fit(res, f1=args[1])
                    assert_(len(vals4) == 2+len(args))
                    assert_(vals4[1] == args[1])
                if len(args) > 2:
                    vals5 = distfunc.fit(res, f2=args[2])
                    assert_(len(vals5) == 2+len(args))
                    assert_(vals5[2] == args[2])

        for func, dist, args, alpha in test_all_distributions():
            yield check, func, dist, args, alpha

    def test_fix_fit_2args_lognorm(self):
        # Regression test for #1551.
        np.random.seed(12345)
        with np.errstate(all='ignore'):
            x = stats.lognorm.rvs(0.25, 0., 20.0, size=20)
            assert_allclose(np.array(stats.lognorm.fit(x, floc=0, fscale=20)),
                            [0.25888672, 0, 20], atol=1e-5)

    def test_fix_fit_norm(self):
        x = np.arange(1, 6)

        loc, scale = stats.norm.fit(x)
        assert_almost_equal(loc, 3)
        assert_almost_equal(scale, np.sqrt(2))

        loc, scale = stats.norm.fit(x, floc=2)
        assert_equal(loc, 2)
        assert_equal(scale, np.sqrt(3))

        loc, scale = stats.norm.fit(x, fscale=2)
        assert_almost_equal(loc, 3)
        assert_equal(scale, 2)

    def test_fix_fit_gamma(self):
        x = np.arange(1, 6)
        meanlog = np.log(x).mean()

        # A basic test of gamma.fit with floc=0.
        floc = 0
        a, loc, scale = stats.gamma.fit(x, floc=floc)
        # The MLE of a (with loc fixed) satisfies log(a)-digamma(a) = s.
        s = np.log(x.mean()) - meanlog
        assert_almost_equal(np.log(a) - special.digamma(a), s, decimal=5)
        assert_equal(loc, floc)
        assert_almost_equal(scale, x.mean()/a, decimal=8)

        # Regression tests for gh-2514.
        # The problem was that if `floc=0` was given, any other fixed
        # parameters were ignored.
        f0 = 1
        floc = 0
        a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
        assert_equal(a, f0)
        assert_equal(loc, floc)
        assert_almost_equal(scale, x.mean()/a, decimal=8)

        f0 = 2
        floc = 0
        a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
        assert_equal(a, f0)
        assert_equal(loc, floc)
        assert_almost_equal(scale, x.mean()/a, decimal=8)

        # loc and scale fixed.
        floc = 0
        fscale = 2
        a, loc, scale = stats.gamma.fit(x, floc=floc, fscale=fscale)
        assert_equal(loc, floc)
        assert_equal(scale, fscale)
        # With loc and scale fixed, the MLE of a solves digamma(a) = c.
        c = meanlog - np.log(fscale)
        assert_almost_equal(special.digamma(a), c)

    def test_fix_fit_beta(self):
        # Test beta.fit when both floc and fscale are given.

        def mlefunc(a, b, x):
            # Zeros of this function are critical points of
            # the maximum likelihood function.
            n = len(x)
            s1 = np.log(x).sum()
            s2 = np.log(1-x).sum()
            psiab = special.psi(a + b)
            func = [s1 - n * (-psiab + special.psi(a)),
                    s2 - n * (-psiab + special.psi(b))]
            return func

        # Basic test with floc and fscale given.
        x = np.array([0.125, 0.25, 0.5])
        a, b, loc, scale = stats.beta.fit(x, floc=0, fscale=1)
        assert_equal(loc, 0)
        assert_equal(scale, 1)
        assert_allclose(mlefunc(a, b, x), [0,0], atol=1e-6)

        # Basic test with f0, floc and fscale given.
        # This is also a regression test for gh-2514.
        x = np.array([0.125, 0.25, 0.5])
        a, b, loc, scale = stats.beta.fit(x, f0=2, floc=0, fscale=1)
        assert_equal(a, 2)
        assert_equal(loc, 0)
        assert_equal(scale, 1)
        da, db = mlefunc(a, b, x)
        assert_allclose(db, 0, atol=1e-5)

        # Same floc and fscale values as above, but reverse the data
        # and fix b (f1).
        x2 = 1 - x
        a2, b2, loc2, scale2 = stats.beta.fit(x2, f1=2, floc=0, fscale=1)
        assert_equal(b2, 2)
        assert_equal(loc2, 0)
        assert_equal(scale2, 1)
        da, db = mlefunc(a2, b2, x2)
        assert_allclose(da, 0, atol=1e-5)
        # a2 of this test should equal b from above.
        assert_almost_equal(a2, b)

        # Check for detection of data out of bounds when floc and fscale
        # are given.
        assert_raises(ValueError, stats.beta.fit, x, floc=0.5, fscale=1)
        y = np.array([0, .5, 1])
        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1)
        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f0=2)
        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f1=2)

        # Check that attempting to fix all the parameters raises a ValueError.
        assert_raises(ValueError, stats.beta.fit, y, f0=0, f1=1,
                      floc=2, fscale=3)

    def test_fshapes(self):
        # take a beta distribution, with shapes='a, b', and make sure that
        # fa is equivalent to f0, and fb is equivalent to f1
        a, b = 3., 4.
        x = stats.beta.rvs(a, b, size=100, random_state=1234)
        res_1 = stats.beta.fit(x, f0=3.)
        res_2 = stats.beta.fit(x, fa=3.)
        assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)

        res_3 = stats.beta.fit(x, f1=4.)
        res_4 = stats.beta.fit(x, fb=4.)
        assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)

        # cannot specify both positional and named args at the same time
        assert_raises(ValueError, stats.beta.fit, x, fa=1, f0=2)

        # check that attempting to fix all parameters raises a ValueError
        assert_raises(ValueError, stats.beta.fit, x, fa=0, f1=1,
                      floc=2, fscale=3)
class TestFrozen(TestCase):
# Test that a frozen distribution gives the same results as the original object.
#
# Only tested for the normal distribution (with loc and scale specified)
# and for the gamma distribution (with a shape parameter specified).
def test_norm(self):
dist = stats.norm
frozen = stats.norm(loc=10.0, scale=3.0)
result_f = frozen.pdf(20.0)
result = dist.pdf(20.0, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.cdf(20.0)
result = dist.cdf(20.0, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.ppf(0.25)
result = dist.ppf(0.25, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.isf(0.25)
result = dist.isf(0.25, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.sf(10.0)
result = dist.sf(10.0, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.median()
result = dist.median(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.mean()
result = dist.mean(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.var()
result = dist.var(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.std()
result = dist.std(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.entropy()
result = dist.entropy(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.moment(2)
result = dist.moment(2,loc=10.0, scale=3.0)
assert_equal(result_f, result)
assert_equal(frozen.a, dist.a)
assert_equal(frozen.b, dist.b)
def test_gamma(self):
a = 2.0
dist = stats.gamma
frozen = stats.gamma(a)
result_f = frozen.pdf(20.0)
result = dist.pdf(20.0, a)
assert_equal(result_f, result)
result_f = frozen.cdf(20.0)
result = dist.cdf(20.0, a)
assert_equal(result_f, result)
result_f = frozen.ppf(0.25)
result = dist.ppf(0.25, a)
assert_equal(result_f, result)
result_f = frozen.isf(0.25)
result = dist.isf(0.25, a)
assert_equal(result_f, result)
result_f = frozen.sf(10.0)
result = dist.sf(10.0, a)
assert_equal(result_f, result)
result_f = frozen.median()
result = dist.median(a)
assert_equal(result_f, result)
result_f = frozen.mean()
result = dist.mean(a)
assert_equal(result_f, result)
result_f = frozen.var()
result = dist.var(a)
assert_equal(result_f, result)
result_f = frozen.std()
result = dist.std(a)
assert_equal(result_f, result)
result_f = frozen.entropy()
result = dist.entropy(a)
assert_equal(result_f, result)
result_f = frozen.moment(2)
result = dist.moment(2, a)
assert_equal(result_f, result)
assert_equal(frozen.a, frozen.dist.a)
assert_equal(frozen.b, frozen.dist.b)
def test_regression_ticket_1293(self):
# Create a frozen distribution.
frozen = stats.lognorm(1)
# Call one of its methods that does not take any keyword arguments.
m1 = frozen.moment(2)
# Now call a method that takes a keyword argument.
frozen.stats(moments='mvsk')
# Call moment(2) again.
# After calling stats(), the following was raising an exception.
# So this test passes if the following does not raise an exception.
m2 = frozen.moment(2)
# The following should also be true, of course. But it is not
# the focus of this test.
assert_equal(m1, m2)
def test_ab(self):
# test that the support of a frozen distribution
# (i) remains frozen even if it changes for the original one
# (ii) is actually correct if the shape parameters are such that
# the values of [a, b] are not the default [0, inf]
# take a genpareto as an example where the support
# depends on the value of the shape parameter:
# for c > 0: a, b = 0, inf
# for c < 0: a, b = 0, -1/c
rv = stats.genpareto(c=-0.1)
a, b = rv.dist.a, rv.dist.b
assert_equal([a, b], [0., 10.])
assert_equal([rv.a, rv.b], [0., 10.])
stats.genpareto.pdf(0, c=0.1) # this changes genpareto.b
assert_equal([rv.dist.a, rv.dist.b], [a, b])
assert_equal([rv.a, rv.b], [a, b])
rv1 = stats.genpareto(c=0.1)
assert_(rv1.dist is not rv.dist)
def test_rv_frozen_in_namespace(self):
# Regression test for gh-3522
assert_(hasattr(stats.distributions, 'rv_frozen'))
def test_random_state(self):
# only check that the random_state attribute exists,
frozen = stats.norm()
assert_(hasattr(frozen, 'random_state'))
# ... that it can be set,
frozen.random_state = 42
assert_equal(frozen.random_state.get_state(),
np.random.RandomState(42).get_state())
# ... and that .rvs method accepts it as an argument
rndm = np.random.RandomState(1234)
frozen.rvs(size=8, random_state=rndm)
def test_expect(self):
# smoke test the expect method of the frozen distribution
# only take a gamma w/loc and scale and poisson with loc specified
def func(x):
return x
gm = stats.gamma(a=2, loc=3, scale=4)
gm_val = gm.expect(func, lb=1, ub=2, conditional=True)
gamma_val = stats.gamma.expect(func, args=(2,), loc=3, scale=4,
lb=1, ub=2, conditional=True)
assert_allclose(gm_val, gamma_val)
p = stats.poisson(3, loc=4)
p_val = p.expect(func)
poisson_val = stats.poisson.expect(func, args=(3,), loc=4)
assert_allclose(p_val, poisson_val)
class TestExpect(TestCase):
    """Test for expect method.

    Uses normal distribution and beta distribution for finite bounds, and
    hypergeom for discrete distribution with finite support.
    """

    def test_norm(self):
        # E[(X-5)^2] is the variance = scale^2 = 4.
        v = stats.norm.expect(lambda x: (x-5)*(x-5), loc=5, scale=2)
        assert_almost_equal(v, 4, decimal=14)

        # E[X] is the mean = loc = 5.
        m = stats.norm.expect(lambda x: (x), loc=5, scale=2)
        assert_almost_equal(m, 5, decimal=14)

        lb = stats.norm.ppf(0.05, loc=5, scale=2)
        ub = stats.norm.ppf(0.95, loc=5, scale=2)
        # Integrating the constant 1 over the central 90% interval gives 0.9;
        prob90 = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub)
        assert_almost_equal(prob90, 0.9, decimal=14)

        # ...and with conditional=True the result is renormalized to 1.
        prob90c = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub,
                                    conditional=True)
        assert_almost_equal(prob90c, 1., decimal=14)

    def test_beta(self):
        # case with finite support interval
        v = stats.beta.expect(lambda x: (x-19/3.)*(x-19/3.), args=(10,5),
                              loc=5, scale=2)
        assert_almost_equal(v, 1./18., decimal=13)

        m = stats.beta.expect(lambda x: x, args=(10,5), loc=5., scale=2.)
        assert_almost_equal(m, 19/3., decimal=13)

        ub = stats.beta.ppf(0.95, 10, 10, loc=5, scale=2)
        lb = stats.beta.ppf(0.05, 10, 10, loc=5, scale=2)
        prob90 = stats.beta.expect(lambda x: 1., args=(10,10), loc=5.,
                                   scale=2.,lb=lb, ub=ub, conditional=False)
        assert_almost_equal(prob90, 0.9, decimal=13)

        prob90c = stats.beta.expect(lambda x: 1, args=(10,10), loc=5,
                                    scale=2, lb=lb, ub=ub, conditional=True)
        assert_almost_equal(prob90c, 1., decimal=13)

    def test_hypergeom(self):
        # test case with finite bounds

        # without specifying bounds
        m_true, v_true = stats.hypergeom.stats(20, 10, 8, loc=5.)
        m = stats.hypergeom.expect(lambda x: x, args=(20, 10, 8), loc=5.)
        assert_almost_equal(m, m_true, decimal=13)

        v = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8),
                                   loc=5.)
        assert_almost_equal(v, v_true, decimal=14)

        # with bounds, bounds equal to shifted support
        v_bounds = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8),
                                          loc=5., lb=5, ub=13)
        assert_almost_equal(v_bounds, v_true, decimal=14)

        # drop boundary points
        prob_true = 1-stats.hypergeom.pmf([5, 13], 20, 10, 8, loc=5).sum()
        prob_bounds = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
                                             loc=5., lb=6, ub=12)
        assert_almost_equal(prob_bounds, prob_true, decimal=13)

        # conditional
        prob_bc = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), loc=5.,
                                         lb=6, ub=12, conditional=True)
        assert_almost_equal(prob_bc, 1, decimal=14)

        # check simple integral
        prob_b = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
                                        lb=0, ub=8)
        assert_almost_equal(prob_b, 1, decimal=13)

    def test_poisson(self):
        # poisson, use lower bound only
        prob_bounds = stats.poisson.expect(lambda x: 1, args=(2,), lb=3,
                                           conditional=False)
        prob_b_true = 1-stats.poisson.cdf(2,2)
        assert_almost_equal(prob_bounds, prob_b_true, decimal=14)

        prob_lb = stats.poisson.expect(lambda x: 1, args=(2,), lb=2,
                                       conditional=True)
        assert_almost_equal(prob_lb, 1, decimal=14)

    def test_genhalflogistic(self):
        # genhalflogistic, changes upper bound of support in _argcheck
        # regression test for gh-2622
        halflog = stats.genhalflogistic
        # check consistency when calling expect twice with the same input
        res1 = halflog.expect(args=(1.5,))
        halflog.expect(args=(0.5,))
        res2 = halflog.expect(args=(1.5,))
        assert_almost_equal(res1, res2, decimal=14)

    def test_rice_overflow(self):
        # rice.pdf(999, 0.74) was inf since special.i0 silentyly overflows
        # check that using i0e fixes it
        assert_(np.isfinite(stats.rice.pdf(999, 0.74)))

        # Constant integrands exercise the same pdf evaluation path.
        assert_(np.isfinite(stats.rice.expect(lambda x: 1, args=(0.74,))))
        assert_(np.isfinite(stats.rice.expect(lambda x: 2, args=(0.74,))))
        assert_(np.isfinite(stats.rice.expect(lambda x: 3, args=(0.74,))))
class TestNct(TestCase):
def test_nc_parameter(self):
# Parameter values c<=0 were not enabled (gh-2402).
# For negative values c and for c=0 results of rv.cdf(0) below were nan
rv = stats.nct(5, 0)
assert_equal(rv.cdf(0), 0.5)
rv = stats.nct(5, -1)
assert_almost_equal(rv.cdf(0), 0.841344746069, decimal=10)
def test_broadcasting(self):
res = stats.nct.pdf(5, np.arange(4,7)[:,None], np.linspace(0.1, 1, 4))
expected = array([[0.00321886, 0.00557466, 0.00918418, 0.01442997],
[0.00217142, 0.00395366, 0.00683888, 0.01126276],
[0.00153078, 0.00291093, 0.00525206, 0.00900815]])
assert_allclose(res, expected, rtol=1e-5)
def text_variance_gh_issue_2401(self):
# Computation of the variance of a non-central t-distribution resulted
# in a TypeError: ufunc 'isinf' not supported for the input types,
# and the inputs could not be safely coerced to any supported types
# according to the casting rule 'safe'
rv = stats.nct(4, 0)
assert_equal(rv.var(), 2.0)
def test_nct_inf_moments(self):
# n-th moment of nct only exists for df > n
m, v, s, k = stats.nct.stats(df=1.9, nc=0.3, moments='mvsk')
assert_(np.isfinite(m))
assert_equal([v, s, k], [np.inf, np.nan, np.nan])
m, v, s, k = stats.nct.stats(df=3.1, nc=0.3, moments='mvsk')
assert_(np.isfinite([m, v, s]).all())
assert_equal(k, np.nan)
class TestRice(TestCase):
    """Tests for the Rice distribution."""

    def test_rice_zero_b(self):
        # The rice distribution should work with b=0 (cf gh-2164).
        x = [0.2, 1., 5.]
        for method in (stats.rice.pdf, stats.rice.logpdf,
                       stats.rice.cdf, stats.rice.logcdf):
            assert_(np.isfinite(method(x, b=0.)).all())

        q = [0.1, 0.1, 0.5, 0.9]
        assert_(np.isfinite(stats.rice.ppf(q, b=0.)).all())

        mvsk = stats.rice.stats(0, moments='mvsk')
        assert_(np.isfinite(mvsk).all())

        # Furthermore, the pdf is continuous as b -> 0:
        #   rice.pdf(x, b -> 0) = x exp(-x^2/2) + O(b^2)
        # see e.g. Abramovich & Stegun 9.6.7 & 9.6.10
        b = 1e-8
        assert_allclose(stats.rice.pdf(x, 0), stats.rice.pdf(x, b),
                        atol=b, rtol=0)

    def test_rice_rvs(self):
        draw = stats.rice.rvs
        assert_equal(draw(b=3.).size, 1)
        assert_equal(draw(b=3., size=(3, 5)).shape, (3, 5))
class TestErlang(TestCase):
    """Tests for the Erlang distribution (integer-shape gamma)."""

    def test_erlang_runtimewarning(self):
        # erlang should generate a RuntimeWarning if a non-integer
        # shape parameter is used.
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)

            # The non-integer shape parameter 1.3 should trigger a
            # RuntimeWarning (promoted to an error here).
            assert_raises(RuntimeWarning,
                          stats.erlang.rvs, 1.3, loc=0, scale=1, size=4)

            # Calling the fit method with `f0` set to an integer should
            # *not* trigger a RuntimeWarning.  It should return the same
            # values as gamma.fit(...).
            data = [0.5, 1.0, 2.0, 4.0]
            result_erlang = stats.erlang.fit(data, f0=1)
            result_gamma = stats.gamma.fit(data, f0=1)
            assert_allclose(result_erlang, result_gamma, rtol=1e-3)
class TestExponWeib(TestCase):
    """Tests for the exponentiated Weibull distribution."""

    def test_pdf_logpdf(self):
        # Regression test for gh-3508.
        x, a, c = 0.1, 1.0, 100.0
        # Expected values were computed with mpmath.
        assert_allclose([stats.exponweib.pdf(x, a, c),
                         stats.exponweib.logpdf(x, a, c)],
                        [1.0000000000000054e-97, -223.35075402042244])

    def test_a_is_1(self):
        # For issue gh-3508: when a=1 the pdf and logpdf of exponweib must
        # coincide with those of weibull_min.
        x = np.logspace(-4, -1, 4)
        a, c = 1, 100
        assert_allclose(stats.exponweib.pdf(x, a, c),
                        stats.weibull_min.pdf(x, c))
        assert_allclose(stats.exponweib.logpdf(x, a, c),
                        stats.weibull_min.logpdf(x, c))

    def test_a_is_1_c_is_1(self):
        # When a = 1 and c = 1, the distribution is exponential.
        x = np.logspace(-8, 1, 10)
        a = c = 1
        assert_allclose(stats.exponweib.pdf(x, a, c), stats.expon.pdf(x))
        assert_allclose(stats.exponweib.logpdf(x, a, c),
                        stats.expon.logpdf(x))
class TestRdist(TestCase):
    """Tests for the R-distribution."""

    @dec.slow
    def test_rdist_cdf_gh1285(self):
        # check workaround in rdist._cdf for issue gh-1285.
        # ppf followed by cdf should round-trip, even for a large shape
        # parameter where the naive implementation lost precision.
        distfn = stats.rdist
        values = [0.001, 0.5, 0.999]
        assert_almost_equal(distfn.cdf(distfn.ppf(values, 541.0), 541.0),
                            values, decimal=5)
def test_540_567():
    """Regression test for nan returned in tickets 540, 567."""
    cases = [
        (-1.7624320982, {}, 0.03899815971089126),
        (-1.7624320983, {}, 0.038998159702449846),
        (1.38629436112, dict(loc=0.950273420309, scale=0.204423758009),
         0.98353464004309321),
    ]
    for x, kwds, expected in cases:
        assert_almost_equal(stats.norm.cdf(x, **kwds), expected,
                            decimal=10, err_msg='test_540_567')
def test_regression_ticket_1316():
    # Constructing a distribution class used to raise, because
    # _construct_default_doc() did not handle the default keyword
    # extradoc=None.  See ticket #1316.
    stats._continuous_distns.gamma_gen(name='gamma')
def test_regression_ticket_1326():
    """chi2.pdf(0, 2) must be 0.5, not the nan produced by 0*log(0)."""
    assert_almost_equal(stats.chi2.pdf(0.0, 2), 0.5, 14)
def test_regression_tukey_lambda():
    # Make sure that Tukey-Lambda distribution correctly handles
    # non-positive lambdas.
    x = np.linspace(-5.0, 5.0, 101)

    olderr = np.seterr(divide='ignore')
    try:
        for lam in [0.0, -1.0, -2.0, np.array([[-1.0], [0.0], [-2.0]])]:
            p = stats.tukeylambda.pdf(x, lam)
            assert_((p != 0.0).all())
            # BUG FIX: was `assert_(~np.isnan(p).all())`, which only checks
            # that not *all* values are nan.  The intent is that *no* value
            # is nan.
            assert_(not np.isnan(p).any())

        lam = np.array([[-1.0], [0.0], [2.0]])
        p = stats.tukeylambda.pdf(x, lam)
    finally:
        np.seterr(**olderr)

    # Same fix as above: require that no entry is nan.
    assert_(not np.isnan(p).any())
    assert_((p[0] != 0.0).all())
    assert_((p[1] != 0.0).all())
    assert_((p[2] != 0.0).any())
    assert_((p[2] == 0.0).any())
@dec.skipif(DOCSTRINGS_STRIPPED)
def test_regression_ticket_1421():
    # poisson is a discrete distribution: its docstring must describe
    # pmf(...), not the continuous pdf(...) signature.
    assert_('pdf(x, mu, loc=0, scale=1)' not in stats.poisson.__doc__)
    assert_('pmf(x,' in stats.poisson.__doc__)
def test_nan_arguments_gh_issue_1362():
    """nan shape parameters / arguments must propagate to nan outputs."""
    with np.errstate(invalid='ignore'):
        # Continuous case: Student t with a nan shape parameter.
        for name in ('logcdf', 'cdf', 'logsf', 'sf', 'pdf', 'logpdf',
                     'ppf', 'isf'):
            assert_(np.isnan(getattr(stats.t, name)(1, np.nan)))

        # Discrete case: Bernoulli evaluated at a nan argument.
        for name in ('logcdf', 'cdf', 'logsf', 'sf', 'pmf', 'logpmf',
                     'ppf', 'isf'):
            assert_(np.isnan(getattr(stats.bernoulli, name)(np.nan, 0.5)))
def test_frozen_fit_ticket_1536():
    # Fitting with one parameter frozen must recover the remaining ones.
    np.random.seed(5678)
    true = np.array([0.25, 0., 0.5])
    x = stats.lognorm.rvs(true[0], true[1], true[2], size=100)
    with np.errstate(divide='ignore'):
        params = np.array(stats.lognorm.fit(x, floc=0.))
    assert_almost_equal(params, true, decimal=2)

    # Freeze scale, then shape (with loc guess or frozen loc) in turn.
    for frozen_kwds in (dict(fscale=0.5, loc=0),
                        dict(f0=0.25, loc=0),
                        dict(f0=0.25, floc=0)):
        params = np.array(stats.lognorm.fit(x, **frozen_kwds))
        assert_almost_equal(params, true, decimal=2)

    # With loc frozen, the ML scale estimate of a normal sample is the RMS
    # deviation about that loc.
    np.random.seed(5678)
    loc = 1
    floc = 0.9
    x = stats.norm.rvs(loc, 2., size=100)
    params = np.array(stats.norm.fit(x, floc=floc))
    expected = np.array([floc, np.sqrt(((x - floc)**2).mean())])
    assert_almost_equal(params, expected, decimal=4)
def test_regression_ticket_1530():
    # The fit starting value must work for the Cauchy distribution.
    np.random.seed(654321)
    sample = stats.cauchy.rvs(size=100)
    fitted = stats.cauchy.fit(sample)
    assert_almost_equal(fitted, (0.045, 1.142), decimal=1)
def test_gh_pr_4806():
    # Cauchy fit starting values must cope with large location offsets.
    np.random.seed(1234)
    sample = np.random.randn(42)
    for offset in (10000.0, 1222333444.0):
        loc, scale = stats.cauchy.fit(sample + offset)
        assert_allclose(loc, offset, atol=1.0)
        assert_allclose(scale, 0.6, atol=1.0)
def test_tukeylambda_stats_ticket_1545():
    # Variance and kurtosis of the Tukey Lambda distribution; more tests
    # live in test_tukeylamdba_stats.py.  lam=0 values are exact; the
    # others were computed with mpmath.
    cases = [(0, [0, np.pi**2/3, 0, 1.2]),
             (3.13, [0, 0.0269220858861465102, 0, -0.898062386219224104]),
             (0.14, [0, 2.11029702221450250, 0, -0.02708377353223019456])]
    for lam, expected in cases:
        mvsk = stats.tukeylambda.stats(lam, moments='mvsk')
        assert_almost_equal(mvsk, expected, decimal=10)
def test_poisson_logpmf_ticket_1436():
    # logpmf deep in the tail must stay finite instead of over/underflowing.
    tail_value = stats.poisson.logpmf(1500, 200)
    assert_(np.isfinite(tail_value))
def test_powerlaw_stats():
    """Test the powerlaw stats function (regression test for ticket 1548).

    Exact values, with E[X**k] = a / (a + k):
        mean      mu      = a / (a + 1)
        variance  sigma^2 = a / ((a + 2) * (a + 1)**2)
        skewness  gamma_1 = -2.0 * ((a - 1) / (a + 3)) * sqrt((a + 2) / a)
                  (from (E[X^3] - 3*mu*E[X^2] + 2*mu^3) / sigma^3, simplified)
        excess kurtosis (via mu_4 / sigma^4 - 3, with
                  mu_4 = 3*a*(3*a**2 - a + 2) / ((a+1)**4 * (a+2) * (a+3) * (a+4)))
                  gamma_2 = 6 * (a**3 - a**2 - 6*a + 2) / (a*(a+3)*(a+4))
    See http://en.wikipedia.org/wiki/Skewness and
    http://en.wikipedia.org/wiki/Kurtosis for the general formulas.
    """
    exact = {1.0: (0.5, 1./12, 0.0, -1.2),
             2.0: (2./3, 2./36, -0.56568542494924734, -0.6)}
    for a, expected_mvsk in exact.items():
        computed = stats.powerlaw.stats(a, moments="mvsk")
        assert_array_almost_equal(computed, expected_mvsk)
def test_powerlaw_edge():
    # Regression test for gh-3986: logpdf at the x=0 edge with a=1
    # must be exactly 0, not nan/-inf.
    assert_equal(stats.powerlaw.logpdf(0, 1), 0.0)
def test_exponpow_edge():
    # Regression test for gh-3982: logpdf at x=0 for b=1.
    assert_equal(stats.exponpow.logpdf(0, 1), 0.0)
    # pdf and logpdf at x=0 across the b < 1, b == 1, b > 1 regimes.
    shapes = [0.25, 1.0, 1.5]
    assert_equal(stats.exponpow.pdf(0, shapes), [np.inf, 1.0, 0.0])
    assert_equal(stats.exponpow.logpdf(0, shapes), [np.inf, 0.0, -np.inf])
def test_gengamma_edge():
    # Regression test for gh-3985: pdf at the x=0 edge with a=c=1.
    assert_equal(stats.gengamma.pdf(0, 1, 1), 1.0)
    # Regression tests for gh-4724: negative moments via _munp.
    # For c=1, E[X^n] = Gamma(a+n)/Gamma(a), so n=-2 gives 1/((a-1)(a-2)).
    assert_almost_equal(stats.gengamma._munp(-2, 200, 1.), 1./199/198)
    assert_almost_equal(stats.gengamma._munp(-2, 10, 1.), 1./9/8)
def test_ksone_fit_freeze():
    # Regression test for ticket #1638: this fit used to hang forever.
    # Finishing at all (warnings suppressed) is the test.
    d = np.array(
        [-0.18879233, 0.15734249, 0.18695107, 0.27908787, -0.248649,
         -0.2171497, 0.12233512, 0.15126419, 0.03119282, 0.4365294,
         0.08930393, -0.23509903, 0.28231224, -0.09974875, -0.25196048,
         0.11102028, 0.1427649, 0.10176452, 0.18754054, 0.25826724,
         0.05988819, 0.0531668, 0.21906056, 0.32106729, 0.2117662,
         0.10886442, 0.09375789, 0.24583286, -0.22968366, -0.07842391,
         -0.31195432, -0.21271196, 0.1114243, -0.13293002, 0.01331725,
         -0.04330977, -0.09485776, -0.28434547, 0.22245721, -0.18518199,
         -0.10943985, -0.35243174, 0.06897665, -0.03553363, -0.0701746,
         -0.06037974, 0.37670779, -0.21684405])
    with np.errstate(invalid='ignore'):
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', UserWarning)
            warnings.simplefilter('ignore', RuntimeWarning)
            stats.ksone.fit(d)
def test_norm_logcdf():
    # Precision of the normal logcdf far in the left tail; this precision
    # was enhanced in ticket 1614.  Reference values computed with R.
    x = -np.arange(0, 120, 4, dtype=float)
    expected = [-0.69314718, -10.36010149, -35.01343716, -75.41067300,
                -131.69539607, -203.91715537, -292.09872100, -396.25241451,
                -516.38564863, -652.50322759, -804.60844201, -972.70364403,
                -1156.79057310, -1356.87055173, -1572.94460885, -1805.01356068,
                -2053.07806561, -2317.13866238, -2597.19579746, -2893.24984493,
                -3205.30112136, -3533.34989701, -3877.39640444, -4237.44084522,
                -4613.48339520, -5005.52420869, -5413.56342187, -5837.60115548,
                -6277.63751711, -6733.67260303]
    with np.errstate(divide='ignore'):
        assert_allclose(stats.norm().logcdf(x), expected, atol=1e-8)
def test_levy_cdf_ppf():
    # levy.cdf, including very small arguments.  Expected values were
    # computed separately with mpmath at 100 digits, e.g.
    #   >>> mpmath.mp.dps = 100
    #   >>> cdf = mpmath.erfc(mpmath.sqrt(1/(2*mpmath.mpf('0.01'))))
    x = np.array([1000, 1.0, 0.5, 0.1, 0.01, 0.001])
    expected = np.array([0.9747728793699604,
                         0.3173105078629141,
                         0.1572992070502851,
                         0.0015654022580025495,
                         1.523970604832105e-23,
                         1.795832784800726e-219])
    assert_allclose(stats.levy.cdf(x), expected, rtol=1e-10)
    # The ppf must invert the cdf back to x.
    assert_allclose(stats.levy.ppf(expected), x, rtol=1e-13)
def test_hypergeom_interval_1802():
    # The first two confidence levels used to loop endlessly; .94 worked
    # before the fix as well.
    for alpha, expected in [(.95, (152.0, 197.0)),
                            (.945, (152.0, 197.0)),
                            (.94, (153.0, 196.0))]:
        assert_equal(stats.hypergeom.interval(alpha, 187601, 43192, 757),
                     expected)
    # Degenerate case: support collapses to a single point (.a == .b).
    assert_equal(stats.hypergeom.ppf(0.02, 100, 100, 8), 8)
    assert_equal(stats.hypergeom.ppf(1, 100, 100, 8), 8)
def test_distribution_too_many_args():
    # Surplus positional args to a distribution method must raise a
    # TypeError (regression test for ticket 1815).
    xx = np.linspace(0.1, 0.7, num=5)
    bad_calls = [
        (stats.gamma.pdf, (xx, 2, 3), dict(loc=1.0)),
        (stats.gamma.pdf, (xx, 2, 3, 4), dict(loc=1.0)),
        (stats.gamma.pdf, (xx, 2, 3, 4, 5), {}),
        (stats.gamma.pdf, (xx, 2, 3), dict(loc=1.0, scale=0.5)),
        (stats.gamma.rvs, (2., 3), dict(loc=1.0, scale=0.5)),
        (stats.gamma.cdf, (xx, 2., 3), dict(loc=1.0, scale=0.5)),
        (stats.gamma.ppf, (xx, 2., 3), dict(loc=1.0, scale=0.5)),
        (stats.gamma.stats, (2., 3), dict(loc=1.0, scale=0.5)),
        (stats.gamma.entropy, (2., 3), dict(loc=1.0, scale=0.5)),
        (stats.gamma.fit, (xx, 2., 3), dict(loc=1.0, scale=0.5)),
    ]
    for func, args, kwds in bad_calls:
        assert_raises(TypeError, func, *args, **kwds)
    # These should not give errors.
    stats.gamma.pdf(xx, 2, 3)  # loc=3
    stats.gamma.pdf(xx, 2, 3, 4)  # loc=3, scale=4
    stats.gamma.stats(2., 3)
    stats.gamma.stats(2., 3, 4)
    stats.gamma.stats(2., 3, 4, 'mv')
    stats.gamma.rvs(2., 3, 4, 5)
    stats.gamma.fit(stats.gamma.rvs(2., size=7), 2.)
    # Also for a discrete distribution.
    stats.geom.pmf(xx, 2, loc=3)  # no error, loc=3
    assert_raises(TypeError, stats.geom.pmf, xx, 2, 3, 4)
    assert_raises(TypeError, stats.geom.pmf, xx, 2, 3, loc=4)
    # And for distributions with 0, 2 and 3 shape args respectively.
    assert_raises(TypeError, stats.expon.pdf, xx, 3, loc=1.0)
    assert_raises(TypeError, stats.exponweib.pdf, xx, 3, 4, 5, loc=1.0)
    assert_raises(TypeError, stats.exponweib.pdf, xx, 3, 4, 5, 0.1, 0.1)
    assert_raises(TypeError, stats.ncf.pdf, xx, 3, 4, 5, 6, loc=1.0)
    assert_raises(TypeError, stats.ncf.pdf, xx, 3, 4, 5, 6, 1.0, scale=0.5)
    stats.ncf.pdf(xx, 3, 4, 5, 6, 1.0)  # 3 shape args, plus loc/scale
def test_ncx2_tails_ticket_955():
    # Trac #955: the cdf computed by special functions must agree with
    # the (vectorized) integrated pdf.
    grid = np.arange(20, 25, 0.2)
    special_cdf = stats.ncx2.cdf(grid, 2, 1.07458615e+02)
    integrated_cdf = stats.ncx2._cdfvec(grid, 2, 1.07458615e+02)
    assert_allclose(special_cdf, integrated_cdf, rtol=1e-3, atol=0)
def test_foldnorm_zero():
    # The shape value c=0 used to be rejected and rv.cdf(0) gave nan,
    # see gh-2399.
    frozen = stats.foldnorm(0, scale=1)
    assert_equal(frozen.cdf(0), 0)
def test_stats_shapes_argcheck():
    # The stats method used to fail for vector shapes when some values
    # were outside the allowed range, see gh-2678.  Invalid entries must
    # come back as nan while the valid ones match an all-valid call.
    with_bad_first = stats.invgamma.stats([0.0, 0.5, 1.0], 1, 0.5)  # 0: bad `a`
    valid_only = stats.invgamma.stats([0.5, 1.0], 1, 0.5)
    padded = tuple(np.r_[np.nan, piece] for piece in valid_only)
    assert_equal(padded, with_bad_first)

    with_bad_last = stats.lognorm.stats([2, 2.4, -1])  # -1: bad shape
    valid_only = stats.lognorm.stats([2, 2.4])
    padded = tuple(np.r_[piece, np.nan] for piece in valid_only)
    assert_equal(padded, with_bad_last)
    # FIXME: this is only a quick-and-dirty test of a quick-and-dirty bugfix.
    # stats method with multiple shape parameters is not properly vectorized
    # anyway, so some distributions may or may not fail.
# Test subclassing distributions w/ explicit shapes
class _distr_gen(stats.rv_continuous):
    # One shape parameter; constant (unnormalized) pdf makes results
    # trivial to assert on in the subclassing tests below.
    def _pdf(self, x, a):
        return 42


class _distr2_gen(stats.rv_continuous):
    # One shape parameter, defined via _cdf only; pdf falls back to a
    # numerical derivative of this.
    def _cdf(self, x, a):
        return 42 * a + x


class _distr3_gen(stats.rv_continuous):
    # Deliberately inconsistent: _pdf takes two shape params, _cdf one.
    def _pdf(self, x, a, b):
        return a + b

    def _cdf(self, x, a):
        # Different # of shape params from _pdf, to be able to check that
        # inspection catches the inconsistency."""
        return 42 * a + x


class _distr6_gen(stats.rv_continuous):
    # Two shape parameters (both _pdf and _cdf defined, consistent shapes.)
    def _pdf(self, x, a, b):
        return a*x + b

    def _cdf(self, x, a, b):
        return 42 * a + x
class TestSubclassingExplicitShapes(TestCase):
    # Construct a distribution w/ explicit shapes parameter and test it.

    def test_correct_shapes(self):
        dummy_distr = _distr_gen(name='dummy', shapes='a')
        assert_equal(dummy_distr.pdf(1, a=1), 42)

    def test_wrong_shapes_1(self):
        dummy_distr = _distr_gen(name='dummy', shapes='A')
        assert_raises(TypeError, dummy_distr.pdf, 1, **dict(a=1))

    def test_wrong_shapes_2(self):
        dummy_distr = _distr_gen(name='dummy', shapes='a, b, c')
        dct = dict(a=1, b=2, c=3)
        assert_raises(TypeError, dummy_distr.pdf, 1, **dct)

    def test_shapes_string(self):
        # shapes must be a string
        dct = dict(name='dummy', shapes=42)
        assert_raises(TypeError, _distr_gen, **dct)

    def test_shapes_identifiers_1(self):
        # shapes must be a comma-separated list of valid python identifiers
        dct = dict(name='dummy', shapes='(!)')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_identifiers_2(self):
        dct = dict(name='dummy', shapes='4chan')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_identifiers_3(self):
        dct = dict(name='dummy', shapes='m(fti)')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_identifiers_nodefaults(self):
        dct = dict(name='dummy', shapes='a=2')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_args(self):
        dct = dict(name='dummy', shapes='*args')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_kwargs(self):
        dct = dict(name='dummy', shapes='**kwargs')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_keywords(self):
        # python keywords cannot be used for shape parameters
        dct = dict(name='dummy', shapes='a, b, c, lambda')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_signature(self):
        # test explicit shapes which agree w/ the signature of _pdf
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a):
                return stats.norm._pdf(x) * a

        dist = _dist_gen(shapes='a')
        assert_equal(dist.pdf(0.5, a=2), stats.norm.pdf(0.5)*2)

    def test_shapes_signature_inconsistent(self):
        # test explicit shapes which do not agree w/ the signature of _pdf
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a):
                return stats.norm._pdf(x) * a

        dist = _dist_gen(shapes='a, b')
        assert_raises(TypeError, dist.pdf, 0.5, **dict(a=1, b=2))

    def test_star_args(self):
        # test _pdf with only starargs
        # NB: **kwargs of pdf will never reach _pdf
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, *args):
                extra_kwarg = args[0]
                return stats.norm._pdf(x) * extra_kwarg

        dist = _dist_gen(shapes='extra_kwarg')
        assert_equal(dist.pdf(0.5, extra_kwarg=33), stats.norm.pdf(0.5)*33)
        assert_equal(dist.pdf(0.5, 33), stats.norm.pdf(0.5)*33)
        assert_raises(TypeError, dist.pdf, 0.5, **dict(xxx=33))

    def test_star_args_2(self):
        # test _pdf with named & starargs
        # NB: **kwargs of pdf will never reach _pdf
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, offset, *args):
                extra_kwarg = args[0]
                return stats.norm._pdf(x) * extra_kwarg + offset

        dist = _dist_gen(shapes='offset, extra_kwarg')
        assert_equal(dist.pdf(0.5, offset=111, extra_kwarg=33),
                     stats.norm.pdf(0.5)*33 + 111)
        assert_equal(dist.pdf(0.5, 111, 33),
                     stats.norm.pdf(0.5)*33 + 111)

    def test_extra_kwarg(self):
        # **kwargs to _pdf are ignored.
        # this is a limitation of the framework (_pdf(x, *goodargs))
        class _distr_gen(stats.rv_continuous):
            def _pdf(self, x, *args, **kwargs):
                # _pdf should handle *args, **kwargs itself. Here "handling" is
                # ignoring *args and looking for ``extra_kwarg`` and using that.
                extra_kwarg = kwargs.pop('extra_kwarg', 1)
                return stats.norm._pdf(x) * extra_kwarg

        dist = _distr_gen(shapes='extra_kwarg')
        assert_equal(dist.pdf(1, extra_kwarg=3), stats.norm.pdf(1))

    def test_shapes_empty_string(self):
        # shapes='' is equivalent to shapes=None
        # BUG FIX: this method was named ``shapes_empty_string`` -- without
        # the ``test_`` prefix the runner never collected it, so the check
        # silently never ran.  Renamed so it executes.
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x):
                return stats.norm.pdf(x)

        dist = _dist_gen(shapes='')
        assert_equal(dist.pdf(0.5), stats.norm.pdf(0.5))
class TestSubclassingNoShapes(TestCase):
    # Construct a distribution w/o explicit shapes parameter and test it.
    # Shape parameters must then be inferred by inspecting _pdf/_cdf
    # signatures of the module-level helper classes above.
    def test_only__pdf(self):
        dummy_distr = _distr_gen(name='dummy')
        assert_equal(dummy_distr.pdf(1, a=1), 42)

    def test_only__cdf(self):
        # _pdf is determined from _cdf by taking numerical derivative
        dummy_distr = _distr2_gen(name='dummy')
        assert_almost_equal(dummy_distr.pdf(1, a=1), 1)

    @dec.skipif(DOCSTRINGS_STRIPPED)
    def test_signature_inspection(self):
        # check that _pdf signature inspection works correctly, and is used in
        # the class docstring
        dummy_distr = _distr_gen(name='dummy')
        assert_equal(dummy_distr.numargs, 1)
        assert_equal(dummy_distr.shapes, 'a')
        res = re.findall('logpdf\(x, a, loc=0, scale=1\)',
                         dummy_distr.__doc__)
        assert_(len(res) == 1)

    @dec.skipif(DOCSTRINGS_STRIPPED)
    def test_signature_inspection_2args(self):
        # same for 2 shape params and both _pdf and _cdf defined
        dummy_distr = _distr6_gen(name='dummy')
        assert_equal(dummy_distr.numargs, 2)
        assert_equal(dummy_distr.shapes, 'a, b')
        res = re.findall('logpdf\(x, a, b, loc=0, scale=1\)',
                         dummy_distr.__doc__)
        assert_(len(res) == 1)

    def test_signature_inspection_2args_incorrect_shapes(self):
        # both _pdf and _cdf defined, but shapes are inconsistent: raises
        try:
            _distr3_gen(name='dummy')
        except TypeError:
            pass
        else:
            raise AssertionError('TypeError not raised.')

    def test_defaults_raise(self):
        # default arguments should raise
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a=42):
                return 42
        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))

    def test_starargs_raise(self):
        # without explicit shapes, *args are not allowed
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a, *args):
                return 42
        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))

    def test_kwargs_raise(self):
        # without explicit shapes, **kwargs are not allowed
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a, **kwargs):
                return 42
        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
@dec.skipif(DOCSTRINGS_STRIPPED)
def test_docstrings():
    # Every distribution docstring must be free of formatting artifacts:
    # double commas, an open paren followed by a comma, or a line that
    # starts with a colon.
    badones = [',\s*,', '\(\s*,', '^\s*:']
    for distname in stats.__all__:
        dist = getattr(stats, distname)
        if isinstance(dist, (stats.rv_discrete, stats.rv_continuous)):
            for regex in badones:
                assert_(re.search(regex, dist.__doc__) is None)
def test_infinite_input():
    # x = inf must yield the distributions' limiting values (0 for sf,
    # 1 for cdf), not overflow.
    assert_almost_equal(stats.skellam.sf(np.inf, 10, 11), 0)
    assert_almost_equal(stats.ncx2._cdf(np.inf, 8, 0.1), 1)
def test_lomax_accuracy():
    # Regression test for gh-4033: ppf(cdf(x)) must round-trip tiny x.
    tiny = 1e-100
    roundtrip = stats.lomax.ppf(stats.lomax.cdf(tiny, 1), 1)
    assert_allclose(roundtrip, tiny)
def test_gompertz_accuracy():
    # Regression test for gh-4031: ppf(cdf(x)) must round-trip tiny x.
    tiny = 1e-100
    roundtrip = stats.gompertz.ppf(stats.gompertz.cdf(tiny, 1), 1)
    assert_allclose(roundtrip, tiny)
def test_truncexpon_accuracy():
    # Regression test for gh-4035: ppf(cdf(x)) must round-trip tiny x.
    tiny = 1e-100
    roundtrip = stats.truncexpon.ppf(stats.truncexpon.cdf(tiny, 1), 1)
    assert_allclose(roundtrip, tiny)
def test_rayleigh_accuracy():
    # Regression test for gh-4034: isf(sf(x)) must round-trip accurately.
    roundtrip = stats.rayleigh.isf(stats.rayleigh.sf(9, 1), 1)
    assert_almost_equal(roundtrip, 9.0, decimal=15)
if __name__ == "__main__":
    # Allow running this test module directly (nose-style runner).
    run_module_suite()
|
valexandersaulys/airbnb_kaggle_contest
|
venv/lib/python3.4/site-packages/scipy/stats/tests/test_distributions.py
|
Python
|
gpl-2.0
| 86,559
|
[
"Gaussian"
] |
028b141b0f159c49c34c75f0a764e3e4535d57baf808f56027faff5495486f3d
|
# -*- coding: iso-8859-15 -*-
"""This is just here to make pylint happy """
# Copyright (C) 2002-2009 Tobias Klausmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330 Boston, MA
import fpformat
import math
import re
import urllib2
__author__ = "klausman-pymetar@schwarzvogel.de"
__version__ = "0.15"
__revision__ = "$Rev: 84 $"[6:-2]
__doc__ = """Pymetar v%s (c) 2002-2009 Tobias Klausmann
Pymetar is a python module and command line tool designed to fetch Metar
reports from the NOAA (http://www.noaa.gov) and allow access to the
included weather information.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
Please e-mail bugs to: %s""" % (__version__, __author__)
# Cloud cover group: coverage code optionally followed by a 3-digit height.
CLOUD_RE_STR = r"^(CAVOK|CLR|SKC|BKN|SCT|FEW|OVC|NSC)([0-9]{3})?$"
# Present-weather group: optional intensity (-/+), optional descriptor,
# mandatory phenomenon code.
# NOTE(review): inside this raw string, '\\+' matches backslash characters
# rather than a literal '+', and the backslash-newline continuation leaves
# a '\<newline>' escape in the pattern.  This looks garbled relative to
# upstream pymetar -- confirm against the original source before relying
# on it for '+' intensity prefixes.
COND_RE_STR = r"^(-|\\+)?(VC|MI|BC|PR|TS|BL|SH|DR|FZ)?\
(DZ|RA|SN|SG|IC|PE|GR|GS|UP|BR|FG|FU|VA|SA|HZ|PY|DU|SQ|SS|DS|PO|\\+?FC)$"
class EmptyReportException(Exception):
    """Raised when the ReportParser is fed an empty report."""
    pass
class EmptyIDException(Exception):
    """Raised when the ReportFetcher is called with an empty station ID."""
    pass
class NetworkException(Exception):
    """Raised when a network error occurs while fetching a report."""
    pass
# What a boring list to type !
#
# It seems the NOAA doesn't want to return plain text, but considering the
# format of their response, this is not to save bandwidth :-)
_WeatherConditions = {
"DZ" : ("Drizzle", "rain", {
"" : "Moderate drizzle",
"-" : "Light drizzle",
"+" : "Heavy drizzle",
"VC" : "Drizzle in the vicinity",
"MI" : "Shallow drizzle",
"BC" : "Patches of drizzle",
"PR" : "Partial drizzle",
"TS" : ("Thunderstorm", "storm"),
"BL" : "Windy drizzle",
"SH" : "Showers",
"DR" : "Drifting drizzle",
"FZ" : "Freezing drizzle",
}),
"RA" : ("Rain", "rain", {
"" : "Moderate rain",
"-" : "Light rain",
"+" : "Heavy rain",
"VC" : "Rain in the vicinity",
"MI" : "Shallow rain",
"BC" : "Patches of rain",
"PR" : "Partial rainfall",
"TS" : ("Thunderstorm", "storm"),
"BL" : "Blowing rainfall",
"SH" : "Rain showers",
"DR" : "Drifting rain",
"FZ" : "Freezing rain",
}),
"SN" : ("Snow", "snow", {
"" : "Moderate snow",
"-" : "Light snow",
"+" : "Heavy snow",
"VC" : "Snow in the vicinity",
"MI" : "Shallow snow",
"BC" : "Patches of snow",
"PR" : "Partial snowfall",
"TS" : ("Snowstorm", "storm"),
"BL" : "Blowing snowfall",
"SH" : "Snowfall showers",
"DR" : "Drifting snow",
"FZ" : "Freezing snow",
}),
"SG" : ("Snow grains", "snow", {
"" : "Moderate snow grains",
"-" : "Light snow grains",
"+" : "Heavy snow grains",
"VC" : "Snow grains in the vicinity",
"MI" : "Shallow snow grains",
"BC" : "Patches of snow grains",
"PR" : "Partial snow grains",
"TS" : ("Snowstorm", "storm"),
"BL" : "Blowing snow grains",
"SH" : "Snow grain showers",
"DR" : "Drifting snow grains",
"FZ" : "Freezing snow grains",
}),
"IC" : ("Ice crystals", "snow", {
"" : "Moderate ice crystals",
"-" : "Few ice crystals",
"+" : "Heavy ice crystals",
"VC" : "Ice crystals in the vicinity",
"BC" : "Patches of ice crystals",
"PR" : "Partial ice crystals",
"TS" : ("Ice crystal storm", "storm"),
"BL" : "Blowing ice crystals",
"SH" : "Showers of ice crystals",
"DR" : "Drifting ice crystals",
"FZ" : "Freezing ice crystals",
}),
"PE" : ("Ice pellets", "snow", {
"" : "Moderate ice pellets",
"-" : "Few ice pellets",
"+" : "Heavy ice pellets",
"VC" : "Ice pellets in the vicinity",
"MI" : "Shallow ice pellets",
"BC" : "Patches of ice pellets",
"PR" : "Partial ice pellets",
"TS" : ("Ice pellets storm", "storm"),
"BL" : "Blowing ice pellets",
"SH" : "Showers of ice pellets",
"DR" : "Drifting ice pellets",
"FZ" : "Freezing ice pellets",
}),
"GR" : ("Hail", "rain", {
"" : "Moderate hail",
"-" : "Light hail",
"+" : "Heavy hail",
"VC" : "Hail in the vicinity",
"MI" : "Shallow hail",
"BC" : "Patches of hail",
"PR" : "Partial hail",
"TS" : ("Hailstorm", "storm"),
"BL" : "Blowing hail",
"SH" : "Hail showers",
"DR" : "Drifting hail",
"FZ" : "Freezing hail",
}),
"GS" : ("Small hail", "rain", {
"" : "Moderate small hail",
"-" : "Light small hail",
"+" : "Heavy small hail",
"VC" : "Small hail in the vicinity",
"MI" : "Shallow small hail",
"BC" : "Patches of small hail",
"PR" : "Partial small hail",
"TS" : ("Small hailstorm", "storm"),
"BL" : "Blowing small hail",
"SH" : "Showers of small hail",
"DR" : "Drifting small hail",
"FZ" : "Freezing small hail",
}),
"UP" : ("Precipitation", "rain", {
"" : "Moderate precipitation",
"-" : "Light precipitation",
"+" : "Heavy precipitation",
"VC" : "Precipitation in the vicinity",
"MI" : "Shallow precipitation",
"BC" : "Patches of precipitation",
"PR" : "Partial precipitation",
"TS" : ("Unknown thunderstorm", "storm"),
"BL" : "Blowing precipitation",
"SH" : "Showers, type unknown",
"DR" : "Drifting precipitation",
"FZ" : "Freezing precipitation",
}),
"BR" : ("Mist", "fog", {
"" : "Moderate mist",
"-" : "Light mist",
"+" : "Thick mist",
"VC" : "Mist in the vicinity",
"MI" : "Shallow mist",
"BC" : "Patches of mist",
"PR" : "Partial mist",
"BL" : "Mist with wind",
"DR" : "Drifting mist",
"FZ" : "Freezing mist",
}),
"FG" : ("Fog", "fog", {
"" : "Moderate fog",
"-" : "Light fog",
"+" : "Thick fog",
"VC" : "Fog in the vicinity",
"MI" : "Shallow fog",
"BC" : "Patches of fog",
"PR" : "Partial fog",
"BL" : "Fog with wind",
"DR" : "Drifting fog",
"FZ" : "Freezing fog",
}),
"FU" : ("Smoke", "fog", {
"" : "Moderate smoke",
"-" : "Thin smoke",
"+" : "Thick smoke",
"VC" : "Smoke in the vicinity",
"MI" : "Shallow smoke",
"BC" : "Patches of smoke",
"PR" : "Partial smoke",
"TS" : ("Smoke w/ thunders", "storm"),
"BL" : "Smoke with wind",
"DR" : "Drifting smoke",
}),
"VA" : ("Volcanic ash", "fog", {
"" : "Moderate volcanic ash",
"+" : "Thick volcanic ash",
"VC" : "Volcanic ash in the vicinity",
"MI" : "Shallow volcanic ash",
"BC" : "Patches of volcanic ash",
"PR" : "Partial volcanic ash",
"TS" : ("Volcanic ash w/ thunders", "storm"),
"BL" : "Blowing volcanic ash",
"SH" : "Showers of volcanic ash",
"DR" : "Drifting volcanic ash",
"FZ" : "Freezing volcanic ash",
}),
"SA" : ("Sand", "fog", {
"" : "Moderate sand",
"-" : "Light sand",
"+" : "Heavy sand",
"VC" : "Sand in the vicinity",
"BC" : "Patches of sand",
"PR" : "Partial sand",
"BL" : "Blowing sand",
"DR" : "Drifting sand",
}),
"HZ" : ("Haze", "fog", {
"" : "Moderate haze",
"-" : "Light haze",
"+" : "Thick haze",
"VC" : "Haze in the vicinity",
"MI" : "Shallow haze",
"BC" : "Patches of haze",
"PR" : "Partial haze",
"BL" : "Haze with wind",
"DR" : "Drifting haze",
"FZ" : "Freezing haze",
}),
"PY" : ("Sprays", "fog", {
"" : "Moderate sprays",
"-" : "Light sprays",
"+" : "Heavy sprays",
"VC" : "Sprays in the vicinity",
"MI" : "Shallow sprays",
"BC" : "Patches of sprays",
"PR" : "Partial sprays",
"BL" : "Blowing sprays",
"DR" : "Drifting sprays",
"FZ" : "Freezing sprays",
}),
"DU" : ("Dust", "fog", {
"" : "Moderate dust",
"-" : "Light dust",
"+" : "Heavy dust",
"VC" : "Dust in the vicinity",
"BC" : "Patches of dust",
"PR" : "Partial dust",
"BL" : "Blowing dust",
"DR" : "Drifting dust",
}),
"SQ" : ("Squall", "storm", {
"" : "Moderate squall",
"-" : "Light squall",
"+" : "Heavy squall",
"VC" : "Squall in the vicinity",
"PR" : "Partial squall",
"TS" : "Thunderous squall",
"BL" : "Blowing squall",
"DR" : "Drifting squall",
"FZ" : "Freezing squall",
}),
"SS" : ("Sandstorm", "fog", {
"" : "Moderate sandstorm",
"-" : "Light sandstorm",
"+" : "Heavy sandstorm",
"VC" : "Sandstorm in the vicinity",
"MI" : "Shallow sandstorm",
"PR" : "Partial sandstorm",
"TS" : ("Thunderous sandstorm", "storm"),
"BL" : "Blowing sandstorm",
"DR" : "Drifting sandstorm",
"FZ" : "Freezing sandstorm",
}),
"DS" : ("Duststorm", "fog", {
"" : "Moderate duststorm",
"-" : "Light duststorm",
"+" : "Heavy duststorm",
"VC" : "Duststorm in the vicinity",
"MI" : "Shallow duststorm",
"PR" : "Partial duststorm",
"TS" : ("Thunderous duststorm", "storm"),
"BL" : "Blowing duststorm",
"DR" : "Drifting duststorm",
"FZ" : "Freezing duststorm",
}),
"PO" : ("Dustwhirls", "fog", {
"" : "Moderate dustwhirls",
"-" : "Light dustwhirls",
"+" : "Heavy dustwhirls",
"VC" : "Dustwhirls in the vicinity",
"MI" : "Shallow dustwhirls",
"BC" : "Patches of dustwhirls",
"PR" : "Partial dustwhirls",
"BL" : "Blowing dustwhirls",
"DR" : "Drifting dustwhirls",
}),
"+FC" : ("Tornado", "storm", {
"" : "Moderate tornado",
"+" : "Raging tornado",
"VC" : "Tornado in the vicinity",
"PR" : "Partial tornado",
"TS" : "Thunderous tornado",
"BL" : "Tornado",
"DR" : "Drifting tornado",
"FZ" : "Freezing tornado",
}),
"FC" : ("Funnel cloud", "fog", {
"" : "Moderate funnel cloud",
"-" : "Light funnel cloud",
"+" : "Thick funnel cloud",
"VC" : "Funnel cloud in the vicinity",
"MI" : "Shallow funnel cloud",
"BC" : "Patches of funnel cloud",
"PR" : "Partial funnel cloud",
"BL" : "Funnel cloud w/ wind",
"DR" : "Drifting funnel cloud",
}),
}
def metar_to_iso8601(metardate):
    """Convert a METAR timestamp ('YYYY.MM.DD HHMM UTC') to ISO8601.

    Returns None when metardate is None.
    """
    if metardate is None:
        return None
    date_part, time_part = metardate.split()[:2]
    year, month, day = date_part.split('.')
    # The feed's timezone is always 'UTC', i.e. 'Z'.
    return "%s-%s-%s %s:%s:00Z" % (year, month, day,
                                   time_part[:2], time_part[2:4])
def parseLatLong(latlong):
    """
    Parse Lat or Long in METAR notation into float values. N and E
    are +, S and W are -. Expects one positional string and returns
    one float value (or None if latlong is None).

    The input looks like "dd-mm[-ss]D", e.g. "51-14N" or "52-18-30N":
    degrees, minutes, optional seconds, and a direction letter.
    """
    # I know, I could invert this if and put
    # the rest of the function into its block,
    # but I find it to be more readable this way
    if latlong is None:
        return None
    s = latlong.upper().strip()
    elms = s.split('-')
    # Direction letter is the last character of the last field.
    ud = elms[-1][-1]
    elms[-1] = elms[-1][:-1]
    elms = [int(i) for i in elms]
    coords = 0.0
    elen = len(elms)
    if elen > 2:
        coords = coords + float(elms[2])/3600.0
    if elen > 1:
        coords = coords + float(elms[1])/60.0
    coords = coords + float(elms[0])
    if ud in ('W', 'S'):
        coords = -1.0*coords
    # Round the fractional part to the precision the input carried:
    # 5 significant digits with seconds, 3 with minutes only.
    # FIX: this used fpformat.sci(), a module deprecated since Python 2.6
    # and removed in Python 3.  "%.Ne" formatting yields the same
    # scientific-notation rounding.
    f, i = math.modf(coords)
    if elen > 2:
        f = float("%.4e" % f)
    elif elen > 1:
        f = float("%.2e" % f)
    else:
        # Degrees only: the integer fields carry no fractional part.
        f = 0.0
    return f+i
class WeatherReport:
"""Incorporates both the unparsed textual representation of the
weather report and the parsed values as soon as they are filled
in by ReportParser."""
    def _ClearAllFields(self):
        """Reset every parsed field to None and mark the report invalid."""
        # until finished, report is invalid
        self.valid = 0
        # Clear all
        self.givenstationid = None
        self.fullreport = None       # complete textual report
        self.temp = None             # temperature, deg C
        self.tempf = None            # temperature, deg F
        self.windspeed = None        # wind speed, m/s
        self.winddir = None          # wind direction, degrees
        self.vis = None              # visibility, km
        self.dewp = None             # dewpoint, deg C
        self.dewpf = None            # dewpoint, deg F
        self.humid = None            # relative humidity, percent
        self.press = None            # pressure, hPa
        self.code = None             # raw encoded METAR
        self.weather = None
        self.sky = None
        self.fulln = None            # full station name
        self.cycle = None
        self.windcomp = None         # compass wind direction, e.g. 'NNE'
        self.rtime = None
        self.pixmap = None
        self.latitude = None         # 'dd-mm[-ss]D' string
        self.longitude = None        # 'dd-mm[-ss]D' string
        self.altitude = None         # meters above sea level
        self.stat_city = None
        self.stat_country = None
        self.reporturl = None
        self.latf = None             # latitude as float
        self.longf = None            # longitude as float
        # NOTE(review): windspeedmph / windspeedknots are returned by
        # getters below but are not reset here -- presumably set by
        # ReportParser; confirm before relying on them pre-parse.
    def __init__(self, MetarStationCode = None):
        """Clear all fields and fill in wanted station id."""
        # All values stay None until a ReportParser fills them in.
        self._ClearAllFields()
        self.givenstationid = MetarStationCode
    def getFullReport(self):
        """ Return the complete weather report (None until parsed). """
        return self.fullreport

    def getTemperatureCelsius(self):
        """
        Return the temperature in degrees Celsius.
        """
        return self.temp

    def getTemperatureFahrenheit(self):
        """
        Return the temperature in degrees Fahrenheit.
        """
        return self.tempf

    def getDewPointCelsius(self):
        """
        Return dewpoint in degrees Celsius.
        """
        return self.dewp

    def getDewPointFahrenheit(self):
        """
        Return dewpoint in degrees Fahrenheit.
        """
        return self.dewpf

    def getWindSpeed(self):
        """
        Return the wind speed in meters per second.
        """
        return self.windspeed
    def getWindSpeedMilesPerHour(self):
        """
        Return the wind speed in miles per hour (None if no wind speed
        was parsed).
        """
        # NOTE(review): windspeedmph is not initialized in _ClearAllFields;
        # presumably ReportParser sets it alongside windspeed -- confirm.
        if self.windspeed is not None:
            return self.windspeedmph

    def getWindSpeedBeaufort(self):
        """
        Return the wind speed in the Beaufort scale
        cf. http://en.wikipedia.org/wiki/Beaufort_scale
        """
        # Inverts the empirical relation v = 0.8359648 * B**(3/2).
        if self.windspeed is not None:
            return round(math.pow(self.windspeed/0.8359648, 2/3.0))

    def getWindSpeedKnots(self):
        """
        Return the wind speed in knots (None if no wind speed was parsed).
        """
        # NOTE(review): windspeedknots is not initialized in
        # _ClearAllFields either; see getWindSpeedMilesPerHour.
        if self.windspeed is not None:
            return self.windspeedknots

    def getWindDirection(self):
        """
        Return wind direction in degrees.
        """
        return self.winddir

    def getWindCompass(self):
        """
        Return wind direction as compass direction
        (e.g. NE or SSE)
        """
        return self.windcomp
    def getVisibilityKilometers(self):
        """
        Return visibility in km.
        """
        if self.vis is not None:
            return self.vis

    def getVisibilityMiles(self):
        """
        Return visibility in (statute) miles.
        """
        # self.vis is stored in km; 1 mile = 1.609344 km.
        if self.vis is not None:
            return self.vis / 1.609344

    def getHumidity(self):
        """
        Return relative humidity in percent.
        """
        return self.humid

    def getPressure(self):
        """
        Return pressure in hPa.
        """
        return self.press
def getRawMetarCode(self):
"""
Return the encoded weather report.
"""
return self.code
def getWeather(self):
"""
Return short weather conditions
"""
return self.weather
def getSkyConditions(self):
"""
Return sky conditions
"""
return self.sky
def getStationName(self):
"""
Return full station name
"""
return self.fulln
def getStationCity(self):
"""
Return city-part of station name
"""
return self.stat_city
def getStationCountry(self):
"""
Return country-part of station name
"""
return self.stat_country
def getCycle(self):
    """The observation's cycle slot.

    This is not a frequency or delay between observations but the
    "time slot" the observation falls into: there are 24 slots per day,
    usually running N:45 to N+1:45; the 23:45-0:45 slot is cycle 0.
    """
    return self.cycle
def getStationPosition(self):
    """Station position as a (latitude, longitude, altitude) tuple.

    Latitude and longitude are "xx-yy[-ss]d" strings (degrees, minutes,
    optional seconds, and a direction letter: N/S for latitude, W/E for
    longitude), e.g. "52-18N".  Altitude is meters above sea level,
    rendered as a string for consistency; stations that do not report
    altitude yield the string "None".  For float values use
    getStationPositionFloat() instead.
    """
    # convert self.altitude to string for consistency
    return (self.latitude, self.longitude, "%s" % self.altitude)
def getStationPositionFloat(self):
    """Station position as a (lat, long, alt) tuple with float lat/long."""
    return (self.latf, self.longf, self.altitude)
def getStationLatitude(self):
    """Station latitude as a "dd-mm[-ss]D" string.

    dd = degrees, mm = minutes, ss = optional seconds,
    D = direction letter (N, S, E, W).
    """
    return self.latitude
def getStationLatitudeFloat(self):
    """Station latitude as a float."""
    return self.latf
def getStationLongitude(self):
    """Station longitude as a "dd-mm[-ss]D" string.

    dd = degrees, mm = minutes, ss = optional seconds,
    D = direction letter (N, S, E, W).
    """
    return self.longitude
def getStationLongitudeFloat(self):
    """Station longitude as a float."""
    return self.longf
def getStationAltitude(self):
    """Station altitude above sea level, in meters."""
    return self.altitude
def getReportURL(self):
    """URL from which this report was fetched."""
    return self.reporturl
def getTime(self):
    """Time the observation was made (not when we fetched it).

    Format: "YYYY.MM.DD HHMM UTC", e.g. "2002.04.01 1020 UTC".
    """
    return self.rtime
def getISOTime(self):
    """Observation time converted to ISO 8601 (e.g. 2002-07-25 15:12:00Z)."""
    return metar_to_iso8601(self.rtime)
def getPixmap(self):
    """Suggested weather-icon pixmap name (no file extension)."""
    return self.pixmap
class ReportParser:
    """Parse raw METAR data from a WeatherReport object into actual
    values and return the object with the values filled in."""
    # NOTE: relies on module-level names defined elsewhere in this file:
    # CLOUD_RE_STR, COND_RE_STR, _WeatherConditions, parseLatLong and
    # EmptyReportException.  Python 2 syntax (raise X, msg) throughout.

    def __init__(self, MetarReport = None):
        """Set attribute Report as specified on instantation."""
        self.Report = MetarReport

    def extractCloudInformation(self) :
        """
        Extract cloud information. Return None or a tuple (sky type as a
        string of text and suggested pixmap name)
        """
        wcloud = self.match_WeatherPart(CLOUD_RE_STR)
        if wcloud is not None :
            # first three letters encode the cloud coverage group
            stype = wcloud[:3]
            if stype in ("CLR", "SKC", "CAV", "NSC"):
                return ("Clear sky", "sun")
            elif stype == "BKN" :
                return ("Broken clouds", "suncloud")
            elif stype == "SCT" :
                return ("Scattered clouds", "suncloud")
            elif stype == "FEW" :
                return ("Few clouds", "suncloud")
            elif stype == "OVC" :
                return ("Overcast", "cloud")
        else:
            return None # Not strictly necessary

    def extractSkyConditions(self) :
        """
        Extract sky condition information from the encoded report. Return
        a tuple containing the description of the sky conditions as a
        string and a suggested pixmap name for an icon representing said
        sky condition.
        """
        wcond = self.match_WeatherPart(COND_RE_STR)
        if wcond is not None :
            # drop the intensity prefix (+/-) when the phenomenon code is long
            if (len(wcond)>3) and (wcond.startswith('+') or wcond.startswith('-')) :
                wcond = wcond[1:]
            # pphen = offset where the phenomenon code starts (after qualifier)
            if wcond.startswith('+') or wcond.startswith('-') :
                pphen = 1
            elif len(wcond) < 4 :
                pphen = 0
            else :
                pphen = 2
            squal = wcond[:pphen]              # qualifier, e.g. "+", "-", "VC"
            sphen = wcond[pphen : pphen + 4]   # phenomenon code, e.g. "RA"
            phenomenon = _WeatherConditions.get(sphen, None)
            if phenomenon is not None :
                (name, pixmap, phenomenon) = phenomenon
                pheninfo = phenomenon.get(squal, name)
                if type(pheninfo) != type(()) :
                    return (pheninfo, pixmap)
                else :
                    # contains pixmap info
                    return pheninfo

    def match_WeatherPart(self, regexp) :
        """
        Return the matching part of the encoded Metar report.
        regexp: the regexp needed to extract this part.
        Return the first matching string or None.
        WARNING: Some Metar reports may contain several matching
        strings, only the first one is taken into account!
        """
        if self.Report.code is not None :
            rg = re.compile(regexp)
            for wpart in self.Report.getRawMetarCode().split() :
                match = rg.match(wpart)
                if match:
                    return match.string[match.start(0) : match.end(0)]

    def ParseReport(self, MetarReport = None):
        """Take report with raw info only and return it with in
        parsed values filled in. Note: This function edits the
        WeatherReport object you supply!"""
        if self.Report is None and MetarReport is None:
            raise EmptyReportException, "No report given on init and ParseReport()."
        elif MetarReport is not None:
            self.Report = MetarReport

        lines = self.Report.fullreport.split("\n")
        for line in lines:
            # Decoded NOAA reports use "Header: value" lines; a line with
            # no colon makes header == data == line (e.g. the station line).
            try:
                header, data = line.split(":", 1)
            except ValueError:
                header = data = line
            header = header.strip()
            data = data.strip()
            # The station id inside the report
            # As the station line may contain additional sets of (),
            # we have to search from the rear end and flip things around
            if header.find("("+self.Report.givenstationid+")") != -1:
                id_offset = header.find("("+self.Report.givenstationid+")")
                loc = data[:id_offset]
                p = data[id_offset:]
                try:
                    loc = loc.strip()
                    # reverse so the *last* comma splits country from city
                    rloc = loc[::-1]
                    rcoun, rcity = rloc.split(",", 1)
                except ValueError:
                    rcity = ""
                    rcoun = ""
                    p = data
                try:
                    # tokens after "(ICAO)": latitude, longitude, altitude
                    lat, lng, ht = p.split()[1:4]
                    ht = int(ht[:-1]) # cut off 'M' for meters
                except ValueError:
                    # some stations do not report an altitude
                    (lat, lng) = p.split()[1:3]
                    ht = None
                self.Report.stat_city = rcity.strip()[::-1]
                self.Report.stat_country = rcoun.strip()[::-1]
                self.Report.fulln = loc
                self.Report.latitude = lat
                self.Report.longitude = lng
                self.Report.latf = parseLatLong(lat)
                self.Report.longf = parseLatLong(lng)
                self.Report.altitude = ht
            # The line containing date and time of the report
            # We have to make sure that the station ID is *not*
            # in this line to avoid trying to parse the ob: line
            elif ((data.find("UTC")) != -1 and (data.find(self.Report.givenstationid)) == -1):
                rt = data.split("/")[1]
                self.Report.rtime = rt.strip()
            # temperature; data looks like "NN F (NN C)"
            elif (header == "Temperature"):
                f, c = data.split(None, 3)[0:3:2]
                self.Report.tempf = float(f)
                # The string we have split is "(NN C)", hence the slice
                self.Report.temp = float(c[1:])
            # wind dir and speed (knots are stored as windspeedkt)
            elif (header == "Wind"):
                if (data.find("Calm") != -1):
                    self.Report.windspeed = 0.0
                    self.Report.windspeedkt = 0.0
                    self.Report.windspeedmph = 0.0
                    self.Report.winddir = None
                    self.Report.windcomp = None
                elif (data.find("Variable") != -1):
                    speed = data.split(" ", 3)[2]
                    self.Report.windspeed = (float(speed)*0.44704)  # mph -> m/s
                    self.Report.windspeedkt = int(data.split(" ", 5)[4][1:])
                    self.Report.windspeedmph = int(speed)
                    self.Report.winddir = None
                    self.Report.windcomp = None
                else:
                    # e.g. "from the NE (040 degrees) at 9 MPH (8 KT)"
                    fields = data.split(" ", 9)[0:9]
                    f = fields[0]
                    comp = fields[2]
                    deg = fields[3]
                    speed = fields[6]
                    speedkt = fields[8][1:]
                    del fields
                    self.Report.winddir = int(deg[1:])
                    self.Report.windcomp = comp.strip()
                    self.Report.windspeed = (float(speed)*0.44704)  # mph -> m/s
                    self.Report.windspeedkt = (int(speedkt))
                    self.Report.windspeedmph = int(speed)
            # visibility: only the first token is examined; a non-numeric
            # first token (e.g. "greater") leaves visibility as None
            elif (header == "Visibility"):
                for d in data.split():
                    try:
                        self.Report.vis = float(d)*1.609344  # miles -> km
                        break
                    except ValueError:
                        self.Report.vis = None
                        break
            # dew point; data looks like "NN F (NN C)"
            elif (header == "Dew Point"):
                f, c = data.split(None, 3)[0:3:2]
                self.Report.dewpf = float(f)
                # The string we have split is "(NN C)", hence the slice
                self.Report.dewp = float(c[1:])
            # humidity
            elif (header == "Relative Humidity"):
                h = data.split("%", 1)[0]
                self.Report.humid = int(h)
            # pressure
            elif (header == "Pressure (altimeter)"):
                p = data.split(" ", 1)[0]
                self.Report.press = (float(p)*33.863886)  # inHg -> hPa
            # short weather desc. ("rain", "mist", ...)
            elif (header == "Weather"):
                self.Report.weather = data
            # short desc. of sky conditions
            elif (header == "Sky conditions"):
                self.Report.sky = data
            # the encoded report itself
            elif (header == "ob"):
                self.Report.code = data.strip()
            # the cycle value ("time slot")
            elif (header == "cycle"):
                self.Report.cycle = int(data)

        # cloud info
        cloudinfo = self.extractCloudInformation()
        if cloudinfo is not None :
            (cloudinfo, cloudpixmap) = cloudinfo
        else :
            (cloudinfo, cloudpixmap) = (None, None)
        conditions = self.extractSkyConditions()
        if conditions is not None :
            (conditions, condpixmap) = conditions
        else :
            (conditions, condpixmap) = (None, None)
        # fill the weather information
        self.Report.weather = self.Report.weather or conditions or cloudinfo
        # Pixmap guessed from general conditions has priority
        # over pixmap guessed from clouds
        self.Report.pixmap = condpixmap or cloudpixmap
        # report is complete
        self.Report.valid = 1
        return self.Report
class ReportFetcher:
    """Fetches a report from a given METAR id, optionally taking into
    account a different baseurl and using environment var-specified
    proxies."""
    # NOTE: Python 2 code (urllib2, old raise/except syntax).  Relies on
    # WeatherReport, EmptyIDException and NetworkException defined
    # elsewhere in this file.

    def __init__(self, MetarStationCode = None, baseurl = "http://weather.noaa.gov/pub/data/observations/metar/decoded/"):
        """Set stationid attribute and base URL to fetch report from"""
        self.stationid = MetarStationCode
        self.baseurl = baseurl

    def MakeReport(self, StationID, RawReport):
        """
        Take a string (RawReport) and a station code and turn it
        into an object suitable for ReportParser
        """
        self.reporturl = "%s%s.TXT" % (self.baseurl, StationID)
        self.fullreport = RawReport
        report = WeatherReport(StationID)
        report.reporturl = self.reporturl
        report.fullreport = self.fullreport
        self.report = report # Caching it for GetReport()
        return report

    def FetchReport(self, StationCode = None, proxy = None):
        """
        Fetch a report for a given station ID from the baseurl given
        upon creation of the ReportFetcher instance.
        If proxy is not None, a proxy URL of the form
        protocol://user:password@host.name.tld:port/
        is expected, for example:
        http://squid.somenet.com:3128/
        If no proxy is specified, the environment variable http_proxy
        is inspected. If it isn't set, a direct connection is tried.
        """
        if self.stationid is None and StationCode is None:
            raise EmptyIDException, "No ID given on init and FetchReport()."
        elif StationCode is not None:
            self.stationid = StationCode
        self.stationid = self.stationid.upper()
        self.reporturl = "%s%s.TXT" % (self.baseurl, self.stationid)
        if proxy:
            # route the request through the explicitly given proxy
            p_dict = {'http': proxy}
            p_handler = urllib2.ProxyHandler(p_dict)
            opener = urllib2.build_opener(p_handler, urllib2.HTTPHandler)
            urllib2.install_opener(opener)
        else:
            # ProxyHandler with no args picks up the *_proxy env vars;
            # build_opener accepts handler classes and instantiates them
            urllib2.install_opener(
                urllib2.build_opener(urllib2.ProxyHandler, urllib2.HTTPHandler))
        try:
            fn = urllib2.urlopen(self.reporturl)
        except urllib2.HTTPError, why:
            raise NetworkException, why
        # Dump entire report in a variable
        self.fullreport = fn.read()
        if fn.info().status:
            raise NetworkException, "Could not fetch METAR report"
        report = WeatherReport(self.stationid)
        report.reporturl = self.reporturl
        report.fullreport = self.fullreport
        self.report = report # Caching it for GetReport()
        return report

    def GetReport(self):
        """Get a previously fetched report again"""
        return self.report
|
smulloni/pymetar
|
pymetar.py
|
Python
|
gpl-2.0
| 39,961
|
[
"CRYSTAL"
] |
c75c07b0a6fffdb8ffdec1764635b8726115ac457917597f941765dbddf90ee2
|
import os
import shutil
import struct
import tempfile
import warnings
import shlex
import subprocess as sp
import numpy as np
from . import database
from . import anat
def get_paths(subject, hemi, type="patch"):
    """Return a ``{name}``-format template for a FreeSurfer file path.

    type selects the kind of file: "patch" (3d patch), "surf" (surface),
    or "curv" (curvature).  Paths live under $SUBJECTS_DIR/<subject>/surf.
    """
    base = os.path.join(os.environ['SUBJECTS_DIR'], subject)
    templates = {
        "patch": os.path.join(base, "surf", hemi + ".{name}.patch.3d"),
        "surf": os.path.join(base, "surf", hemi + ".{name}"),
        "curv": os.path.join(base, "surf", hemi + ".curv{name}"),
    }
    return templates.get(type)
def autorecon(subject, type="all"):
    """Run FreeSurfer's recon-all for *subject*.

    type picks the reconstruction stage ('all', '1', '2', '3', 'cp',
    'wm', 'pia'); for long-running stages the user is asked to confirm.
    """
    step_flags = {
        'all':'autorecon-all',
        '1':"autorecon1",
        '2':"autorecon2",
        '3':"autorecon3",
        'cp':"autorecon2-cp",
        'wm':"autorecon2-wm",
        'pia':"autorecon2-pial"}
    est_times = {
        'all':"12 hours",
        '2':"6 hours",
        'cp':"8 hours",
        'wm':"4 hours"
    }
    key = str(type)
    if key in est_times:
        resp = raw_input("recon-all will take approximately %s to run! Continue? "%est_times[key])
        if resp.lower() not in ("yes", "y"):
            return
    cmd = "recon-all -s {subj} -{cmd}".format(subj=subject, cmd=step_flags[key])
    sp.check_call(shlex.split(cmd))
def flatten(subject, hemi, patch):
    """Run mris_flatten on a cut patch, after confirming with the user."""
    resp = raw_input('Flattening takes approximately 2 hours! Continue? ')
    if resp.lower() not in ('y', 'yes'):
        print("Not going to flatten...")
        return
    src = get_paths(subject, hemi).format(name=patch)
    dst = get_paths(subject, hemi).format(name=patch+".flat")
    cmd = "mris_flatten -O fiducial {inpath} {outpath}".format(inpath=src, outpath=dst)
    sp.check_call(shlex.split(cmd))
def import_subj(subject, sname=None):
    """Import a FreeSurfer subject into the pycortex filestore.

    Converts the T1/aseg anatomicals, writes wm/pia/inflated surfaces
    (shifted so the magnet isocenter is the origin), and saves curvature,
    sulcal depth and thickness maps.  sname is the filestore name
    (defaults to the FreeSurfer subject id).
    """
    if sname is None:
        sname = subject
    database.db.make_subj(sname)

    import nibabel
    # output path templates inside the filestore
    surfs = os.path.join(database.default_filestore, sname, "surfaces", "{name}_{hemi}.gii")
    anats = os.path.join(database.default_filestore, sname, "anatomicals", "{name}.nii.gz")
    surfinfo = os.path.join(database.default_filestore, sname, "surface-info", "{name}.npz")
    # input paths in the FreeSurfer subject directory
    fspath = os.path.join(os.environ['SUBJECTS_DIR'], subject, 'mri')
    curvs = os.path.join(os.environ['SUBJECTS_DIR'], subject, 'surf', '{hemi}.{name}')
    #import anatomicals
    for fsname, name in dict(T1="raw", aseg="aseg").items():
        path = os.path.join(fspath, "{fsname}.mgz").format(fsname=fsname)
        out = anats.format(subj=sname, name=name)
        cmd = "mri_convert {path} {out}".format(path=path, out=out)
        sp.call(shlex.split(cmd))
    if not os.path.exists(curvs.format(hemi="lh", name="fiducial")):
        make_fiducial(subject)
    #Freesurfer uses FOV/2 for center, let's set the surfaces to use the magnet isocenter
    # NOTE(review): `out` is whichever anatomical the dict iteration yielded
    # last — dict order is arbitrary on older Pythons; verify this is intended.
    trans = nibabel.load(out).get_affine()[:3, -1]
    surfmove = trans - np.sign(trans) * [128, 128, 128]

    from . import formats
    #import surfaces
    for fsname, name in [('smoothwm',"wm"), ('pial',"pia"), ('inflated',"inflated")]:
        for hemi in ("lh", "rh"):
            pts, polys, _ = get_surf(subject, hemi, fsname)
            fname = surfs.format(subj=sname, name=name, hemi=hemi)
            formats.write_gii(fname, pts=pts + surfmove, polys=polys)
    #import surfinfo (values are negated on save)
    for curv, info in dict(sulc="sulcaldepth", thickness="thickness", curv="curvature").items():
        lh, rh = [parse_curv(curvs.format(hemi=hemi, name=curv)) for hemi in ['lh', 'rh']]
        np.savez(surfinfo.format(subj=sname, name=info), left=-lh, right=-rh)
def import_flat(subject, patch, sname=None):
    """Import flattened lh/rh patches into the filestore as flat_{hemi}.gii,
    then clear the subject's cache."""
    if sname is None:
        sname = subject
    surfs = os.path.join(database.default_filestore, sname, "surfaces", "flat_{hemi}.gii")
    from . import formats
    for hemi in ('lh', 'rh'):
        pts, polys, _ = get_surf(subject, hemi, "patch", patch+".flat")
        # swap x/y and flip the new y so the flatmap is oriented correctly
        flat = pts[:, [1, 0, 2]]
        flat[:, 1] = -flat[:, 1]
        outname = surfs.format(hemi=hemi)
        print("saving to %s"%outname)
        formats.write_gii(outname, pts=flat, polys=polys)

    #clear the cache, per #81
    cache = os.path.join(database.default_filestore, sname, "cache")
    shutil.rmtree(cache)
    os.makedirs(cache)
def make_fiducial(subject):
    """Write lh/rh "fiducial" surfaces: the midpoint of smoothwm and pial."""
    for hemi in ('lh', 'rh'):
        wm_pts, polys, _ = get_surf(subject, hemi, "smoothwm")
        pial_pts, _, _ = get_surf(subject, hemi, "pial")
        midway = (wm_pts + pial_pts) / 2
        outname = get_paths(subject, hemi, "surf").format(name="fiducial")
        write_surf(outname, midway, polys)
def parse_surf(filename):
    """Read a FreeSurfer binary surface file.

    Returns (pts, polys): an (nverts, 3) float32 coordinate array and an
    (nfaces, 3) vertex-index array.
    """
    with open(filename, 'rb') as fp:
        #skip magic
        fp.seek(3)
        comment = fp.readline()
        fp.readline()  # blank line terminating the header comment
        print(comment)
        verts, faces = struct.unpack('>2I', fp.read(8))
        # on-disk data is big-endian: read native-width then byteswap
        pts = np.fromstring(fp.read(4*3*verts), dtype='f4').byteswap()
        # NOTE(review): np.fromstring was removed in modern NumPy (use
        # frombuffer), and 'I4' looks like a dubious dtype string ('u4'
        # intended?) — verify before running on a recent NumPy.
        polys = np.fromstring(fp.read(4*3*faces), dtype='I4').byteswap()
        return pts.reshape(-1, 3), polys.reshape(-1, 3)
def write_surf(filename, pts, polys, comment=''):
    """Write a FreeSurfer binary surface file.

    Layout: 3-byte magic, comment terminated by a blank line, big-endian
    vertex/face counts, big-endian float32 coordinates, big-endian uint32
    face indices, trailing newline.

    Fixes: write bytes (not str) to the binary stream so the function also
    works on Python 3, and use ``tobytes()`` instead of the removed
    ``tostring()``.  Byte output is unchanged for ASCII comments.
    """
    with open(filename, 'wb') as fp:
        fp.write(b'\xff\xff\xfe')                       # "new" surface magic
        fp.write((comment + '\n\n').encode('ascii'))    # comment + blank line
        fp.write(struct.pack('>2I', len(pts), len(polys)))
        fp.write(pts.astype(np.float32).byteswap().tobytes())
        fp.write(polys.astype(np.uint32).byteswap().tobytes())
        fp.write(b'\n')
def parse_curv(filename):
    """Read per-vertex values from a FreeSurfer curv-format file.

    The first 15 bytes (presumably 3-byte magic plus three int32 counts —
    TODO confirm against the curv spec) are skipped; the rest is big-endian
    float32, returned as a native-order float32 array.

    Fix: np.fromstring and ndarray.newbyteorder were removed in NumPy 2.0;
    frombuffer + astype produces the same values and dtype.
    """
    with open(filename, 'rb') as fp:
        fp.seek(15)
        return np.frombuffer(fp.read(), dtype='>f4').astype(np.float32)
def parse_patch(filename):
    """Read a FreeSurfer binary patch file.

    Returns a structured array with fields ``vert`` (1-based vertex id;
    negative values mark boundary vertices) and ``x``/``y``/``z``
    coordinates, all stored big-endian on disk.

    Fix: np.fromstring was removed in NumPy 2.0; frombuffer is the
    drop-in replacement for raw bytes.
    """
    with open(filename, 'rb') as fp:
        header, = struct.unpack('>i', fp.read(4))   # format tag (unused)
        nverts, = struct.unpack('>i', fp.read(4))
        data = np.frombuffer(fp.read(), dtype=[('vert', '>i4'), ('x', '>f4'), ('y', '>f4'), ('z', '>f4')])
        assert len(data) == nverts
        return data
def get_surf(subject, hemi, type, patch=None):
    """Load a FreeSurfer surface, optionally restricted to a patch.

    Returns (pts, polys, aux).  When a patch is given, aux is a per-vertex
    array (1 = patch interior, -1 = patch edge, 0 = outside) and polys is
    filtered to triangles fully inside the patch; otherwise aux is the
    curvature from get_curv().
    """
    if type == "patch":
        assert patch is not None
        # patch coordinates are defined relative to the smoothwm surface
        surf_file = get_paths(subject, hemi, 'surf').format(name='smoothwm')
    else:
        surf_file = get_paths(subject, hemi, 'surf').format(name=type)
    pts, polys = parse_surf(surf_file)

    if patch is not None:
        patch_file = get_paths(subject, hemi, 'patch').format(name=patch)
        patch = parse_patch(patch_file)
        # patch files store 1-based ids; negatives mark boundary vertices
        verts = patch[patch['vert'] > 0]['vert'] - 1
        edges = -patch[patch['vert'] < 0]['vert'] - 1
        idx = np.zeros((len(pts),), dtype=bool)
        idx[verts] = True
        idx[edges] = True
        # keep only triangles whose three corners are all in the patch
        valid = idx[polys.ravel()].reshape(-1, 3).all(1)
        polys = polys[valid]
        idx = np.zeros((len(pts),))
        idx[verts] = 1
        idx[edges] = -1

    if type == "patch":
        # substitute the (flattened) patch coordinates for the smoothwm ones
        for i, x in enumerate(['x', 'y', 'z']):
            pts[verts, i] = patch[patch['vert'] > 0][x]
            pts[edges, i] = patch[patch['vert'] < 0][x]
        return pts, polys, idx

    return pts, polys, get_curv(subject, hemi)
def get_curv(subject, hemi, type='wm'):
    """Load a curvature file for one hemisphere.

    type 'wm' reads the plain ?h.curv file; any other value reads the
    ?h.curv.<type> variant.
    """
    suffix = '' if type == 'wm' else '.' + type
    return parse_curv(get_paths(subject, hemi, 'curv').format(name=suffix))
def show_surf(subject, hemi, type, patch=None, curv=True):
    """Display a surface interactively in mayavi.

    Colors by curvature (or by patch membership when curv=False).
    Clicking the surface drops an axes glyph at the picked vertex and
    writes its coordinates to <subject>/tmp/edit.dat.
    Returns (figure, surface actor).
    """
    from mayavi import mlab
    from tvtk.api import tvtk

    pts, polys, idx = get_surf(subject, hemi, type, patch)
    if curv:
        curv = get_curv(subject, hemi)
    else:
        curv = idx

    fig = mlab.figure()
    src = mlab.pipeline.triangular_mesh_source(pts[:,0], pts[:,1], pts[:,2], polys, scalars=curv, figure=fig)
    norms = mlab.pipeline.poly_data_normals(src, figure=fig)
    # don't split sharp edges: keeps vertex ids stable for picking
    norms.filter.splitting = False
    surf = mlab.pipeline.surface(norms, figure=fig)
    surf.parent.scalar_lut_manager.set(lut_mode='RdBu', data_range=[-1,1], use_default_range=False)

    # scatter source used to mark picked vertices with axes glyphs
    cursors = mlab.pipeline.scalar_scatter([0], [0], [0])
    glyphs = mlab.pipeline.glyph(cursors, figure=fig)
    glyphs.glyph.glyph_source.glyph_source = glyphs.glyph.glyph_source.glyph_dict['axes']

    fig.scene.background = (0,0,0)
    fig.scene.interactor.interactor_style = tvtk.InteractorStyleTerrain()

    path = os.path.join(os.environ['SUBJECTS_DIR'], subject)
    def picker_callback(picker):
        # append a marker at the picked vertex and log its position
        if picker.actor in surf.actor.actors:
            npts = np.append(cursors.data.points.to_array(), [pts[picker.point_id]], axis=0)
            cursors.data.points = npts
            print(picker.point_id)
            x, y, z = pts[picker.point_id]
            with open(os.path.join(path, 'tmp', 'edit.dat'), 'w') as fp:
                fp.write('%f %f %f\n'%(x, y, z))

    picker = fig.on_mouse_pick(picker_callback)
    picker.tolerance = 0.01
    mlab.show()
    return fig, surf
def write_dot(fname, pts, polys, name="test"):
    """Dump the mesh connectivity as a graphviz graph whose edges carry
    the metric length of each mesh edge (for spring-layout tools)."""
    import networkx as nx

    def edge_iter(tris):
        # every triangle contributes its three undirected edges
        for v0, v1, v2 in tris:
            yield v0, v1
            yield v1, v2
            yield v0, v2

    graph = nx.Graph()
    graph.add_edges_from(edge_iter(polys))
    lengths = []
    with open(fname, "w") as fp:
        fp.write("graph %s {\n"%name)
        fp.write('node [shape=point,label=""];\n')
        for u, v in graph.edges_iter():
            dist = np.sqrt(((pts[u] - pts[v])**2).sum(-1))
            lengths.append(dist)
            fp.write("%s -- %s [len=%f];\n"%(u, v, dist))
        fp.write("maxiter=1000000;\n")
        fp.write("}")
def read_dot(fname, pts):
import re
parse = re.compile(r'\s(\d+)\s\[label="", pos="([\d\.]+),([\d\.]+)".*];')
data = np.zeros((len(pts), 2))
with open(fname) as fp:
fp.readline()
fp.readline()
fp.readline()
fp.readline()
el = fp.readline().split(' ')
while el[1] != '--':
x, y = el[2][5:-2].split(',')
data[int(el[0][1:])] = float(x), float(y)
el = fp.readline().split(' ')
return data
def write_decimated(path, pts, polys):
    """Decimate a surface and write both the reduced surface
    (<path>.smoothwm) and a full patch file (<path>.full.patch.3d)
    covering it, with boundary vertices marked.

    Fix: ndarray.tostring() was removed in NumPy 2.0; tobytes() produces
    identical output.
    """
    from .polyutils import decimate, boundary_edges
    dpts, dpolys = decimate(pts, polys)
    write_surf(path+'.smoothwm', dpts, dpolys)
    edges = boundary_edges(dpolys)
    data = np.zeros((len(dpts),), dtype=[('vert', '>i4'), ('x', '>f4'), ('y', '>f4'), ('z', '>f4')])
    # patch files use 1-based vertex ids; boundary ids are negated
    data['vert'] = np.arange(len(dpts))+1
    data['vert'][edges] *= -1
    data['x'] = dpts[:,0]
    data['y'] = dpts[:,1]
    data['z'] = dpts[:,2]
    with open(path+'.full.patch.3d', 'wb') as fp:
        fp.write(struct.pack('>i', -1))
        fp.write(struct.pack('>i', len(dpts)))
        fp.write(data.tobytes())
import copy
class SpringLayout(object):
    """Iterative spring relaxation over a triangle mesh.

    Every mesh edge behaves as a spring whose rest length is the edge
    length measured in ``dpts`` (defaults to ``pts``); step() moves each
    non-pinned vertex by the mean spring displacement.
    """
    def __init__(self, pts, polys, dpts=None, pins=None, stepsize=1, neighborhood=0):
        # pts: vertex coordinates (assumed (N, 3) — TODO confirm)
        # polys: (M, 3) triangle vertex indices
        # pins: indices of vertices that must never move
        # neighborhood: how many times to dilate each neighbor set (k-ring)
        self.pts = pts
        self.polys = polys
        self.stepsize = stepsize
        pinmask = np.zeros((len(pts),), dtype=bool)
        if isinstance(pins, (list, set, np.ndarray)):
            pinmask[pins] = True
        self.pins = pinmask
        # build vertex adjacency from the triangles
        self.neighbors = [set() for _ in range(len(pts))]
        for i, j, k in polys:
            self.neighbors[i].add(j)
            self.neighbors[i].add(k)
            self.neighbors[j].add(i)
            self.neighbors[j].add(k)
            self.neighbors[k].add(i)
            self.neighbors[k].add(j)
        # optionally expand neighborhoods by unioning neighbors-of-neighbors
        for _ in range(neighborhood):
            _neighbors = copy.deepcopy(self.neighbors)
            for v, neighbors in enumerate(self.neighbors):
                for n in neighbors:
                    _neighbors[v] |= self.neighbors[n]
            self.neighbors = _neighbors
        # freeze the sets into lists, dropping any self references
        for i in range(len(self.neighbors)):
            self.neighbors[i] = list(self.neighbors[i] - set([i]))
        if dpts is None:
            dpts = pts
        #self.kdt = cKDTree(self.pts)
        self._next = self.pts.copy()
        # preallocate padded per-neighbor work arrays (width = max degree)
        width = max(len(n) for n in self.neighbors)
        self._mask = np.zeros((len(pts), width), dtype=bool)
        self._move = np.zeros((len(pts), width, 3))
        #self._mean = np.zeros((len(pts), width))
        self._num = np.zeros((len(pts),))
        self._dists = []
        self._idx = []
        for i, n in enumerate(self.neighbors):
            self._mask[i, :len(n)] = True
            # rest length of each incident edge, measured in dpts
            self._dists.append(np.sqrt(((dpts[n] - dpts[i])**2).sum(-1)))
            self._idx.append(np.ones((len(n),))*i)
            self._num[i] = len(n)
        # flat edge lists: _idx[e] is the source vertex, _neigh[e] the target
        # NOTE(review): np.uint was removed in NumPy 2.0 (use np.uint64)
        self._dists = np.hstack(self._dists)
        self._idx = np.hstack(self._idx).astype(np.uint)
        self._neigh = np.hstack(self.neighbors).astype(np.uint)
        self.figure = None

    def _spring(self):
        # Hooke-style displacement along every edge, averaged per vertex.
        svec = self.pts[self._neigh] - self.pts[self._idx]
        slen = np.sqrt((svec**2).sum(-1))
        force = (slen - self._dists) # / self._dists
        svec /= slen[:,np.newaxis]
        fvec = force[:, np.newaxis] * svec
        self._move[self._mask] = self.stepsize * fvec
        return self._move.sum(1) / self._num[:, np.newaxis]

    def _estatic(self, idx):
        # Electrostatic repulsion term.
        # NOTE(review): self.kdt is never assigned (the cKDTree lines are
        # commented out), so calling this raises AttributeError; it is not
        # used by step().
        dist, neighbors = self.kdt.query(self.pts[idx], k=20)
        valid = dist > 0
        mag = self.stepsize * (1 / dist)
        diff = self.pts[neighbors] - self.pts[idx]
        return (mag[valid] * diff[valid].T).T.mean(0)

    def step(self):
        """Advance one relaxation step.

        Returns (dict of x/y/z coordinate arrays, movement of the
        non-pinned vertices).
        """
        move = self._spring()[~self.pins]
        self._next[~self.pins] += move #+ self._estatic(i)
        self.pts = self._next.copy()
        return dict(x=self.pts[:,0],y=self.pts[:, 1], z=self.pts[:,2]), move
        #self.kdt = cKDTree(self.pts)

    def run(self, n=1000):
        """Run *n* relaxation steps, printing the step counter."""
        for _ in range(n):
            self.step()
            print(_)

    def view_step(self):
        """Step once and update (creating on first call) a mayavi
        wireframe view of the mesh."""
        from mayavi import mlab
        if self.figure is None:
            self.figure = mlab.triangular_mesh(self.pts[:,0], self.pts[:,1], self.pts[:,2], self.polys, representation='wireframe')
        self.step()
        self.figure.mlab_source.set(x=self.pts[:,0], y=self.pts[:,1], z=self.pts[:,2])
def stretch_mwall(pts, polys, mwall):
    """Pin the medial-wall vertices onto a circle and return a
    SpringLayout that relaxes the rest of the mesh around them.

    Note: mutates ``pts`` in place; the pre-stretch coordinates are kept
    as the spring rest-length reference.
    """
    rest = pts.copy()
    mid = pts[mwall].mean(0)
    span = pts.max(0) - pts.min(0)
    radius = max(span[1:])
    theta = np.arctan2(pts[mwall][:, 2], pts[mwall][:, 1])
    pts[mwall, 0] = mid[0]
    pts[mwall, 1] = mid[1] + radius * np.cos(theta)
    pts[mwall, 2] = mid[2] + radius * np.sin(theta)
    return SpringLayout(pts, polys, rest, pins=mwall)
if __name__ == "__main__":
    # CLI entry point: python freesurfer.py <subject> <hemi> <surface type>
    import sys
    show_surf(sys.argv[1], sys.argv[2], sys.argv[3])
|
CVML/pycortex
|
cortex/freesurfer.py
|
Python
|
bsd-2-clause
| 14,249
|
[
"Mayavi"
] |
8b3db23bb2fda8eddae5b32f2568204259a60e9d437daf43683e7fc5bb6cfb88
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.spanner_v1.services.spanner import SpannerAsyncClient
from google.cloud.spanner_v1.services.spanner import SpannerClient
from google.cloud.spanner_v1.services.spanner import pagers
from google.cloud.spanner_v1.services.spanner import transports
from google.cloud.spanner_v1.types import commit_response
from google.cloud.spanner_v1.types import keys
from google.cloud.spanner_v1.types import mutation
from google.cloud.spanner_v1.types import result_set
from google.cloud.spanner_v1.types import spanner
from google.cloud.spanner_v1.types import transaction
from google.cloud.spanner_v1.types import type as gs_type
from google.oauth2 import service_account
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
    """Dummy mTLS client-certificate callback used by the tests below."""
    cert, key = b"cert bytes", b"key bytes"
    return cert, key
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a sentinel endpoint when *client* defaults to localhost;
    otherwise pass the client's default endpoint through unchanged."""
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint must insert ".mtls" into googleapis
    hosts (plain and sandbox), be idempotent on already-mtls hosts, and
    pass None and non-Google hosts through unchanged."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"

    assert SpannerClient._get_default_mtls_endpoint(None) is None
    assert SpannerClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
    assert (
        SpannerClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
    )
    assert (
        SpannerClient._get_default_mtls_endpoint(sandbox_endpoint)
        == sandbox_mtls_endpoint
    )
    assert (
        SpannerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
        == sandbox_mtls_endpoint
    )
    assert SpannerClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [SpannerClient, SpannerAsyncClient,])
def test_spanner_client_from_service_account_info(client_class):
    """from_service_account_info should build a client whose transport
    carries the factory-produced credentials and the default host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == "spanner.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.SpannerGrpcTransport, "grpc"),
        (transports.SpannerGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_spanner_client_service_account_always_use_jwt(transport_class, transport_name):
    """Transports must request a self-signed JWT exactly when
    always_use_jwt_access=True, and not otherwise."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)

    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [SpannerClient, SpannerAsyncClient,])
def test_spanner_client_from_service_account_file(client_class):
    """Both from_service_account_file and its from_service_account_json
    alias should produce clients with the mocked credentials and the
    default host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == "spanner.googleapis.com:443"
def test_spanner_client_get_transport_class():
    """get_transport_class returns a registered transport by default and
    the matching class when a name is given."""
    transport = SpannerClient.get_transport_class()
    available_transports = [
        transports.SpannerGrpcTransport,
    ]
    assert transport in available_transports

    transport = SpannerClient.get_transport_class("grpc")
    assert transport == transports.SpannerGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (SpannerClient, transports.SpannerGrpcTransport, "grpc"),
        (SpannerAsyncClient, transports.SpannerGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
@mock.patch.object(
    SpannerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpannerClient)
)
@mock.patch.object(
    SpannerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpannerAsyncClient)
)
def test_spanner_client_client_options(client_class, transport_class, transport_name):
    """Exercise ClientOptions handling: pre-built transports, explicit
    api_endpoint, every GOOGLE_API_USE_MTLS_ENDPOINT value, invalid
    GOOGLE_API_USE_CLIENT_CERTIFICATE, and quota_project_id."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(SpannerClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()

    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(SpannerClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()

    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)

    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)

    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (SpannerClient, transports.SpannerGrpcTransport, "grpc", "true"),
        (
            SpannerAsyncClient,
            transports.SpannerGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (SpannerClient, transports.SpannerGrpcTransport, "grpc", "false"),
        (
            SpannerAsyncClient,
            transports.SpannerGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    SpannerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpannerClient)
)
@mock.patch.object(
    SpannerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpannerAsyncClient)
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_spanner_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """Endpoint auto-switching under GOOGLE_API_USE_MTLS_ENDPOINT="auto".

    The client must use the mTLS endpoint only when
    GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" AND a client certificate is
    available (explicitly via options, or discoverable through ADC).
    """
    # Case 1: client_cert_source is provided explicitly in client options.
    # Whether it is actually forwarded depends on GOOGLE_API_USE_CLIENT_CERTIFICATE.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)

            if use_client_cert_env == "false":
                # Cert usage disabled: plain endpoint, no cert source forwarded.
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT

            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Case 2: no explicit cert source, but ADC reports a default client cert.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    # NOTE(review): `client` here is still the instance created in
                    # case 1; both expose the same patched DEFAULT_*ENDPOINT class
                    # attributes as client_class, so expectations are unaffected.
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback

                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )

    # Case 3: neither an explicit cert source nor an ADC default cert exists;
    # regardless of the env flag the client must stay on the plain endpoint.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize("client_class", [SpannerClient, SpannerAsyncClient])
@mock.patch.object(
    SpannerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpannerClient)
)
@mock.patch.object(
    SpannerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpannerAsyncClient)
)
def test_spanner_client_get_mtls_endpoint_and_cert_source(client_class):
    """get_mtls_endpoint_and_cert_source() across env-variable combinations.

    Exercises GOOGLE_API_USE_CLIENT_CERTIFICATE ("true"/"false") and
    GOOGLE_API_USE_MTLS_ENDPOINT ("never"/"always"/"auto") and checks which
    (endpoint, cert_source) pair the classmethod resolves in each case.
    """
    mock_client_cert_source = mock.Mock()

    # GOOGLE_API_USE_CLIENT_CERTIFICATE == "true": both the api_endpoint and
    # the cert source from the options are returned unchanged.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source

    # "false": the endpoint is kept but the cert source must be dropped.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None

    # GOOGLE_API_USE_MTLS_ENDPOINT == "never": always the plain endpoint.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None

    # "always": the mTLS endpoint even without any cert source.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None

    # "auto" (the decorator default) with no discoverable ADC default cert:
    # fall back to the plain endpoint.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None

    # "auto" with an ADC default cert available: mTLS endpoint + that cert.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=mock_client_cert_source,
            ):
                (
                    api_endpoint,
                    cert_source,
                ) = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (SpannerClient, transports.SpannerGrpcTransport, "grpc"),
        (SpannerAsyncClient, transports.SpannerGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_spanner_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes supplied via client options must be forwarded to the transport."""
    scoped_options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as transport_init:
        transport_init.return_value = None
        client = client_class(client_options=scoped_options, transport=transport_name)
        # The transport constructor receives the scopes verbatim.
        transport_init.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (SpannerClient, transports.SpannerGrpcTransport, "grpc", grpc_helpers),
        (
            SpannerAsyncClient,
            transports.SpannerGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_spanner_client_client_options_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """A credentials file in client options must reach the transport ctor."""
    creds_file_options = client_options.ClientOptions(
        credentials_file="credentials.json"
    )
    with mock.patch.object(transport_class, "__init__") as transport_init:
        transport_init.return_value = None
        client = client_class(
            client_options=creds_file_options, transport=transport_name
        )
        # credentials_file is forwarded; explicit credentials stay None.
        transport_init.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_spanner_client_client_options_from_dict():
    """A plain dict of client options must behave like a ClientOptions object."""
    with mock.patch(
        "google.cloud.spanner_v1.services.spanner.transports.SpannerGrpcTransport.__init__"
    ) as grpc_transport_init:
        grpc_transport_init.return_value = None
        client = SpannerClient(client_options={"api_endpoint": "squid.clam.whelk"})
        # The dict's api_endpoint is honored as the transport host.
        grpc_transport_init.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (SpannerClient, transports.SpannerGrpcTransport, "grpc", grpc_helpers),
        (
            SpannerAsyncClient,
            transports.SpannerGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_spanner_client_create_channel_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """Credentials loaded from a file must be used when creating the channel.

    First verifies the transport constructor receives the credentials file,
    then (with the real transport) that create_channel is invoked with the
    credentials loaded from that file rather than the ADC credentials.
    """
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

    # Test that the credentials from file are saved and used as the credentials.
    # load_credentials_from_file -> file_creds, ADC default -> creds; the
    # channel must be built with file_creds, proving the file took precedence.
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel"
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        file_creds = ga_credentials.AnonymousCredentials()
        load_creds.return_value = (file_creds, None)
        adc.return_value = (creds, None)
        client = client_class(client_options=options, transport=transport_name)
        create_channel.assert_called_with(
            "spanner.googleapis.com:443",
            credentials=file_creds,
            credentials_file=None,
            quota_project_id=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/spanner.data",
            ),
            scopes=None,
            default_host="spanner.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize("request_type", [spanner.CreateSessionRequest, dict,])
def test_create_session(request_type, transport: str = "grpc"):
    """CreateSession issues one gRPC call and returns the stubbed Session."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are optional and the API is mocked, so an empty request works.
    request = request_type()
    with mock.patch.object(type(client.transport.create_session), "__call__") as rpc:
        rpc.return_value = spanner.Session(name="name_value",)
        response = client.create_session(request)
        # Exactly one stub invocation, carrying the canonical request type.
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == spanner.CreateSessionRequest()
    # The response is surfaced as-is.
    assert isinstance(response, spanner.Session)
    assert response.name == "name_value"
def test_create_session_empty_call():
    """Coverage failsafe: calling with no request and no flattened args works."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    with mock.patch.object(type(client.transport.create_session), "__call__") as rpc:
        client.create_session()
        rpc.assert_called()
        # A default CreateSessionRequest is synthesized for the empty call.
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == spanner.CreateSessionRequest()
@pytest.mark.asyncio
async def test_create_session_async(
    transport: str = "grpc_asyncio", request_type=spanner.CreateSessionRequest
):
    """Async CreateSession awaits the stub and returns the Session."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are optional and the API is mocked, so an empty request works.
    request = request_type()
    with mock.patch.object(type(client.transport.create_session), "__call__") as rpc:
        # The async surface expects an awaitable, so wrap the Session.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            spanner.Session(name="name_value",)
        )
        response = await client.create_session(request)
        # The stub was invoked with the canonical request type.
        assert len(rpc.mock_calls)
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == spanner.CreateSessionRequest()
    # The response is surfaced as-is.
    assert isinstance(response, spanner.Session)
    assert response.name == "name_value"
@pytest.mark.asyncio
async def test_create_session_async_from_dict():
    """The async path must also accept a plain dict as the request."""
    await test_create_session_async(request_type=dict)
def test_create_session_field_headers():
    """The database field must be sent as an x-goog-request-params header."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI is routed via a field header;
    # set a non-empty value so the header is populated.
    request = spanner.CreateSessionRequest()
    request.database = "database/value"
    with mock.patch.object(type(client.transport.create_session), "__call__") as rpc:
        rpc.return_value = spanner.Session()
        client.create_session(request)
        # One call, request passed through unchanged.
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request
        # The routing header appears in the call metadata.
        _, _, kw = rpc.mock_calls[0]
        assert ("x-goog-request-params", "database=database/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_session_field_headers_async():
    """Async variant: database field is propagated as a routing header."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Populate the URI-routed field so the header is generated.
    request = spanner.CreateSessionRequest()
    request.database = "database/value"
    with mock.patch.object(type(client.transport.create_session), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.Session())
        await client.create_session(request)
        # The stub saw the request unchanged.
        assert len(rpc.mock_calls)
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request
        # The routing header appears in the call metadata.
        _, _, kw = rpc.mock_calls[0]
        assert ("x-goog-request-params", "database=database/value",) in kw["metadata"]
def test_create_session_flattened():
    """Flattened keyword args must be packed into the request message."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    with mock.patch.object(type(client.transport.create_session), "__call__") as rpc:
        rpc.return_value = spanner.Session()
        # Invoke using only the flattened field.
        client.create_session(database="database_value",)
        # The request object built from the keyword carries the value.
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0].database == "database_value"
def test_create_session_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        client.create_session(
            spanner.CreateSessionRequest(), database="database_value",
        )
@pytest.mark.asyncio
async def test_create_session_flattened_async():
    """Async flattened call packs keyword args into the request message.

    Fix: removed the redundant ``call.return_value = spanner.Session()``
    assignment that was immediately overwritten by the FakeUnaryUnaryCall
    wrapper and therefore had no effect.
    """
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_session), "__call__") as call:
        # The async surface expects an awaitable return value.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.Session())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_session(database="database_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].database == "database_value"
@pytest.mark.asyncio
async def test_create_session_flattened_error_async():
    """Async: request object plus flattened fields must raise ValueError."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        await client.create_session(
            spanner.CreateSessionRequest(), database="database_value",
        )
@pytest.mark.parametrize("request_type", [spanner.BatchCreateSessionsRequest, dict,])
def test_batch_create_sessions(request_type, transport: str = "grpc"):
    """BatchCreateSessions issues one gRPC call and returns the response."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are optional and the API is mocked, so an empty request works.
    request = request_type()
    with mock.patch.object(
        type(client.transport.batch_create_sessions), "__call__"
    ) as rpc:
        rpc.return_value = spanner.BatchCreateSessionsResponse()
        response = client.batch_create_sessions(request)
        # Exactly one stub invocation, carrying the canonical request type.
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == spanner.BatchCreateSessionsRequest()
    # The response is surfaced as-is.
    assert isinstance(response, spanner.BatchCreateSessionsResponse)
def test_batch_create_sessions_empty_call():
    """Coverage failsafe: calling with no request and no flattened args works."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    with mock.patch.object(
        type(client.transport.batch_create_sessions), "__call__"
    ) as rpc:
        client.batch_create_sessions()
        rpc.assert_called()
        # A default request message is synthesized for the empty call.
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == spanner.BatchCreateSessionsRequest()
@pytest.mark.asyncio
async def test_batch_create_sessions_async(
    transport: str = "grpc_asyncio", request_type=spanner.BatchCreateSessionsRequest
):
    """Async BatchCreateSessions awaits the stub and returns the response."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are optional and the API is mocked, so an empty request works.
    request = request_type()
    with mock.patch.object(
        type(client.transport.batch_create_sessions), "__call__"
    ) as rpc:
        # The async surface expects an awaitable, so wrap the response.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            spanner.BatchCreateSessionsResponse()
        )
        response = await client.batch_create_sessions(request)
        # The stub was invoked with the canonical request type.
        assert len(rpc.mock_calls)
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == spanner.BatchCreateSessionsRequest()
    # The response is surfaced as-is.
    assert isinstance(response, spanner.BatchCreateSessionsResponse)
@pytest.mark.asyncio
async def test_batch_create_sessions_async_from_dict():
    """The async path must also accept a plain dict as the request."""
    await test_batch_create_sessions_async(request_type=dict)
def test_batch_create_sessions_field_headers():
    """The database field must be sent as an x-goog-request-params header."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Populate the URI-routed field so the header is generated.
    request = spanner.BatchCreateSessionsRequest()
    request.database = "database/value"
    with mock.patch.object(
        type(client.transport.batch_create_sessions), "__call__"
    ) as rpc:
        rpc.return_value = spanner.BatchCreateSessionsResponse()
        client.batch_create_sessions(request)
        # One call, request passed through unchanged.
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request
        # The routing header appears in the call metadata.
        _, _, kw = rpc.mock_calls[0]
        assert ("x-goog-request-params", "database=database/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_batch_create_sessions_field_headers_async():
    """Async variant: database field is propagated as a routing header."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Populate the URI-routed field so the header is generated.
    request = spanner.BatchCreateSessionsRequest()
    request.database = "database/value"
    with mock.patch.object(
        type(client.transport.batch_create_sessions), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            spanner.BatchCreateSessionsResponse()
        )
        await client.batch_create_sessions(request)
        # The stub saw the request unchanged.
        assert len(rpc.mock_calls)
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request
        # The routing header appears in the call metadata.
        _, _, kw = rpc.mock_calls[0]
        assert ("x-goog-request-params", "database=database/value",) in kw["metadata"]
def test_batch_create_sessions_flattened():
    """Flattened keyword args must be packed into the request message."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    with mock.patch.object(
        type(client.transport.batch_create_sessions), "__call__"
    ) as rpc:
        rpc.return_value = spanner.BatchCreateSessionsResponse()
        # Invoke using only the flattened fields.
        client.batch_create_sessions(
            database="database_value", session_count=1420,
        )
        # The request object built from the keywords carries both values.
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0].database == "database_value"
        assert args[0].session_count == 1420
def test_batch_create_sessions_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        client.batch_create_sessions(
            spanner.BatchCreateSessionsRequest(),
            database="database_value",
            session_count=1420,
        )
@pytest.mark.asyncio
async def test_batch_create_sessions_flattened_async():
    """Async flattened call packs keyword args into the request message.

    Fix: removed the redundant
    ``call.return_value = spanner.BatchCreateSessionsResponse()`` assignment
    that was immediately overwritten by the FakeUnaryUnaryCall wrapper and
    therefore had no effect.
    """
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.batch_create_sessions), "__call__"
    ) as call:
        # The async surface expects an awaitable return value.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            spanner.BatchCreateSessionsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.batch_create_sessions(
            database="database_value", session_count=1420,
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].database == "database_value"
        assert args[0].session_count == 1420
@pytest.mark.asyncio
async def test_batch_create_sessions_flattened_error_async():
    """Async: request object plus flattened fields must raise ValueError."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        await client.batch_create_sessions(
            spanner.BatchCreateSessionsRequest(),
            database="database_value",
            session_count=1420,
        )
@pytest.mark.parametrize("request_type", [spanner.GetSessionRequest, dict,])
def test_get_session(request_type, transport: str = "grpc"):
    """GetSession issues one gRPC call and returns the stubbed Session."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are optional and the API is mocked, so an empty request works.
    request = request_type()
    with mock.patch.object(type(client.transport.get_session), "__call__") as rpc:
        rpc.return_value = spanner.Session(name="name_value",)
        response = client.get_session(request)
        # Exactly one stub invocation, carrying the canonical request type.
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == spanner.GetSessionRequest()
    # The response is surfaced as-is.
    assert isinstance(response, spanner.Session)
    assert response.name == "name_value"
def test_get_session_empty_call():
    """Coverage failsafe: calling with no request and no flattened args works."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    with mock.patch.object(type(client.transport.get_session), "__call__") as rpc:
        client.get_session()
        rpc.assert_called()
        # A default GetSessionRequest is synthesized for the empty call.
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == spanner.GetSessionRequest()
@pytest.mark.asyncio
async def test_get_session_async(
    transport: str = "grpc_asyncio", request_type=spanner.GetSessionRequest
):
    """Async GetSession awaits the stub and returns the Session."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are optional and the API is mocked, so an empty request works.
    request = request_type()
    with mock.patch.object(type(client.transport.get_session), "__call__") as rpc:
        # The async surface expects an awaitable, so wrap the Session.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            spanner.Session(name="name_value",)
        )
        response = await client.get_session(request)
        # The stub was invoked with the canonical request type.
        assert len(rpc.mock_calls)
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == spanner.GetSessionRequest()
    # The response is surfaced as-is.
    assert isinstance(response, spanner.Session)
    assert response.name == "name_value"
@pytest.mark.asyncio
async def test_get_session_async_from_dict():
    """The async path must also accept a plain dict as the request."""
    await test_get_session_async(request_type=dict)
def test_get_session_field_headers():
    """The name field must be sent as an x-goog-request-params header."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Populate the URI-routed field so the header is generated.
    request = spanner.GetSessionRequest()
    request.name = "name/value"
    with mock.patch.object(type(client.transport.get_session), "__call__") as rpc:
        rpc.return_value = spanner.Session()
        client.get_session(request)
        # One call, request passed through unchanged.
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request
        # The routing header appears in the call metadata.
        _, _, kw = rpc.mock_calls[0]
        assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_session_field_headers_async():
    """Async variant: name field is propagated as a routing header."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Populate the URI-routed field so the header is generated.
    request = spanner.GetSessionRequest()
    request.name = "name/value"
    with mock.patch.object(type(client.transport.get_session), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.Session())
        await client.get_session(request)
        # The stub saw the request unchanged.
        assert len(rpc.mock_calls)
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request
        # The routing header appears in the call metadata.
        _, _, kw = rpc.mock_calls[0]
        assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_session_flattened():
    """Flattened keyword args must be packed into the request message."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    with mock.patch.object(type(client.transport.get_session), "__call__") as rpc:
        rpc.return_value = spanner.Session()
        # Invoke using only the flattened field.
        client.get_session(name="name_value",)
        # The request object built from the keyword carries the value.
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0].name == "name_value"
def test_get_session_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        client.get_session(
            spanner.GetSessionRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_get_session_flattened_async():
    """Async flattened call packs keyword args into the request message.

    Fix: removed the redundant ``call.return_value = spanner.Session()``
    assignment that was immediately overwritten by the FakeUnaryUnaryCall
    wrapper and therefore had no effect.
    """
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_session), "__call__") as call:
        # The async surface expects an awaitable return value.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.Session())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_session(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_session_flattened_error_async():
    """Async: request object plus flattened fields must raise ValueError."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        await client.get_session(
            spanner.GetSessionRequest(), name="name_value",
        )
@pytest.mark.parametrize("request_type", [spanner.ListSessionsRequest, dict,])
def test_list_sessions(request_type, transport: str = "grpc"):
    """ListSessions issues one gRPC call and wraps the response in a pager."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are optional and the API is mocked, so an empty request works.
    request = request_type()
    with mock.patch.object(type(client.transport.list_sessions), "__call__") as rpc:
        rpc.return_value = spanner.ListSessionsResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_sessions(request)
        # Exactly one stub invocation, carrying the canonical request type.
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == spanner.ListSessionsRequest()
    # The raw response is wrapped in a pager that exposes the page token.
    assert isinstance(response, pagers.ListSessionsPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_sessions_empty_call():
    """Coverage failsafe: a no-arg call still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_sessions), "__call__") as call:
        client.list_sessions()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.ListSessionsRequest()
@pytest.mark.asyncio
async def test_list_sessions_async(
    transport: str = "grpc_asyncio", request_type=spanner.ListSessionsRequest
):
    """Async ListSessions: stub called once, async pager wraps the response."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_sessions), "__call__") as call:
        # Designate an appropriate return value for the call.  The wrapper
        # makes the mocked stub awaitable like a real unary-unary call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            spanner.ListSessionsResponse(next_page_token="next_page_token_value",)
        )
        response = await client.list_sessions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.ListSessionsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListSessionsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_sessions_async_from_dict():
    """Re-run the async test with a plain dict request to cover coercion."""
    await test_list_sessions_async(request_type=dict)
def test_list_sessions_field_headers():
    """Routing metadata (x-goog-request-params) is derived from request.database."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.ListSessionsRequest()
    request.database = "database/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_sessions), "__call__") as call:
        call.return_value = spanner.ListSessionsResponse()
        client.list_sessions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "database=database/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_sessions_field_headers_async():
    """Async variant: routing metadata is derived from request.database."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.ListSessionsRequest()
    request.database = "database/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_sessions), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            spanner.ListSessionsResponse()
        )
        await client.list_sessions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "database=database/value",) in kw["metadata"]
def test_list_sessions_flattened():
    """Flattened ``database`` kwarg is folded into the ListSessionsRequest."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_sessions), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = spanner.ListSessionsResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_sessions(database="database_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].database
        mock_val = "database_value"
        assert arg == mock_val
def test_list_sessions_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    # A request object and flattened field values are mutually exclusive;
    # the client must reject the combination before any RPC is attempted.
    request = spanner.ListSessionsRequest()
    with pytest.raises(ValueError):
        client.list_sessions(request, database="database_value")
@pytest.mark.asyncio
async def test_list_sessions_flattened_async():
    """Flattened ``database`` kwarg is folded into the request (async)."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_sessions), "__call__") as call:
        # Designate an appropriate return value for the call.  The wrapper is
        # required so the async client can ``await`` the mocked stub.
        # (A redundant, immediately-overwritten plain return value was removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            spanner.ListSessionsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_sessions(database="database_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].database
        mock_val = "database_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_sessions_flattened_error_async():
    """Mixing a request object with flattened kwargs must raise (async)."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # A request object and flattened field values are mutually exclusive;
    # the async client must reject the combination before any RPC happens.
    request = spanner.ListSessionsRequest()
    with pytest.raises(ValueError):
        await client.list_sessions(request, database="database_value")
def test_list_sessions_pager(transport_name: str = "grpc"):
    """The sync pager transparently fetches successive pages of sessions."""
    client = SpannerClient(
        # Instantiate the credentials; previously the bare class was passed,
        # which is not a valid credentials object (every other test in this
        # file instantiates it).
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_sessions), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            spanner.ListSessionsResponse(
                sessions=[spanner.Session(), spanner.Session(), spanner.Session(),],
                next_page_token="abc",
            ),
            spanner.ListSessionsResponse(sessions=[], next_page_token="def",),
            spanner.ListSessionsResponse(
                sessions=[spanner.Session(),], next_page_token="ghi",
            ),
            spanner.ListSessionsResponse(
                sessions=[spanner.Session(), spanner.Session(),],
            ),
            RuntimeError,
        )
        # The pager must carry the routing metadata on every page fetch.
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("database", ""),)),
        )
        pager = client.list_sessions(request={})
        assert pager._metadata == metadata
        # Iterating the pager flattens all pages into individual sessions.
        results = [i for i in pager]
        assert len(results) == 6
        assert all(isinstance(i, spanner.Session) for i in results)
def test_list_sessions_pages(transport_name: str = "grpc"):
    """The ``pages`` iterator exposes each raw page and its next_page_token."""
    client = SpannerClient(
        # Instantiate the credentials; previously the bare class was passed,
        # which is not a valid credentials object.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_sessions), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            spanner.ListSessionsResponse(
                sessions=[spanner.Session(), spanner.Session(), spanner.Session(),],
                next_page_token="abc",
            ),
            spanner.ListSessionsResponse(sessions=[], next_page_token="def",),
            spanner.ListSessionsResponse(
                sessions=[spanner.Session(),], next_page_token="ghi",
            ),
            spanner.ListSessionsResponse(
                sessions=[spanner.Session(), spanner.Session(),],
            ),
            RuntimeError,
        )
        pages = list(client.list_sessions(request={}).pages)
        # The final page carries an empty token, terminating pagination.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_sessions_async_pager():
    """The async pager yields sessions across page boundaries."""
    # Instantiate the credentials; previously the bare class was passed,
    # which is not a valid credentials object.
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_sessions), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            spanner.ListSessionsResponse(
                sessions=[spanner.Session(), spanner.Session(), spanner.Session(),],
                next_page_token="abc",
            ),
            spanner.ListSessionsResponse(sessions=[], next_page_token="def",),
            spanner.ListSessionsResponse(
                sessions=[spanner.Session(),], next_page_token="ghi",
            ),
            spanner.ListSessionsResponse(
                sessions=[spanner.Session(), spanner.Session(),],
            ),
            RuntimeError,
        )
        async_pager = await client.list_sessions(request={},)
        assert async_pager.next_page_token == "abc"
        # Async iteration flattens all pages into individual sessions.
        responses = []
        async for response in async_pager:
            responses.append(response)
        assert len(responses) == 6
        assert all(isinstance(i, spanner.Session) for i in responses)
@pytest.mark.asyncio
async def test_list_sessions_async_pages():
    """The async ``pages`` iterator exposes each raw page and its token."""
    # Instantiate the credentials; previously the bare class was passed,
    # which is not a valid credentials object.
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_sessions), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            spanner.ListSessionsResponse(
                sessions=[spanner.Session(), spanner.Session(), spanner.Session(),],
                next_page_token="abc",
            ),
            spanner.ListSessionsResponse(sessions=[], next_page_token="def",),
            spanner.ListSessionsResponse(
                sessions=[spanner.Session(),], next_page_token="ghi",
            ),
            spanner.ListSessionsResponse(
                sessions=[spanner.Session(), spanner.Session(),],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_sessions(request={})).pages:
            pages.append(page_)
        # The final page carries an empty token, terminating pagination.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [spanner.DeleteSessionRequest, dict,])
def test_delete_session(request_type, transport: str = "grpc"):
    """DeleteSession over gRPC: one stub call; Empty maps to a None response."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_session), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.delete_session(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.DeleteSessionRequest()
    # Establish that the response is the type that we expect.
    assert response is None
def test_delete_session_empty_call():
    """Coverage failsafe: a no-arg call still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_session), "__call__") as call:
        client.delete_session()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.DeleteSessionRequest()
@pytest.mark.asyncio
async def test_delete_session_async(
    transport: str = "grpc_asyncio", request_type=spanner.DeleteSessionRequest
):
    """Async DeleteSession: stub called once; Empty maps to a None response."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_session), "__call__") as call:
        # Designate an appropriate return value for the call.  The wrapper
        # makes the mocked stub awaitable like a real unary-unary call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_session(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.DeleteSessionRequest()
    # Establish that the response is the type that we expect.
    assert response is None
@pytest.mark.asyncio
async def test_delete_session_async_from_dict():
    """Re-run the async test with a plain dict request to cover coercion."""
    await test_delete_session_async(request_type=dict)
def test_delete_session_field_headers():
    """Routing metadata (x-goog-request-params) is derived from request.name."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.DeleteSessionRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_session), "__call__") as call:
        call.return_value = None
        client.delete_session(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_session_field_headers_async():
    """Async variant: routing metadata is derived from request.name."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.DeleteSessionRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_session), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_session(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_session_flattened():
    """Flattened ``name`` kwarg is folded into the DeleteSessionRequest."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_session), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_session(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_delete_session_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    # A request object and flattened field values are mutually exclusive;
    # the client must reject the combination before any RPC is attempted.
    request = spanner.DeleteSessionRequest()
    with pytest.raises(ValueError):
        client.delete_session(request, name="name_value")
@pytest.mark.asyncio
async def test_delete_session_flattened_async():
    """Flattened ``name`` kwarg is folded into the request (async)."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_session), "__call__") as call:
        # Designate an appropriate return value for the call.  DeleteSession
        # returns Empty, hence the ``None`` payload inside the awaitable
        # wrapper.  (A redundant, immediately-overwritten plain return value
        # was removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_session(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_session_flattened_error_async():
    """Mixing a request object with flattened kwargs must raise (async)."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # A request object and flattened field values are mutually exclusive;
    # the async client must reject the combination before any RPC happens.
    request = spanner.DeleteSessionRequest()
    with pytest.raises(ValueError):
        await client.delete_session(request, name="name_value")
@pytest.mark.parametrize("request_type", [spanner.ExecuteSqlRequest, dict,])
def test_execute_sql(request_type, transport: str = "grpc"):
    """ExecuteSql over gRPC: one stub call, ResultSet response."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.execute_sql), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = result_set.ResultSet()
        response = client.execute_sql(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.ExecuteSqlRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, result_set.ResultSet)
def test_execute_sql_empty_call():
    """Coverage failsafe: a no-arg call still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.execute_sql), "__call__") as call:
        client.execute_sql()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.ExecuteSqlRequest()
@pytest.mark.asyncio
async def test_execute_sql_async(
    transport: str = "grpc_asyncio", request_type=spanner.ExecuteSqlRequest
):
    """Async ExecuteSql: stub called once, ResultSet response."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.execute_sql), "__call__") as call:
        # Designate an appropriate return value for the call.  The wrapper
        # makes the mocked stub awaitable like a real unary-unary call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            result_set.ResultSet()
        )
        response = await client.execute_sql(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.ExecuteSqlRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, result_set.ResultSet)
@pytest.mark.asyncio
async def test_execute_sql_async_from_dict():
    """Re-run the async test with a plain dict request to cover coercion."""
    await test_execute_sql_async(request_type=dict)
def test_execute_sql_field_headers():
    """Routing metadata (x-goog-request-params) is derived from request.session."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.ExecuteSqlRequest()
    request.session = "session/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.execute_sql), "__call__") as call:
        call.return_value = result_set.ResultSet()
        client.execute_sql(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "session=session/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_execute_sql_field_headers_async():
    """Async variant: routing metadata is derived from request.session."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.ExecuteSqlRequest()
    request.session = "session/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.execute_sql), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            result_set.ResultSet()
        )
        await client.execute_sql(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "session=session/value",) in kw["metadata"]
@pytest.mark.parametrize("request_type", [spanner.ExecuteSqlRequest, dict,])
def test_execute_streaming_sql(request_type, transport: str = "grpc"):
    """ExecuteStreamingSql over gRPC: server-streaming PartialResultSets."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.execute_streaming_sql), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  A plain
        # iterator stands in for the server-streaming response.
        call.return_value = iter([result_set.PartialResultSet()])
        response = client.execute_streaming_sql(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.ExecuteSqlRequest()
    # Establish that the response is the type that we expect.
    for message in response:
        assert isinstance(message, result_set.PartialResultSet)
def test_execute_streaming_sql_empty_call():
    """Coverage failsafe: a no-arg call still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.execute_streaming_sql), "__call__"
    ) as call:
        client.execute_streaming_sql()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.ExecuteSqlRequest()
@pytest.mark.asyncio
async def test_execute_streaming_sql_async(
    transport: str = "grpc_asyncio", request_type=spanner.ExecuteSqlRequest
):
    """Async ExecuteStreamingSql: awaitable call yields a readable stream."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.execute_streaming_sql), "__call__"
    ) as call:
        # Designate an appropriate return value for the call: a mock
        # UnaryStreamCall whose ``read()`` yields one PartialResultSet.
        call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
        call.return_value.read = mock.AsyncMock(
            side_effect=[result_set.PartialResultSet()]
        )
        response = await client.execute_streaming_sql(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.ExecuteSqlRequest()
    # Establish that the response is the type that we expect.
    message = await response.read()
    assert isinstance(message, result_set.PartialResultSet)
@pytest.mark.asyncio
async def test_execute_streaming_sql_async_from_dict():
    """Re-run the async test with a plain dict request to cover coercion."""
    await test_execute_streaming_sql_async(request_type=dict)
def test_execute_streaming_sql_field_headers():
    """Routing metadata (x-goog-request-params) is derived from request.session."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.ExecuteSqlRequest()
    request.session = "session/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.execute_streaming_sql), "__call__"
    ) as call:
        call.return_value = iter([result_set.PartialResultSet()])
        client.execute_streaming_sql(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "session=session/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_execute_streaming_sql_field_headers_async():
    """Async variant: routing metadata is derived from request.session."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.ExecuteSqlRequest()
    request.session = "session/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.execute_streaming_sql), "__call__"
    ) as call:
        # Fake a server-streaming call object with a single readable message.
        call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
        call.return_value.read = mock.AsyncMock(
            side_effect=[result_set.PartialResultSet()]
        )
        await client.execute_streaming_sql(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "session=session/value",) in kw["metadata"]
@pytest.mark.parametrize("request_type", [spanner.ExecuteBatchDmlRequest, dict,])
def test_execute_batch_dml(request_type, transport: str = "grpc"):
    """ExecuteBatchDml over gRPC: one stub call, ExecuteBatchDmlResponse."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.execute_batch_dml), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = spanner.ExecuteBatchDmlResponse()
        response = client.execute_batch_dml(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.ExecuteBatchDmlRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, spanner.ExecuteBatchDmlResponse)
def test_execute_batch_dml_empty_call():
    """Coverage failsafe: a no-arg call still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.execute_batch_dml), "__call__"
    ) as call:
        client.execute_batch_dml()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.ExecuteBatchDmlRequest()
@pytest.mark.asyncio
async def test_execute_batch_dml_async(
    transport: str = "grpc_asyncio", request_type=spanner.ExecuteBatchDmlRequest
):
    """Async ExecuteBatchDml: stub called once, ExecuteBatchDmlResponse."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.execute_batch_dml), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  The wrapper
        # makes the mocked stub awaitable like a real unary-unary call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            spanner.ExecuteBatchDmlResponse()
        )
        response = await client.execute_batch_dml(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.ExecuteBatchDmlRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, spanner.ExecuteBatchDmlResponse)
@pytest.mark.asyncio
async def test_execute_batch_dml_async_from_dict():
    """Re-run the async test with a plain dict request to cover coercion."""
    await test_execute_batch_dml_async(request_type=dict)
def test_execute_batch_dml_field_headers():
    """Routing metadata (x-goog-request-params) is derived from request.session."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.ExecuteBatchDmlRequest()
    request.session = "session/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.execute_batch_dml), "__call__"
    ) as call:
        call.return_value = spanner.ExecuteBatchDmlResponse()
        client.execute_batch_dml(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "session=session/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_execute_batch_dml_field_headers_async():
    """Async variant: routing metadata is derived from request.session."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.ExecuteBatchDmlRequest()
    request.session = "session/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.execute_batch_dml), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            spanner.ExecuteBatchDmlResponse()
        )
        await client.execute_batch_dml(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "session=session/value",) in kw["metadata"]
@pytest.mark.parametrize("request_type", [spanner.ReadRequest, dict,])
def test_read(request_type, transport: str = "grpc"):
    """Read over gRPC: one stub call, ResultSet response."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.read), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = result_set.ResultSet()
        response = client.read(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.ReadRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, result_set.ResultSet)
def test_read_empty_call():
    """Coverage failsafe: Read works with no request object and no flattened fields."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.read), "__call__") as call:
        client.read()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.ReadRequest()
@pytest.mark.asyncio
async def test_read_async(
    transport: str = "grpc_asyncio", request_type=spanner.ReadRequest
):
    """Verify Read (async) issues a ReadRequest and returns a ResultSet."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.read), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            result_set.ResultSet()
        )
        response = await client.read(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.ReadRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, result_set.ResultSet)
@pytest.mark.asyncio
async def test_read_async_from_dict():
    """Verify Read (async) accepts a plain dict request."""
    await test_read_async(request_type=dict)
def test_read_field_headers():
    """Verify Read sends the session as an x-goog-request-params header."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.ReadRequest()
    request.session = "session/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.read), "__call__") as call:
        call.return_value = result_set.ResultSet()
        client.read(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "session=session/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_read_field_headers_async():
    """Verify Read (async) sends the session as an x-goog-request-params header."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.ReadRequest()
    request.session = "session/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.read), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            result_set.ResultSet()
        )
        await client.read(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "session=session/value",) in kw["metadata"]
@pytest.mark.parametrize("request_type", [spanner.ReadRequest, dict,])
def test_streaming_read(request_type, transport: str = "grpc"):
    """Verify StreamingRead issues a ReadRequest and yields PartialResultSet messages."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.streaming_read), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = iter([result_set.PartialResultSet()])
        response = client.streaming_read(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.ReadRequest()
    # Establish that the response is the type that we expect.
    for message in response:
        assert isinstance(message, result_set.PartialResultSet)
def test_streaming_read_empty_call():
    """Coverage failsafe: StreamingRead works with no request object and no flattened fields."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.streaming_read), "__call__") as call:
        client.streaming_read()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.ReadRequest()
@pytest.mark.asyncio
async def test_streaming_read_async(
    transport: str = "grpc_asyncio", request_type=spanner.ReadRequest
):
    """Verify StreamingRead (async) issues a ReadRequest and streams PartialResultSet messages."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.streaming_read), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
        call.return_value.read = mock.AsyncMock(
            side_effect=[result_set.PartialResultSet()]
        )
        response = await client.streaming_read(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.ReadRequest()
    # Establish that the response is the type that we expect.
    message = await response.read()
    assert isinstance(message, result_set.PartialResultSet)
@pytest.mark.asyncio
async def test_streaming_read_async_from_dict():
    """Verify StreamingRead (async) accepts a plain dict request."""
    await test_streaming_read_async(request_type=dict)
def test_streaming_read_field_headers():
    """Verify StreamingRead sends the session as an x-goog-request-params header."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.ReadRequest()
    request.session = "session/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.streaming_read), "__call__") as call:
        call.return_value = iter([result_set.PartialResultSet()])
        client.streaming_read(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "session=session/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_streaming_read_field_headers_async():
    """Verify StreamingRead (async) sends the session as an x-goog-request-params header."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.ReadRequest()
    request.session = "session/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.streaming_read), "__call__") as call:
        call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
        call.return_value.read = mock.AsyncMock(
            side_effect=[result_set.PartialResultSet()]
        )
        await client.streaming_read(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "session=session/value",) in kw["metadata"]
@pytest.mark.parametrize("request_type", [spanner.BeginTransactionRequest, dict,])
def test_begin_transaction(request_type, transport: str = "grpc"):
    """Verify BeginTransaction issues the request and returns a Transaction with its fields set."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.begin_transaction), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = transaction.Transaction(id=b"id_blob",)
        response = client.begin_transaction(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.BeginTransactionRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, transaction.Transaction)
    assert response.id == b"id_blob"
def test_begin_transaction_empty_call():
    """Coverage failsafe: BeginTransaction works with no request object and no flattened fields."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.begin_transaction), "__call__"
    ) as call:
        client.begin_transaction()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.BeginTransactionRequest()
@pytest.mark.asyncio
async def test_begin_transaction_async(
    transport: str = "grpc_asyncio", request_type=spanner.BeginTransactionRequest
):
    """Verify BeginTransaction (async) issues the request and returns a Transaction."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.begin_transaction), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            transaction.Transaction(id=b"id_blob",)
        )
        response = await client.begin_transaction(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.BeginTransactionRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, transaction.Transaction)
    assert response.id == b"id_blob"
@pytest.mark.asyncio
async def test_begin_transaction_async_from_dict():
    """Verify BeginTransaction (async) accepts a plain dict request."""
    await test_begin_transaction_async(request_type=dict)
def test_begin_transaction_field_headers():
    """Verify BeginTransaction sends the session as an x-goog-request-params header."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.BeginTransactionRequest()
    request.session = "session/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.begin_transaction), "__call__"
    ) as call:
        call.return_value = transaction.Transaction()
        client.begin_transaction(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "session=session/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_begin_transaction_field_headers_async():
    """Verify BeginTransaction (async) sends the session as an x-goog-request-params header."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.BeginTransactionRequest()
    request.session = "session/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.begin_transaction), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            transaction.Transaction()
        )
        await client.begin_transaction(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "session=session/value",) in kw["metadata"]
def test_begin_transaction_flattened():
    """Verify BeginTransaction maps flattened keyword arguments onto the request object."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.begin_transaction), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = transaction.Transaction()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.begin_transaction(
            session="session_value",
            options=transaction.TransactionOptions(read_write=None),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].session
        mock_val = "session_value"
        assert arg == mock_val
        arg = args[0].options
        mock_val = transaction.TransactionOptions(read_write=None)
        assert arg == mock_val
def test_begin_transaction_flattened_error():
    """Verify BeginTransaction rejects mixing a request object with flattened fields."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.begin_transaction(
            spanner.BeginTransactionRequest(),
            session="session_value",
            options=transaction.TransactionOptions(read_write=None),
        )
@pytest.mark.asyncio
async def test_begin_transaction_flattened_async():
    """Verify BeginTransaction (async) maps flattened keyword arguments onto the request object."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.begin_transaction), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # (A single FakeUnaryUnaryCall assignment; the generated code also
        # assigned a bare Transaction first, which was a dead store.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            transaction.Transaction()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.begin_transaction(
            session="session_value",
            options=transaction.TransactionOptions(read_write=None),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].session
        mock_val = "session_value"
        assert arg == mock_val
        arg = args[0].options
        mock_val = transaction.TransactionOptions(read_write=None)
        assert arg == mock_val
@pytest.mark.asyncio
async def test_begin_transaction_flattened_error_async():
    """Verify BeginTransaction (async) rejects mixing a request object with flattened fields."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.begin_transaction(
            spanner.BeginTransactionRequest(),
            session="session_value",
            options=transaction.TransactionOptions(read_write=None),
        )
@pytest.mark.parametrize("request_type", [spanner.CommitRequest, dict,])
def test_commit(request_type, transport: str = "grpc"):
    """Verify Commit issues a CommitRequest and returns a CommitResponse (sync gRPC)."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.commit), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = commit_response.CommitResponse()
        response = client.commit(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.CommitRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, commit_response.CommitResponse)
def test_commit_empty_call():
    """Coverage failsafe: Commit works with no request object and no flattened fields."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.commit), "__call__") as call:
        client.commit()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.CommitRequest()
@pytest.mark.asyncio
async def test_commit_async(
    transport: str = "grpc_asyncio", request_type=spanner.CommitRequest
):
    """Verify Commit (async) issues a CommitRequest and returns a CommitResponse."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.commit), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            commit_response.CommitResponse()
        )
        response = await client.commit(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.CommitRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, commit_response.CommitResponse)
@pytest.mark.asyncio
async def test_commit_async_from_dict():
    """Verify Commit (async) accepts a plain dict request."""
    await test_commit_async(request_type=dict)
def test_commit_field_headers():
    """Verify Commit sends the session as an x-goog-request-params header."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.CommitRequest()
    request.session = "session/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.commit), "__call__") as call:
        call.return_value = commit_response.CommitResponse()
        client.commit(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "session=session/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_commit_field_headers_async():
    """Verify Commit (async) sends the session as an x-goog-request-params header."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.CommitRequest()
    request.session = "session/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.commit), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            commit_response.CommitResponse()
        )
        await client.commit(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "session=session/value",) in kw["metadata"]
def test_commit_flattened():
    """Verify Commit maps flattened keyword arguments onto the request object."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.commit), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = commit_response.CommitResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.commit(
            session="session_value",
            transaction_id=b"transaction_id_blob",
            mutations=[
                mutation.Mutation(insert=mutation.Mutation.Write(table="table_value"))
            ],
            single_use_transaction=transaction.TransactionOptions(read_write=None),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].session
        mock_val = "session_value"
        assert arg == mock_val
        arg = args[0].mutations
        mock_val = [
            mutation.Mutation(insert=mutation.Mutation.Write(table="table_value"))
        ]
        assert arg == mock_val
        # transaction_id and single_use_transaction share a oneof; only the
        # last-set member (single_use_transaction) survives on the request.
        assert args[0].single_use_transaction == transaction.TransactionOptions(
            read_write=None
        )
def test_commit_flattened_error():
    """Verify Commit rejects mixing a request object with flattened fields."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.commit(
            spanner.CommitRequest(),
            session="session_value",
            transaction_id=b"transaction_id_blob",
            mutations=[
                mutation.Mutation(insert=mutation.Mutation.Write(table="table_value"))
            ],
            single_use_transaction=transaction.TransactionOptions(read_write=None),
        )
@pytest.mark.asyncio
async def test_commit_flattened_async():
    """Verify Commit (async) maps flattened keyword arguments onto the request object."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.commit), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (A single FakeUnaryUnaryCall assignment; the generated code also
        # assigned a bare CommitResponse first, which was a dead store.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            commit_response.CommitResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.commit(
            session="session_value",
            transaction_id=b"transaction_id_blob",
            mutations=[
                mutation.Mutation(insert=mutation.Mutation.Write(table="table_value"))
            ],
            single_use_transaction=transaction.TransactionOptions(read_write=None),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].session
        mock_val = "session_value"
        assert arg == mock_val
        arg = args[0].mutations
        mock_val = [
            mutation.Mutation(insert=mutation.Mutation.Write(table="table_value"))
        ]
        assert arg == mock_val
        # transaction_id and single_use_transaction share a oneof; only the
        # last-set member (single_use_transaction) survives on the request.
        assert args[0].single_use_transaction == transaction.TransactionOptions(
            read_write=None
        )
@pytest.mark.asyncio
async def test_commit_flattened_error_async():
    """Verify Commit (async) rejects mixing a request object with flattened fields."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.commit(
            spanner.CommitRequest(),
            session="session_value",
            transaction_id=b"transaction_id_blob",
            mutations=[
                mutation.Mutation(insert=mutation.Mutation.Write(table="table_value"))
            ],
            single_use_transaction=transaction.TransactionOptions(read_write=None),
        )
@pytest.mark.parametrize("request_type", [spanner.RollbackRequest, dict,])
def test_rollback(request_type, transport: str = "grpc"):
    """Verify Rollback issues a RollbackRequest and returns None (sync gRPC)."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.rollback), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.rollback(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.RollbackRequest()
    # Establish that the response is the type that we expect.
    assert response is None
def test_rollback_empty_call():
    """Coverage failsafe: Rollback works with no request object and no flattened fields."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.rollback), "__call__") as call:
        client.rollback()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.RollbackRequest()
@pytest.mark.asyncio
async def test_rollback_async(
    transport: str = "grpc_asyncio", request_type=spanner.RollbackRequest
):
    """Verify Rollback (async) issues a RollbackRequest and returns None."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.rollback), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.rollback(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.RollbackRequest()
    # Establish that the response is the type that we expect.
    assert response is None
@pytest.mark.asyncio
async def test_rollback_async_from_dict():
    """Verify Rollback (async) accepts a plain dict request."""
    await test_rollback_async(request_type=dict)
def test_rollback_field_headers():
    """Verify Rollback sends the session as an x-goog-request-params header."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.RollbackRequest()
    request.session = "session/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.rollback), "__call__") as call:
        call.return_value = None
        client.rollback(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "session=session/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_rollback_field_headers_async():
    """Verify Rollback (async) sends the session as an x-goog-request-params header."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.RollbackRequest()
    request.session = "session/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.rollback), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.rollback(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "session=session/value",) in kw["metadata"]
def test_rollback_flattened():
    """Verify Rollback maps flattened keyword arguments onto the request object."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.rollback), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.rollback(
            session="session_value", transaction_id=b"transaction_id_blob",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].session
        mock_val = "session_value"
        assert arg == mock_val
        arg = args[0].transaction_id
        mock_val = b"transaction_id_blob"
        assert arg == mock_val
def test_rollback_flattened_error():
    """Verify Rollback rejects mixing a request object with flattened fields."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.rollback(
            spanner.RollbackRequest(),
            session="session_value",
            transaction_id=b"transaction_id_blob",
        )
@pytest.mark.asyncio
async def test_rollback_flattened_async():
    """Verify Rollback (async) maps flattened keyword arguments onto the request object."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.rollback), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (A single FakeUnaryUnaryCall assignment; the generated code also
        # assigned None first, which was a dead store.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.rollback(
            session="session_value", transaction_id=b"transaction_id_blob",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].session
        mock_val = "session_value"
        assert arg == mock_val
        arg = args[0].transaction_id
        mock_val = b"transaction_id_blob"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_rollback_flattened_error_async():
    """Verify Rollback (async) rejects mixing a request object with flattened fields."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.rollback(
            spanner.RollbackRequest(),
            session="session_value",
            transaction_id=b"transaction_id_blob",
        )
@pytest.mark.parametrize("request_type", [spanner.PartitionQueryRequest, dict,])
def test_partition_query(request_type, transport: str = "grpc"):
    """Verify PartitionQuery issues the request and returns a PartitionResponse (sync gRPC)."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.partition_query), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = spanner.PartitionResponse()
        response = client.partition_query(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.PartitionQueryRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, spanner.PartitionResponse)
def test_partition_query_empty_call():
    """Coverage failsafe: PartitionQuery works with no request object and no flattened fields."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.partition_query), "__call__") as call:
        client.partition_query()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.PartitionQueryRequest()
@pytest.mark.asyncio
async def test_partition_query_async(
    transport: str = "grpc_asyncio", request_type=spanner.PartitionQueryRequest
):
    """Verify PartitionQuery (async) issues the request and returns a PartitionResponse."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.partition_query), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            spanner.PartitionResponse()
        )
        response = await client.partition_query(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.PartitionQueryRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, spanner.PartitionResponse)
@pytest.mark.asyncio
async def test_partition_query_async_from_dict():
await test_partition_query_async(request_type=dict)
def test_partition_query_field_headers():
client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.PartitionQueryRequest()
request.session = "session/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.partition_query), "__call__") as call:
call.return_value = spanner.PartitionResponse()
client.partition_query(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "session=session/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_partition_query_field_headers_async():
client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.PartitionQueryRequest()
request.session = "session/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.partition_query), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
spanner.PartitionResponse()
)
await client.partition_query(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "session=session/value",) in kw["metadata"]
@pytest.mark.parametrize("request_type", [spanner.PartitionReadRequest, dict,])
def test_partition_read(request_type, transport: str = "grpc"):
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.partition_read), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = spanner.PartitionResponse()
response = client.partition_read(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.PartitionReadRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, spanner.PartitionResponse)
def test_partition_read_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.partition_read), "__call__") as call:
client.partition_read()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.PartitionReadRequest()
@pytest.mark.asyncio
async def test_partition_read_async(
transport: str = "grpc_asyncio", request_type=spanner.PartitionReadRequest
):
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.partition_read), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
spanner.PartitionResponse()
)
response = await client.partition_read(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.PartitionReadRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, spanner.PartitionResponse)
@pytest.mark.asyncio
async def test_partition_read_async_from_dict():
await test_partition_read_async(request_type=dict)
def test_partition_read_field_headers():
client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.PartitionReadRequest()
request.session = "session/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.partition_read), "__call__") as call:
call.return_value = spanner.PartitionResponse()
client.partition_read(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "session=session/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_partition_read_field_headers_async():
client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.PartitionReadRequest()
request.session = "session/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.partition_read), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
spanner.PartitionResponse()
)
await client.partition_read(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "session=session/value",) in kw["metadata"]
def test_credentials_transport_error():
    """Verify SpannerClient rejects mutually exclusive constructor arguments.

    A pre-built transport instance already carries its own credentials and
    scopes, so combining it with explicit credentials, a credentials file,
    an API key, or scopes must raise ValueError.  An API key combined with
    explicit credentials is likewise rejected.
    """
    # It is an error to provide credentials and a transport instance.
    transport = transports.SpannerGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = SpannerClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.SpannerGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = SpannerClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide an api_key and a transport instance.
    transport = transports.SpannerGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = SpannerClient(client_options=options, transport=transport,)
    # It is an error to provide an api_key and a credential.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = SpannerClient(
            client_options=options, credentials=ga_credentials.AnonymousCredentials()
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.SpannerGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = SpannerClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A client accepts a pre-built transport and adopts that exact object."""
    custom_transport = transports.SpannerGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    spanner_client = SpannerClient(transport=custom_transport)
    # Identity check: the client must use the supplied transport verbatim.
    assert spanner_client.transport is custom_transport
def test_transport_get_channel():
    """Both the sync and async gRPC transports expose a usable channel."""
    # Exercise the sync transport first, then the asyncio one, mirroring
    # the order callers would typically hit them.
    for transport_cls in (
        transports.SpannerGrpcTransport,
        transports.SpannerGrpcAsyncIOTransport,
    ):
        transport = transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        # The channel is created lazily; it must be truthy once accessed.
        assert transport.grpc_channel
@pytest.mark.parametrize(
"transport_class",
[transports.SpannerGrpcTransport, transports.SpannerGrpcAsyncIOTransport,],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = SpannerClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.SpannerGrpcTransport,)
def test_spanner_base_transport_error():
    """The base transport rejects credentials plus credentials_file together."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transport = transports.SpannerTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_spanner_base_transport():
    """Every RPC method on the abstract base transport raises NotImplementedError."""
    # Instantiate the base transport.  __init__ is patched out so the abstract
    # class can be constructed without real credentials plumbing.
    with mock.patch(
        "google.cloud.spanner_v1.services.spanner.transports.SpannerTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.SpannerTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "create_session",
        "batch_create_sessions",
        "get_session",
        "list_sessions",
        "delete_session",
        "execute_sql",
        "execute_streaming_sql",
        "execute_batch_dml",
        "read",
        "streaming_read",
        "begin_transaction",
        "commit",
        "rollback",
        "partition_query",
        "partition_read",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    # close() is part of the transport contract as well.
    with pytest.raises(NotImplementedError):
        transport.close()
def test_spanner_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.spanner_v1.services.spanner.transports.SpannerTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.SpannerTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/spanner.data",
),
quota_project_id="octopus",
)
def test_spanner_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.spanner_v1.services.spanner.transports.SpannerTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.SpannerTransport()
adc.assert_called_once()
def test_spanner_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
SpannerClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/spanner.data",
),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[transports.SpannerGrpcTransport, transports.SpannerGrpcAsyncIOTransport,],
)
def test_spanner_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/spanner.data",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.SpannerGrpcTransport, grpc_helpers),
(transports.SpannerGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_spanner_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"spanner.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/spanner.data",
),
scopes=["1", "2"],
default_host="spanner.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[transports.SpannerGrpcTransport, transports.SpannerGrpcAsyncIOTransport],
)
def test_spanner_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_spanner_host_no_port():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="spanner.googleapis.com"
),
)
assert client.transport._host == "spanner.googleapis.com:443"
def test_spanner_host_with_port():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="spanner.googleapis.com:8000"
),
)
assert client.transport._host == "spanner.googleapis.com:8000"
def test_spanner_grpc_transport_channel():
    """A caller-supplied channel is adopted verbatim by the sync transport."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.SpannerGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8: compare to None with ``is``, not ``==`` (E711); ``== None`` can
    # be fooled by objects overriding __eq__.
    assert transport._ssl_channel_credentials is None
def test_spanner_grpc_asyncio_transport_channel():
    """A caller-supplied channel is adopted verbatim by the asyncio transport."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.SpannerGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8: compare to None with ``is``, not ``==`` (E711).
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.SpannerGrpcTransport, transports.SpannerGrpcAsyncIOTransport],
)
def test_spanner_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.SpannerGrpcTransport, transports.SpannerGrpcAsyncIOTransport],
)
def test_spanner_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_database_path():
    """database_path renders the canonical database resource name."""
    parts = {"project": "squid", "instance": "clam", "database": "whelk"}
    want = "projects/{project}/instances/{instance}/databases/{database}".format(
        **parts
    )
    got = SpannerClient.database_path(
        parts["project"], parts["instance"], parts["database"]
    )
    assert want == got
def test_parse_database_path():
    """parse_database_path is the exact inverse of database_path."""
    fields = {
        "project": "octopus",
        "instance": "oyster",
        "database": "nudibranch",
    }
    # Render a path from the fields, then parse it back out again.
    rendered = SpannerClient.database_path(**fields)
    assert SpannerClient.parse_database_path(rendered) == fields
def test_session_path():
project = "cuttlefish"
instance = "mussel"
database = "winkle"
session = "nautilus"
expected = "projects/{project}/instances/{instance}/databases/{database}/sessions/{session}".format(
project=project, instance=instance, database=database, session=session,
)
actual = SpannerClient.session_path(project, instance, database, session)
assert expected == actual
def test_parse_session_path():
expected = {
"project": "scallop",
"instance": "abalone",
"database": "squid",
"session": "clam",
}
path = SpannerClient.session_path(**expected)
# Check that the path construction is reversible.
actual = SpannerClient.parse_session_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "whelk"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = SpannerClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "octopus",
}
path = SpannerClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = SpannerClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
    """common_folder_path renders ``folders/<folder>``."""
    folder_id = "oyster"
    rendered = SpannerClient.common_folder_path(folder_id)
    assert rendered == "folders/{0}".format(folder_id)
def test_parse_common_folder_path():
expected = {
"folder": "nudibranch",
}
path = SpannerClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = SpannerClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "cuttlefish"
expected = "organizations/{organization}".format(organization=organization,)
actual = SpannerClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "mussel",
}
path = SpannerClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = SpannerClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "winkle"
expected = "projects/{project}".format(project=project,)
actual = SpannerClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "nautilus",
}
path = SpannerClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = SpannerClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "scallop"
location = "abalone"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = SpannerClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "squid",
"location": "clam",
}
path = SpannerClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = SpannerClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.SpannerTransport, "_prep_wrapped_messages"
) as prep:
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.SpannerTransport, "_prep_wrapped_messages"
) as prep:
transport_class = SpannerClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
    """Closing a client closes the underlying transport channel exactly once."""
    # Map transport name -> private channel attribute to patch.  Named so it
    # does not shadow the module-level ``transports`` import (the original
    # local ``transports`` dict hid that module inside this function).
    transport_channels = {
        "grpc": "_grpc_channel",
    }
    for transport_name, close_name in transport_channels.items():
        client = SpannerClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """Using the client as a context manager closes its transport on exit."""
    # Named so it does not shadow the module-level ``transports`` import.
    transport_names = [
        "grpc",
    ]
    for transport_name in transport_names:
        client = SpannerClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [
        (SpannerClient, transports.SpannerGrpcTransport),
        (SpannerAsyncClient, transports.SpannerGrpcAsyncIOTransport),
    ],
)
def test_api_key_credentials(client_class, transport_class):
    """client_options.api_key is exchanged for credentials via get_api_key_credentials."""
    # ``create=True`` because get_api_key_credentials may not exist on older
    # versions of google.auth._default.
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        mock_cred = mock.Mock()
        get_api_key_credentials.return_value = mock_cred
        options = client_options.ClientOptions()
        options.api_key = "api_key"
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            # The transport must be constructed with the credentials minted
            # from the API key, not with application default credentials.
            patched.assert_called_once_with(
                credentials=mock_cred,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
|
googleapis/python-spanner
|
tests/unit/gapic/spanner_v1/test_spanner.py
|
Python
|
apache-2.0
| 151,706
|
[
"Octopus"
] |
8c5a36883a3cdd603c066c79657cba51b6ac347836d6d758d24be4d44552b153
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Run through NCBI vecscreen on a local machine.
"""
import os.path as op
import sys
from jcvi.utils.range import range_merge
from jcvi.formats.fasta import tidy
from jcvi.formats.blast import BlastLine
from jcvi.formats.base import must_open
from jcvi.apps.align import run_vecscreen, run_megablast
from jcvi.apps.base import OptionParser, ActionDispatcher, download, sh
def main():
    # Register the available sub-commands and dispatch to the one named on
    # the command line (only ``mask`` is exposed; ``blast`` is internal).
    actions = (
        ('mask', 'mask the contaminants'),
    )
    p = ActionDispatcher(actions)
    p.dispatch(globals())
def mask(args):
    """
    %prog mask fastafile
    Mask the contaminants. By default, this will compare against UniVec_Core and
    Ecoli.fasta. Merge the contaminant results, and use `maskFastaFromBed`. Can
    perform FASTA tidy if requested.
    """
    p = OptionParser(mask.__doc__)
    p.add_option("--db",
                 help="Contaminant db other than Ecoli K12 [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    fastafile, = args
    assert op.exists(fastafile)
    outfastafile = fastafile.rsplit(".", 1)[0] + ".masked.fasta"
    # First pass: screen against UniVec_Core (blast()'s default database).
    vecbedfile = blast([fastafile])
    ecoliurl = \
        "ftp://ftp.ncbi.nih.gov/genomes/Bacteria/Escherichia_coli_K_12_substr__DH10B_uid58979/NC_010473.fna"
    # Second pass: screen against E. coli K-12 (or a user-supplied --db).
    ecolifile = opts.db or download(ecoliurl, filename="Ecoli.fasta")
    assert op.exists(ecolifile)
    ecolibedfile = blast([fastafile, "--db={0}".format(ecolifile)])
    # Merge both hit sets (bridging gaps up to 100 bp) and hard-mask the
    # merged intervals in the FASTA.  Requires bedtools on the PATH.
    cmd = "cat {0} {1}".format(vecbedfile, ecolibedfile)
    cmd += " | mergeBed -nms -d 100 -i stdin"
    cmd += " | maskFastaFromBed -fi {0} -bed stdin -fo {1}".\
        format(fastafile, outfastafile)
    sh(cmd)
    return tidy([outfastafile])
def blast(args):
    """
    %prog blast fastafile
    Run BLASTN against database (default is UniVec_Core). Output .bed format
    on the vector/contaminant ranges.
    """
    p = OptionParser(blast.__doc__)
    p.add_option("--dist", default=100, type="int",
            help="Merge adjacent HSPs separated by [default: %default]")
    p.add_option("--db",
            help="Use a different database rather than UniVec_Core")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fastafile, = args
    fastaprefix = fastafile.split(".", 1)[0]

    univec = opts.db or download("ftp://ftp.ncbi.nih.gov/pub/UniVec/UniVec_Core")
    uniprefix = univec.split(".", 1)[0]

    fastablast = fastaprefix + ".{0}.blast".format(uniprefix)

    # vecscreen is tuned for UniVec; fall back to megablast for a custom db.
    prog = run_megablast if opts.db else run_vecscreen
    prog(infile=fastafile, outfile=fastablast, db=univec, pctid=95, hitlen=50)

    # Collect (query, start, stop) hit ranges from the blast output.
    fp = open(fastablast)
    ranges = [(b.query, b.qstart, b.qstop)
              for b in (BlastLine(row) for row in fp)]
    fp.close()  # fix: close the input handle instead of leaking it

    merged_ranges = range_merge(ranges, dist=opts.dist)
    bedfile = fastaprefix + ".{0}.bed".format(uniprefix)
    fw = must_open(bedfile, "w")
    for seqid, start, end in merged_ranges:
        # BED is 0-based half-open, hence start - 1.
        fw.write("\t".join(str(x) for x in (seqid, start - 1, end, uniprefix)) + "\n")
    # fix: close (and thereby flush) before returning the path -- the caller
    # immediately `cat`s this file, which could otherwise see truncated output.
    fw.close()

    return bedfile
if __name__ == '__main__':
main()
|
sgordon007/jcvi_062915
|
apps/vecscreen.py
|
Python
|
bsd-2-clause
| 3,165
|
[
"BLAST"
] |
239769c4145d9c1aade0096906d3579c34a58b649b59fe1552c938577cd8ec26
|
"""This module provides a dictionary of rgb values with keys of color names.
"""
color_table = """Aero #7CB9E8 49% 73% 91% 206° 70% 70% 47% 91%
Aero blue #C9FFE5 79% 100% 90% 151° 100% 89% 21% 100%
African violet #B284BE 70% 52% 75% 288° 31% 63% 31% 75%
Air Force blue (RAF) #5D8AA8 36% 54% 66% 204° 30% 51% 45% 66%
Air Force blue (USAF) #00308F 0% 19% 56% 220° 100% 28% 100% 56%
Air superiority blue #72A0C1 45% 63% 76% 205° 39% 60% 41% 76%
Alabama Crimson #A32638 64% 15% 22% 350° 62% 39% 80% 60%
Alice blue #F0F8FF 94% 97% 100% 208° 100% 97% 6% 100%
Alizarin crimson #E32636 89% 15% 21% 355° 77% 52% 83% 89%
Alloy orange #C46210 77% 38% 6% 27° 85% 42% 92% 77%
Almond #EFDECD 94% 87% 80% 30° 52% 87% 14% 94%
Amaranth #E52B50 90% 17% 31% 348° 78% 53% 81% 90%
Amazon #3B7A57 23% 48% 34% 147° 35% 36% 52% 48%
Amber #FFBF00 100% 75% 0% 45° 100% 50% 100% 100%
SAE/ECE Amber (color) #FF7E00 100% 49% 0% 30° 100% 50% 100% 100%
American rose #FF033E 100% 1% 24% 345° 100% 51% 99% 87%
Amethyst #9966CC 60% 40% 80% 270° 50% 60% 50% 80%
Android green #A4C639 64% 78% 22% 74° 55% 50% 71% 78%
Anti-flash white #F2F3F4 95% 95% 96% 210° 8% 95% 1% 96%
Antique brass #CD9575 80% 58% 46% 22° 47% 63% 43% 80%
Antique bronze #665D1E 40% 36% 12% 53° 55% 26% 71% 40%
Antique fuchsia #915C83 57% 36% 51% 316° 22% 47% 37% 57%
Antique ruby #841B2D 52% 11% 18% 350° 66% 31% 80% 52%
Antique white #FAEBD7 98% 92% 84% 34° 78% 91% 14% 98%
Ao (English) #008000 0% 50% 0% 120° 100% 25% 100% 50%
Apple green #8DB600 55% 71% 0% 74° 100% 36% 100% 71%
Apricot #FBCEB1 98% 81% 69% 24° 90% 84% 29% 98%
Aqua #00FFFF 0% 100% 100% 160° 100% 50% 100% 100%
Aquamarine #7FFFD4 50% 100% 83% 160° 100% 75% 50% 100%
Army green #4B5320 29% 33% 13% 69° 44% 23% 61% 33%
Arsenic #3B444B 23% 27% 29% 206° 12% 26% 21% 29%
Arylide yellow #E9D66B 91% 84% 42% 51° 74% 67% 54% 91%
Ash grey #B2BEB5 70% 75% 71% 135° 9% 72% 6% 75%
Asparagus #87A96B 53% 66% 42% 93° 27% 54% 37% 66%
Atomic tangerine #FF9966 100% 60% 40% 20° 100% 70% 60% 100%
Auburn #A52A2A 65% 16% 16% 0° 59% 41% 74% 64%
Aureolin #FDEE00 99% 93% 0% 56° 100% 50% 100% 99%
AuroMetalSaurus #6E7F80 43% 50% 50% 183° 8% 47% 14% 50%
Avocado #568203 34% 51% 1% 81° 96% 26% 98% 51%
Azure #007FFF 0% 50% 100% 210° 100% 50% 100% 100%
Azure mist/web #F0FFFF 94% 100% 100% 180° 100% 97% 6% 100%
Baby blue #89CFF0 54% 81% 94% 199° 77% 74% 43% 94%
Baby blue eyes #A1CAF1 63% 79% 95% 209° 74% 79% 33% 95%
Baby pink #F4C2C2 96% 76% 76% 30° 69% 86% 21% 96%
Baby powder #FEFEFA 100% 100% 98% 60° 67% 99% 2% 100%
Baker-Miller pink #FF91AF 100% 57% 69% 344° 100% 78% 43% 100%
Ball blue #21ABCD 13% 67% 80% 192° 72% 47% 84% 80%
Banana Mania #FAE7B5 98% 91% 71% 43° 87% 85% 28% 98%
Banana yellow #FFE135 100% 88% 21% 51° 100% 60% 79% 100%
Barbie pink #E0218A 88% 13% 54% 327° 76% 50% 85% 88%
Barn red #7C0A02 49% 4% 1% 4° 97% 25% 98% 49%
Battleship grey #848482 52% 52% 51% 60° 1% 51% 2% 52%
Bazaar #98777B 60% 47% 48% 353° 14% 53% 22% 60%
Beau blue #BCD4E6 74% 83% 90% 206° 46% 82% 18% 90%
Beaver #9F8170 62% 51% 44% 22° 20% 53% 35% 63%
Beige #F5F5DC 96% 96% 86% 60° 56% 91% 10% 96%
B'dazzled Blue #2E5894 18% 35% 58% 215° 53% 38% 69% 58%
Big dip o’ruby #9C2542 61% 15% 26% 345° 62% 38% 76% 61%
Bisque #FFE4C4 100% 89% 77% 33° 100% 88% 23% 100%
Bistre #3D2B1F 24% 17% 12% 24° 33% 18% 49% 24%
Bistre brown #967117 59% 44% 9% 43° 73% 34% 85% 59%
Bitter lemon #CAE00D 79% 88% 5% 66° 89% 47% 94% 88%
Bitter lime #BFFF00 39% 55% 7% 79° 78% 31% 78% 84%
Bittersweet #FE6F5E 100% 44% 37% 6° 99% 68% 63% 100%
Bittersweet shimmer #BF4F51 75% 31% 32% 359° 47% 53% 59% 75%
Black #000000 0% 0% 0% — 0% 0% 0% 0%
Black bean #3D0C02 24% 5% 1% 10° 94% 12% 97% 24%
Black leather jacket #253529 15% 21% 16% 135° 18% 18% 6% 18%
Black olive #3B3C36 23% 24% 21% 70° 5% 22% 10% 24%
Blanched almond #FFEBCD 100% 92% 80% 36° 100% 90% 20% 100%
Blast-off bronze #A57164 65% 44% 39% 12° 27% 52% 39% 65%
Bleu de France #318CE7 19% 55% 91% 210° 79% 55% 79% 91%
Blizzard Blue #ACE5EE 67% 90% 93% 188° 66% 80% 28% 93%
Blond #FAF0BE 98% 94% 75% 50° 86% 86% 24% 98%
Blue #0000FF 0% 0% 100% 240° 100% 50% 100% 100%
Blue (Crayola) #1F75FE 12% 46% 100% 213° 99% 56% 99% 100%
Blue (Munsell) #0093AF 0% 58% 69% 190° 100% 34% 100% 68%
Blue (NCS) #0087BD 0% 53% 74% 197° 100% 37% 100% 74%
Blue (pigment) #333399 20% 20% 60% 240° 50% 40% 67% 60%
Blue (RYB) #0247FE 1% 28% 100% 224° 99% 50% 99% 99%
Blue Bell #A2A2D0 64% 64% 82% 240° 33% 73% 22% 81%
Blue-gray #6699CC 40% 60% 80% 210° 50% 60% 50% 80%
Blue-green #0D98BA 5% 60% 73% 192° 87% 39% 93% 73%
Blue sapphire #126180 7% 38% 50% 197° 75% 29% 86% 50%
Blue-violet #8A2BE2 54% 17% 89% 266° 76% 53% 81% 89%
Blue yonder #5072A7 31% 45% 65% 217° 35% 48% 52% 66%
Blueberry #4F86F7 31% 53% 97% 220° 91% 64% 68% 97%
Bluebonnet #1C1CF0 11% 11% 94% 240° 88% 53% 83% 94%
Blush #DE5D83 87% 36% 51% 342° 66% 62% 58% 87%
Bole #79443B 47% 27% 23% 30° 34% 35% 24% 34%
Bondi blue #0095B6 0% 58% 71% 191° 100% 36% 100% 71%
Bone #E3DAC9 89% 85% 79% 48° 32% 84% 30% 95%
Boston University Red #CC0000 80% 0% 0% 0° 100% 40% 100% 80%
Bottle green #006A4E 0% 42% 31% 164° 100% 21% 100% 41%
Boysenberry #873260 53% 20% 38% 328° 46% 36% 63% 53%
Brandeis blue #0070FF 0% 44% 100% 214° 100% 50% 100% 100%
Brass #B5A642 71% 65% 26% 52° 47% 48% 64% 71%
Brick red #CB4154 80% 25% 33% 352° 57% 53% 68% 80%
Bright cerulean #1DACD6 11% 67% 84% 194° 76% 48% 86% 84%
Bright green #66FF00 40% 100% 0% 96° 100% 50% 100% 100%
Bright lavender #BF94E4 75% 58% 89% 272° 60% 74% 35% 89%
Bright maroon #C32148 76% 13% 28% 345° 71% 45% 75% 38%
Bright pink #FF007F 100% 0% 50% 330° 100% 50% 100% 100%
Bright turquoise #08E8DE 3% 91% 87% 177° 93% 47% 97% 91%
Bright ube #D19FE8 82% 62% 91% 281° 61% 77% 31% 91%
Brilliant lavender #F4BBFF 96% 73% 100% 290° 100% 87% 27% 100%
Brilliant rose #FF55A3 100% 33% 64% 332° 100% 67% 67% 100%
Brink pink #FB607F 98% 38% 50% 348° 95% 68% 62% 98%
British racing green #004225 0% 26% 15% 154° 100% 13% 100% 26%
Bronze #CD7F32 80% 50% 20% 30° 61% 50% 76% 80%
Bronze Yellow #737000 45% 44% 0% 58° 100% 23% 100% 45%
Brown (traditional) #964B00 59% 29% 0% 30° 100% 29% 100% 59%
Brown (web) #A52A2A 65% 16% 16% 0° 59% 41% 75% 65%
Brown-nose #6B4423 40% 27% 14% 28° 49% 27% 67% 42%
Brunswick green #1B4D3E 11% 30% 24% 162° 48% 20% 65% 30%
Bubble gum #FFC1CC 100% 76% 80% 349° 100% 88% 23% 99%
Bubbles #E7FEFF 91% 100% 100% 183° 100% 95% 9% 100%
Buff #F0DC82 94% 86% 51% 49° 79% 73% 46% 94%
Bulgarian rose #480607 28% 2% 3% 359° 85% 15% 92% 28%
Burgundy #800020 50% 0% 13% 345° 100% 25% 100% 50%
Burlywood #DEB887 87% 72% 53% 34° 57% 70% 39% 87%
Burnt orange #CC5500 80% 33% 0% 25° 100% 40% 100% 80%
Burnt sienna #E97451 91% 45% 32% 14° 78% 62% 65% 91%
Burnt umber #8A3324 54% 20% 14% 9° 59% 34% 74% 54%
Byzantine #BD33A4 74% 20% 64% 311° 58% 47% 73% 74%
Byzantium #702963 44% 16% 39% 311° 46% 30% 63% 44%
Cadet #536872 33% 41% 45% 206° 16% 39% 31% 47%
Cadet blue #5F9EA0 37% 62% 63% 182° 26% 50% 41% 63%
Cadet grey #91A3B0 57% 64% 69% 205° 16% 63% 18% 69%
Cadmium green #006B3C 0% 42% 24% 154° 100% 21% 100% 42%
Cadmium orange #ED872D 93% 53% 18% 28° 84% 55% 81% 93%
Cadmium red #E30022 89% 0% 13% 351° 100% 45% 100% 89%
Cadmium yellow #FFF600 100% 96% 0% 34° 100% 50% 93% 100%
Café au lait #A67B5B 65% 48% 36% 26° 30% 50% 45% 65%
Café noir #4B3621 29% 21% 13% 30° 39% 21% 56% 29%
Cal Poly green #1E4D2B 12% 30% 17% 137° 44% 21% 61% 30%
Cambridge Blue #A3C1AD 64% 76% 68% 140° 20% 70% 16% 76%
Camel #C19A6B 76% 60% 42% 33° 41% 59% 45% 76%
Cameo pink #EFBBCC 94% 73% 80% 340° 62% 84% 22% 94%
Camouflage green #78866B 47% 53% 42% 91° 11% 47% 20% 53%
Canary yellow #FFEF00 100% 94% 0% 56° 100% 50% 100% 100%
Candy apple red #FF0800 100% 3% 0% 2° 100% 50% 100% 100%
Candy pink #E4717A 89% 44% 48% 355° 68% 67% 50% 89%
Capri #00BFFF 0% 75% 100% 195° 100% 50% 100% 100%
Caput mortuum #592720 35% 15% 13% 7° 47% 24% 64% 35%
Cardinal #C41E3A 77% 12% 23% 350° 74% 44% 85% 77%
Caribbean green #00CC99 0% 80% 60% 150° 100% 40% 100% 44%
Carmine #960018 59% 0% 9% 350° 100% 29% 100% 59%
Carmine (M&P) #D70040 84% 0% 25% 342° 100% 42% 100% 84%
Carmine pink #EB4C42 92% 30% 26% 4° 81% 59% 72% 92%
Carmine red #FF0038 100% 0% 22% 347° 100% 50% 100% 100%
Carnation pink #FFA6C9 100% 65% 79% 336° 100% 83% 35% 100%
Carnelian #B31B1B 70% 11% 11% 0° 74% 40% 85% 70%
Carolina blue #99BADD 60% 73% 87% 211° 50% 73% 31% 87%
Carrot orange #ED9121 93% 57% 13% 33° 85% 53% 86% 93%
Castleton green #00563F 0% 34% 25% 164° 100% 17% 100% 34%
Catalina blue #062A78 2% 16% 47% 221° 91% 25% 95% 47%
Catawba #703642 44% 21% 26% 348° 35% 33% 52% 44%
Cedar Chest #C95A49 79% 35% 29% 8° 54% 54% 64% 79%
Ceil #92A1CF 57% 63% 81% 225° 39% 69% 30% 81%
Celadon #ACE1AF 67% 88% 69% 174° 47% 78% 47% 73%
Celadon blue #007BA7 0% 48% 65% 196° 100% 33% 100% 65%
Celadon green #2F847C 18% 52% 49% 123° 48% 35% 24% 88%
Celeste (colour) #B2FFFF 70% 100% 100% 180° 100% 85% 30% 100%
Celestial blue #4997D0 29% 59% 82% 205° 59% 55% 65% 81%
Cerise #DE3163 87% 19% 39% 343° 72% 53% 78% 87%
Cerise pink #EC3B83 93% 23% 51% 336° 82% 58% 75% 93%
Cerulean #007BA7 0% 48% 65% 196° 100% 33% 100% 65%
Cerulean blue #2A52BE 16% 32% 75% 224° 64% 46% 78% 75%
Cerulean frost #6D9BC3 43% 61% 76% 208° 42% 60% 44% 77%
CG Blue #007AA5 0% 48% 65% 196° 100% 32% 100% 65%
CG Red #E03C31 88% 24% 19% 4° 74% 54% 78% 88%
Chamoisee #A0785A 63% 47% 35% 26° 28% 49% 44% 63%
Champagne #F7E7CE 97% 91% 81% 37° 72% 89% 17% 97%
Charcoal #36454F 21% 27% 31% 204° 19% 26% 31% 31%
Charleston green #232B2B 14% 17% 17% 180° 10% 15% 19% 17%
Charm pink #E68FAC 90% 56% 67% 333° 64% 73% 50% 87%
Chartreuse (traditional) #DFFF00 87% 100% 0% 68° 100% 50% 100% 100%
Chartreuse (web) #7FFF00 50% 100% 0% 90° 100% 50% 100% 100%
Cherry #DE3163 87% 19% 39% 343° 72% 53% 78% 87%
Cherry blossom pink #FFB7C5 100% 72% 77% 348° 100% 86% 28% 100%
Chestnut #954535 56% 27% 21% 10° 46% 39% 54% 68%
China pink #DE6FA1 87% 44% 63% 333° 63% 65% 50% 87%
China rose #A8516E 66% 32% 43% 340° 35% 49% 52% 66%
Chinese red #AA381E 67% 22% 12% 11° 70% 39% 82% 67%
Chinese violet #856088 52% 38% 53% 296° 17% 46% 29% 53%
Chocolate (traditional) #7B3F00 48% 25% 0% 31° 100% 24% 100% 48%
Chocolate (web) #D2691E 82% 41% 12% 25° 75% 47% 86% 82%
Chrome yellow #FFA700 100% 65% 0% 40° 100% 50% 100% 100%
Cinereous #98817B 60% 51% 48% 12° 12% 54% 19% 60%
Cinnabar #E34234 89% 26% 20% 5° 76% 55% 77% 89%
Cinnamon #D2691E 82% 41% 12% 25° 75% 47% 86% 82%
Citrine #E4D00A 89% 82% 4% 54° 92% 47% 96% 89%
Citron #9FA91F 62% 66% 12% 64° 69% 39% 82% 66%
Claret #7F1734 50% 9% 20% 343° 69% 29% 82% 50%
Classic rose #FBCCE7 98% 80% 91% 333° 86% 89% 100% 20%
Cobalt #0047AB 0% 28% 67% 215° 100% 34% 100% 67%
Cocoa brown #D2691E 82% 41% 12% 25° 75% 47% 86% 82%
Coconut #965A3E 59% 35% 24% 19° 42% 42% 59% 59%
Coffee #6F4E37 44% 31% 22% 25° 34% 33% 51% 44%
Columbia blue #9BDDFF 61% 87% 100% 200° 100% 80% 39% 100%
Congo pink #F88379 97% 51% 47% 5° 90% 72% 51% 97%
Cool black #002E63 0% 18% 39% 212° 100% 19% 100% 39%
Cool grey #8C92AC 55% 57% 67% 229° 16% 61% 19% 68%
Copper #B87333 72% 45% 20% 29° 57% 46% 72% 72%
Copper (Crayola) #DA8A67 85% 54% 40% 18° 61% 63% 53% 85%
Copper penny #AD6F69 68% 44% 41% 5° 29% 55% 39% 68%
Copper red #CB6D51 80% 43% 32% 14° 54% 56% 60% 80%
Copper rose #996666 60% 40% 40% 0° 20% 50% 33% 60%
Coquelicot #FF3800 100% 22% 0% 13° 100% 50% 100% 100%
Coral #FF7F50 100% 50% 31% 16° 100% 66% 69% 100%
Coral pink #F88379 97% 51% 47% 5° 90% 72% 51% 97%
Coral red #FF4040 100% 25% 25% 0° 100% 63% 75% 100%
Cordovan #893F45 54% 25% 27% 337° 37% 39% 89% 94%
Corn #FBEC5D 98% 93% 36% 54° 95% 68% 63% 98%
Cornell Red #B31B1B 70% 11% 11% 0° 74% 40% 85% 70%
Cornflower blue #6495ED 39% 58% 93% 219° 79% 66% 58% 93%
Cornsilk #FFF8DC 100% 97% 86% 48° 100% 93% 14% 100%
Cosmic latte #FFF8E7 100% 97% 91% 42° 100% 95% 9% 100%
Cotton candy #FFBCD9 100% 74% 85% 334° 100% 87% 26% 100%
Cream #FFFDD0 100% 99% 82% 57° 100% 91% 18% 100%
Crimson #DC143C 86% 8% 24% 348° 83% 47% 91% 86%
Crimson glory #BE0032 75% 0% 20% 356° 100% 37% 100% 75%
Cyan #00FFFF 0% 100% 100% 180° 100% 50% 100% 100%
Cyan (process) #00B7EB 0% 72% 92% 193° 100% 46% 100% 92%
Cyber grape #58427C 35% 26% 49% 263° 31% 37% 47% 49%
Cyber yellow #FFD300 100% 83% 0% 50° 100% 50% 100% 100%
Daffodil #FFFF31 100% 100% 19% 60° 100% 60% 81% 100%
Dandelion #F0E130 94% 88% 19% 55° 87% 57% 80% 94%
Dark blue #00008B 0% 0% 55% 240° 100% 27% 100% 55%
Dark blue-gray #666699 40% 40% 60% 240° 20% 50% 33% 60%
Dark brown #654321 40% 26% 13% 30° 51% 26% 67% 40%
Dark byzantium #5D3954 36% 22% 33% 315° 24% 29% 39% 37%
Dark candy apple red #A40000 64% 0% 0% 0° 100% 32% 100% 64%
Dark cerulean #08457E 3% 27% 49% 209° 88% 26% 94% 49%
Dark chestnut #986960 60% 41% 38% 10° 23% 49% 37% 60%
Dark coral #CD5B45 80% 36% 27% 10° 58% 54% 66% 80%
Dark cyan #008B8B 0% 55% 55% 180° 100% 27% 24% 100%
Dark electric blue #536878 33% 41% 47% 180° 18% 40% 20% 25%
Dark goldenrod #B8860B 72% 53% 4% 43° 89% 38% 94% 72%
Dark gray #A9A9A9 66% 66% 66% — 0% 66% 0% 66%
Dark green #013220 0% 20% 13% 158° 96% 10% 98% 20%
Dark imperial blue #00416A 0% 25% 42% 203° 100% 21% 100% 42%
Dark jungle green #1A2421 10% 14% 13% 120° 16% 12% 10% 10%
Dark khaki #BDB76B 74% 72% 42% 56° 38% 58% 43% 74%
Dark lava #483C32 28% 24% 20% 27° 18% 24% 31% 28%
Dark lavender #734F96 45% 31% 59% 270° 31% 45% 47% 59%
Dark liver #534B4F 33% 29% 31% 330° 5% 31% 10% 33%
Dark liver (horses) #543D37 33% 24% 22% 12° 21% 27% 35% 33%
Dark magenta #8B008B 55% 0% 55% 300° 100% 27% 100% 55%
Dark midnight blue #003366 0% 20% 40% 210° 100% 20% 100% 40%
Dark moss green #4A5D23 29% 36% 14% 80° 45% 25% 62% 37%
Dark olive green #556B2F 33% 42% 18% 82° 39% 30% 56% 42%
Dark orange #FF8C00 100% 55% 0% 34° 100% 50% 100% 94%
Dark orchid #9932CC 60% 20% 80% 280° 61% 50% 75% 80%
Dark pastel blue #779ECB 47% 62% 80% 212° 45% 63% 41% 80%
Dark pastel green #03C03C 1% 75% 24% 138° 97% 38% 98% 75%
Dark pastel purple #966FD6 59% 44% 84% 263° 56% 64% 48% 84%
Dark pastel red #C23B22 76% 23% 13% 9° 70% 45% 82% 76%
Dark pink #E75480 91% 33% 50% 342° 75% 62% 64% 91%
Dark powder blue #003399 0% 20% 60% 220° 100% 30% 70% 60%
Dark raspberry #872657 53% 15% 34% 330° 56% 34% 72% 53%
Dark red #8B0000 55% 0% 0% 0° 100% 27% 100% 56%
Dark salmon #E9967A 91% 59% 48% 15° 72% 70% 48% 91%
Dark scarlet #560319 34% 1% 10% 344° 93% 18% 97% 34%
Dark sea green #8FBC8F 56% 74% 56% 120° 25% 65% 24% 74%
Dark sienna #3C1414 24% 8% 8% 0° 50% 16% 67% 24%
Dark sky blue #8CBED6 55% 75% 84% 199° 47% 69% 35% 84%
Dark slate blue #483D8B 28% 24% 55% 248° 39% 39% 56% 55%
Dark slate gray #2F4F4F 18% 31% 31% 180° 25% 25% 41% 31%
Dark spring green #177245 9% 45% 27% 150° 66% 27% 80% 45%
Dark tan #918151 57% 51% 32% 45° 28% 44% 44% 57%
Dark tangerine #FFA812 100% 66% 7% 38° 100% 54% 93% 100%
Dark taupe #483C32 28% 24% 20% 30° 18% 24% 17% 34%
Dark terra cotta #CC4E5C 80% 31% 36% 354° 55% 55% 55% 55%
Dark turquoise #00CED1 0% 81% 82% 181° 100% 41% 100% 82%
Dark vanilla #D1BEA8 82% 75% 66% 32° 31% 74% 20% 82%
Dark violet #9400D3 58% 0% 83% 282° 100% 41% 100% 83%
Dark yellow #9B870C 61% 53% 5% 295° 86% 33% 92% 61%
Dartmouth green #00703C 0% 44% 24% 121° 100% 22% 90% 50%
Davy's grey #555555 33% 33% 33% — 0% 33% 0% 33%
Debian red #D70A53 84% 4% 33% 339° 91% 44% 95% 84%
Deep carmine #A9203E 66% 13% 24% 357° 68% 39% 100% 66%
Deep carmine pink #EF3038 94% 19% 22% 357° 86% 56% 80% 94%
Deep carrot orange #E9692C 91% 41% 17% 34° 81% 54% 76% 84%
Deep cerise #DA3287 85% 20% 53% 330° 69% 53% 77% 85%
Deep champagne #FAD6A5 98% 84% 65% 35° 90% 81% 34% 98%
Deep chestnut #B94E48 73% 31% 28% 3° 45% 50% 61% 73%
Deep coffee #704241 44% 26% 25% 1° 27% 35% 42% 44%
Deep fuchsia #C154C1 76% 33% 76% 300° 47% 54% 56% 76%
Deep jungle green #004B49 0% 29% 29% 120° 100% 15% 40% 40%
Deep lemon #F5C71A 96% 78% 30% 47° 89% 63% 89% 96%
Deep lilac #9955BB 60% 33% 73% 280° 43% 53% 55% 73%
Deep magenta #CC00CC 80% 0% 80% 300° 100% 40% 100% 80%
Deep mauve #D473D4 83% 45% 83% 300° 53% 64% 46% 83%
Deep moss green #355E3B 21% 37% 23% 129° 28% 29% 44% 37%
Deep peach #FFCBA4 100% 80% 64% 26° 100% 82% 36% 100%
Deep pink #FF1493 100% 8% 58% 328° 100% 54% 92% 100%
Deep ruby #843F5B 52% 25% 36% 336° 35% 38% 52% 52%
Deep saffron #FF9933 100% 60% 20% 30° 100% 60% 80% 100%
Deep sky blue #00BFFF 0% 75% 100% 195° 100% 50% 100% 100%
Deep Space Sparkle #4A646C 29% 39% 42% 194° 19% 36% 32% 43%
Deep Taupe #7E5E60 49% 37% 38% 356° 15% 43% 25% 49%
Deep Tuscan red #66424D 40% 26% 30% 342° 21% 33% 35% 40%
Deer #BA8759 73% 53% 35% 28° 41% 54% 52% 73%
Denim #1560BD 8% 38% 74% 213° 80% 41% 89% 74%
Desert #C19A6B 76% 60% 42% 33° 41% 59% 44% 76%
Desert sand #EDC9AF 93% 79% 69% 25° 63% 81% 26% 93%
Diamond #B9F2FF 73% 95% 100% 190° 100% 86% 100% 100%
Dim gray #696969 41% 41% 41% — 0% 41% 0% 41%
Dirt #9B7653 61% 46% 33% 29° 30% 47% 47% 61%
Dodger blue #1E90FF 12% 56% 100% 210° 100% 56% 88% 100%
Dogwood rose #D71868 84% 9% 41% 330° 80% 47% 84% 82%
Dollar bill #85BB65 52% 73% 40% 98° 39% 57% 46% 73%
Donkey Brown #664C28 40% 30% 16% 35° 44% 28% 61% 40%
Drab #967117 59% 44% 9% 43° 73% 34% 85% 59%
Duke blue #00009C 0% 0% 61% 240° 100% 31% 100% 61%
Dust storm #E5CCC9 90% 80% 79% 6° 35% 84% 12% 90%
Earth yellow #E1A95F 88% 66% 37% 34° 68% 63% 58% 88%
Ebony #555D50 33% 36% 31% 97° 8% 34% 14% 37%
Ecru #C2B280 76% 70% 50% 45° 35% 63% 34% 76%
Eggplant #614051 38% 25% 32% 329° 21% 32% 34% 38%
Eggshell #F0EAD6 94% 92% 84% 46° 46% 89% 11% 94%
Egyptian blue #1034A6 6% 20% 65% 226° 82% 36% 90% 65%
Electric blue #7DF9FF 49% 98% 100% 183° 100% 75% 51% 100%
Electric crimson #FF003F 100% 0% 25% 345° 100% 50% 100% 100%
Electric cyan #00FFFF 0% 100% 100% 180° 100% 50% 100% 100%
Electric green #00FF00 0% 100% 0% 120° 100% 50% 100% 100%
Electric indigo #6F00FF 44% 0% 100% 266° 100% 50% 100% 100%
Electric lavender #F4BBFF 96% 73% 100% 290° 100% 87% 27% 100%
Electric lime #CCFF00 80% 100% 0% 72° 100% 50% 100% 100%
Electric purple #BF00FF 75% 0% 100% 285° 100% 50% 100% 100%
Electric ultramarine #3F00FF 25% 0% 100% 255° 100% 50% 100% 100%
Electric violet #8F00FF 56% 0% 100% 274° 100% 50% 100% 100%
Electric yellow #FFFF33 100% 100% 20% 60° 100% 60% 80% 100%
Emerald #50C878 31% 78% 47% 140° 52% 55% 60% 78%
English green #1B4D3E 11% 30% 24% 162° 48% 20% 65% 30%
English lavender #B48395 71% 51% 58% 338° 25% 61% 27% 71%
English red #AB4B52 67% 31% 32% 357° 37% 49% 54% 67%
English violet #563C5C 34% 24% 36% 289° 21% 30% 35% 36%
Eton blue #96C8A2 59% 78% 64% 134° 31% 69% 25% 78%
Eucalyptus #44D7A8 27% 84% 66% 161° 65% 56% 68% 84%
Fallow #C19A6B 76% 60% 42% 45° 41% 59% 17% 23%
Falu red #801818 50% 9% 9% 0° 68% 30% 81% 50%
Fandango #B53389 71% 20% 54% 320° 56% 46% 72% 71%
Fandango pink #DE5285 87% 32% 52% 342° 68% 60% 63% 87%
Fashion fuchsia #F400A1 96% 0% 63% 320° 100% 48% 100% 96%
Fawn #E5AA70 90% 67% 44% 30° 69% 67% 51% 90%
Feldgrau #4D5D53 30% 36% 33% 142° 9% 33% 17% 36%
Feldspar #FDD5B1 88% 71% 51% 33° 60% 69% 60% 56%
Fern green #4F7942 31% 47% 26% 106° 29% 37% 45% 47%
Ferrari Red #FF2800 100% 16% 0% 9° 100% 50% 100% 100%
Field drab #6C541E 42% 33% 12% 42° 57% 27% 72% 42%
Firebrick #B22222 70% 13% 13% 0° 68% 42% 81% 70%
Fire engine red #CE2029 81% 13% 16% 0° 73% 47% 92% 80%
Flame #E25822 89% 35% 13% 17° 77% 51% 85% 89%
Flamingo pink #FC8EAC 99% 56% 67% 344° 95% 77% 44% 99%
Flattery #6B4423 40% 27% 14% 28° 49% 27% 67% 42%
Flavescent #F7E98E 97% 91% 56% 52° 87% 76% 41% 76%
Flax #EEDC82 93% 86% 51% 50° 76% 72% 45% 93%
Flirt #A2006D 64% 0% 43% 320° 100% 32% 100% 64%
Floral white #FFFAF0 100% 98% 94% 40° 100% 97% 6% 100%
Fluorescent orange #FFBF00 100% 75% 0% 45° 100% 50% 100% 100%
Fluorescent pink #FF1493 100% 8% 58% 328° 100% 54% 92% 100%
Fluorescent yellow #CCFF00 80% 100% 0% 72° 100% 50% 100% 100%
Folly #FF004F 100% 0% 31% 341° 100% 50% 100% 100%
Forest green (traditional) #014421 0% 27% 13% 149° 97% 14% 99% 27%
Forest green (web) #228B22 13% 55% 13% 120° 61% 34% 76% 55%
French beige #A67B5B 65% 48% 36% 26° 30% 50% 45% 65%
French bistre #856D4D 52% 43% 30% 34° 27% 41% 42% 52%
French blue #0072BB 0% 45% 73% 203° 100% 37% 100% 73%
French lilac #86608E 53% 38% 56% 290° 19% 47% 32% 56%
French lime #9EFD38 62% 99% 22% 89° 98% 61% 78% 99%
French mauve #D473D4 83% 45% 83% 300° 53% 64% 46% 83%
French raspberry #C72C48 78% 17% 28% 351° 64% 48% 78% 78%
French rose #F64A8A 96% 29% 54% 338° 91% 63% 70% 96%
French sky blue #77B5FE 47% 71% 100% 213° 99% 73% 53% 100%
French wine #AC1E44 67% 12% 27% 344° 70% 40% 83% 68%
Fresh Air #A6E7FF 65% 91% 100% 196° 100% 83% 35% 100%
Fuchsia #FF00FF 100% 0% 100% 321° 100% 50% 100% 100%
Fuchsia (Crayola) #C154C1 76% 33% 76% 300° 47% 54% 56% 76%
Fuchsia pink #FF77FF 100% 47% 100% 313° 100% 73% 53% 100%
Fuchsia rose #C74375 78% 26% 46% 337° 54% 52% 66% 78%
Fulvous #E48400 89% 52% 0% 35° 100% 45% 100% 89%
Fuzzy Wuzzy #CC6666 80% 40% 40% 0° 50% 60% 50% 80%
Gainsboro #DCDCDC 86% 86% 86% — 0% 86% 0% 86%
Gamboge #E49B0F 89% 61% 6% 38° 88% 48% 94% 94%
Ghost white #F8F8FF 97% 97% 100% 24° 100% 99% 3% 100%
Giants orange #FE5A1D 100% 35% 11% 16° 99% 56% 89% 100%
Ginger #B06500 69% 40% 0% 24° 100% 35% 100% 69%
Glaucous #6082B6 38% 51% 71% 216° 37% 55% 47% 71%
Glitter #E6E8FA 90% 91% 98% 234° 67% 94% 8% 98%
GO green #00AB66 0% 67% 40% 156° 100% 34% 100% 75%
Gold (metallic) #D4AF37 83% 69% 22% 46° 65% 52% 74% 83%
Gold (web) (Golden) #FFD700 100% 84% 0% 51° 100% 50% 100% 100%
Gold Fusion #85754E 52% 46% 31% 43° 26% 41% 41% 52%
Golden brown #996515 60% 40% 8% 36° 76% 34% 83% 60%
Golden poppy #FCC200 99% 76% 0% 46° 100% 49% 100% 99%
Golden yellow #FFDF00 100% 87% 0% 52° 100% 50% 100% 100%
Goldenrod #DAA520 85% 65% 13% 43° 74% 49% 85% 85%
Granny Smith Apple #A8E4A0 66% 89% 63% 113° 56% 76% 30% 89%
Grape #6F2DA8 42% 18% 66% 270° 58% 42% 73% 66%
Gray #808080 50% 50% 50% — 0% 50% 0% 50%
Gray (HTML/CSS gray) #808080 50% 50% 50% — 0% 50% 0% 50%
Gray (X11 gray) #BEBEBE 75% 75% 75% — 0% 75% 0% 75%
Gray-asparagus #465945 27% 35% 27% 117° 13% 31% 22% 35%
Gray-blue #8C92AC 55% 57% 67% 229° 16% 61% 19% 68%
Green (color wheel) (X11 green) #00FF00 0% 100% 0% 120° 100% 50% 100% 100%
Green (Crayola) #1CAC78 11% 67% 47% 159° 72% 39% 72% 78%
Green (HTML/CSS color) #008000 0% 50% 0% 120° 100% 25% 100% 50%
Green (Munsell) #00A877 0% 66% 47% 163° 100% 33% 100% 66%
Green (NCS) #009F6B 0% 62% 42% 160° 100% 31% 100% 62%
Green (pigment) #00A550 0% 65% 31% 149° 100% 32% 100% 65%
Green (RYB) #66B032 40% 69% 20% 95° 56% 44% 72% 69%
Green-yellow #ADFF2F 68% 100% 18% 84° 100% 59% 82% 100%
Grullo #A99A86 66% 60% 53% 34° 17% 59% 21% 66%
Guppie green #00FF7F 0% 100% 50% 150° 100% 50% 100% 100%
Halayà úbe #663854 40% 22% 33% 278° 30% 31% 12% 37%
Han blue #446CCF 27% 42% 81% 223° 59% 54% 67% 81%
Han purple #5218FA 32% 9% 98% 255° 96% 54% 90% 98%
Hansa yellow #E9D66B 91% 84% 42% 51° 74% 67% 54% 91%
Harlequin #3FFF00 25% 100% 0% 105° 100% 50% 100% 100%
Harvard crimson #C90016 79% 0% 9% 353° 100% 39% 100% 79%
Harvest gold #DA9100 85% 57% 0% 40° 100% 43% 100% 86%
Heart Gold #808000 50% 50% 0% 43° 100% 25% 100% 25%
Heliotrope #DF73FF 87% 45% 100% 286° 100% 73% 55% 100%
Hollywood cerise #F400A1 96% 0% 63% 320° 100% 48% 100% 96%
Honeydew #F0FFF0 94% 100% 94% 150° 100% 97% 97% 97%
Honolulu blue #006DB0 0% 43% 69% 203° 100% 35% 100% 69%
Hooker's green #49796B 29% 47% 42% 163° 25% 38% 40% 48%
Hot magenta #FF1DCE 100% 11% 81% 313° 100% 56% 89% 100%
Hot pink #FF69B4 100% 41% 71% 330° 100% 71% 59% 100%
Hunter green #355E3B 21% 37% 23% 129° 28% 29% 44% 37%
Iceberg #71A6D2 44% 65% 82% 207° 52% 63% 46% 82%
Icterine #FCF75E 99% 97% 37% 58° 96% 68% 63% 99%
Illuminating Emerald #319177 19% 57% 47% 164° 50% 38% 66% 57%
Imperial #602F6B 38% 18% 42% 289° 39% 30% 56% 42%
Imperial blue #002395 0% 14% 58% 226° 100% 29% 100% 58%
Imperial purple #66023C 40% 1% 24% 325° 96% 20% 98% 40%
Imperial red #ED2939 93% 16% 22% 355° 85% 55% 83% 93%
Inchworm #B2EC5D 70% 93% 36% 84° 79% 65% 61% 93%
India green #138808 7% 53% 3% 115° 89% 28% 94% 53%
Indian red #CD5C5C 80% 36% 36% 0° 53% 58% 52% 75%
Indian yellow #E3A857 89% 66% 34% 35° 71% 62% 62% 89%
Indigo #6F00FF 44% 0% 100% 266° 100% 50% 100% 100%
Indigo (dye) #00416A 0% 25% 42% 203° 100% 21% 100% 42%
Indigo (web) #4B0082 29% 0% 51% 275° 100% 26% 100% 50%
International Klein Blue #002FA7 0% 18% 65% 223° 100% 33% 100% 65%
International orange (aerospace) #FF4F00 100% 31% 0% 19° 100% 50% 100% 100%
International orange (engineering) #BA160C 73% 9% 5% 3° 88% 39% 94% 73%
International orange (Golden Gate Bridge) #C0362C 75% 21% 17% 4° 63% 46% 77% 75%
Iris #5A4FCF 35% 31% 81% 245° 57% 56% 62% 81%
Irresistible #B3446C 70% 27% 42% 338° 45% 48% 62% 70%
Isabelline #F4F0EC 96% 94% 93% 30° 27% 94% 3% 96%
Islamic green #009000 0% 56% 0% 120° 100% 28% 100% 56%
Italian sky blue #B2FFFF 70% 100% 100% 180° 100% 85% 30% 100%
Ivory #FFFFF0 100% 100% 94% 60° 100% 97% 6% 100%
Jade #00A86B 0% 66% 42% 158° 100% 33% 100% 66%
Japanese indigo #264348 15% 26% 28% 189° 31% 22% 47% 28%
Japanese violet #5B3256 36% 20% 34% 307° 29% 28% 45% 36%
Jasmine #F8DE7E 97% 87% 49% 47° 90% 73% 49% 97%
Jasper #D73B3E 84% 23% 24% 359° 66% 54% 73% 84%
Jazzberry jam #A50B5E 65% 4% 37% 322° 88% 35% 90% 47%
Jelly Bean #DA614E 85% 38% 31% 8° 65% 58% 64% 86%
Jet #343434 20% 20% 20% — 0% 20% 0% 20%
Jonquil #F4CA16 96% 79% 9% 49° 91% 52% 91% 96%
June bud #BDDA57 74% 85% 34% 80° 64% 60% 75% 85%
Jungle green #29AB87 16% 67% 53% 163° 61% 42% 76% 67%
Kelly green #4CBB17 30% 73% 9% 101° 78% 41% 88% 73%
Kenyan copper #7C1C05 49% 11% 2% 12° 92% 25% 96% 49%
Keppel #3AB09E 23% 69% 62% 171° 50% 46% 67% 69%
Khaki (HTML/CSS) (Khaki) #C3B091 76% 69% 57% 37° 29% 67% 26% 76%
Khaki (X11) (Light khaki) #F0E68C 94% 90% 55% 54° 77% 75% 42% 94%
Kobe #882D17 53% 18% 9% 12° 71% 31% 83% 53%
Kobi #E79FC4 91% 62% 77% 329° 60% 77% 31% 91%
KU Crimson #E8000D 91% 0% 5% 357° 100% 46% 100% 91%
La Salle Green #087830 3% 47% 19% 141° 88% 25% 93% 47%
Languid lavender #D6CADD 84% 79% 87% 270° 22% 83% 17% 82%
Lapis lazuli #26619C 15% 38% 61% 210° 61% 38% 76% 61%
Laser Lemon #FFFF66 100% 100% 40% 60° 100% 70% 60% 100%
Laurel green #A9BA9D 66% 73% 62% 95° 17% 67% 16% 73%
Lava #CF1020 81% 6% 13% 355° 86% 44% 92% 81%
Lavender (floral) #B57EDC 71% 49% 86% 275° 57% 68% 43% 86%
Lavender (web) #E6E6FA 90% 90% 98% 245° 67% 94% 8% 98%
Lavender blue #CCCCFF 80% 80% 100% 240° 100% 90% 20% 100%
Lavender blush #FFF0F5 100% 94% 96% 340° 100% 97% 6% 100%
Lavender gray #C4C3D0 77% 76% 82% 245° 12% 79% 6% 82%
Lavender indigo #9457EB 58% 34% 92% 265° 79% 63% 63% 92%
Lavender magenta #EE82EE 93% 51% 93% 300° 76% 72% 45% 93%
Lavender mist #E6E6FA 90% 90% 98% 240° 67% 94% 8% 98%
Lavender pink #FBAED2 98% 68% 82% 332° 91% 83% 31% 98%
Lavender purple #967BB6 59% 48% 71% 267° 29% 60% 32% 71%
Lavender rose #FBA0E3 98% 63% 89% 316° 92% 81% 36% 98%
Lawn green #7CFC00 49% 99% 0% 90° 100% 49% 98% 48%
Lemon #FFF700 100% 97% 0% 58° 100% 50% 100% 100%
Lemon chiffon #FFFACD 100% 98% 80% 54° 100% 90% 20% 100%
Lemon curry #CCA01D 80% 63% 11% 45° 75% 46% 86% 80%
Lemon glacier #FDFF00 99% 100% 0% 60° 100% 50% 100% 100%
Lemon lime #E3FF00 89% 100% 0% 44° 100% 50% 100% 100%
Lemon meringue #F6EABE 96% 92% 75% 47° 76% 86% 23% 97%
Lemon yellow #FFF44F 100% 96% 31% 56° 100% 66% 69% 100%
Licorice #1A1110 10% 7% 6% 6° 24% 8% 39% 10%
Light apricot #FDD5B1 99% 84% 69% 30° 95% 84% 22% 89%
Light blue #ADD8E6 68% 85% 90% 194° 53% 79% 24% 90%
Light brown #B5651D 71% 40% 11% 28° 72% 41% 84% 71%
Light carmine pink #E66771 90% 40% 44% 350° 72% 65% 70% 80%
Light coral #F08080 94% 50% 50% 0° 79% 72% 50% 100%
Light cornflower blue #93CCEA 58% 80% 92% 201° 67% 75% 37% 92%
Light crimson #F56991 96% 41% 57% 343° 88% 69% 57% 96%
Light cyan #E0FFFF 88% 100% 100% 180° 100% 94% 12% 100%
Light fuchsia pink #F984EF 98% 52% 94% 300° 91% 75% 27% 94%
Light goldenrod yellow #FAFAD2 98% 98% 82% 60° 80% 90% 16% 98%
Light gray #D3D3D3 83% 83% 83% — 0% 83% 0% 83%
Light green #90EE90 56% 93% 56% 120° 73% 75% 39% 93%
Light khaki #F0E68C 94% 90% 55% 54° 77% 75% 42% 94%
Light medium orchid #D39BCB 83% 61% 80% 309° 39% 72% 27% 83%
Light moss green #ADDFAD 68% 78% 68% 135° 20% 73% 20% 87%
Light orchid #E6A8D7 90% 66% 84% 315° 55% 78% 27% 90%
Light pastel purple #B19CD9 69% 61% 85% 261° 45% 73% 28% 85%
Light pink #FFB6C1 100% 71% 76% 351° 100% 86% 100% 86%
Light red ochre #E97451 91% 45% 32% 14° 78% 62% 65% 91%
Light salmon #FFA07A 100% 63% 48% 14° 100% 74% 62% 100%
Light salmon pink #FF9999 100% 60% 60% 0° 100% 80% 40% 100%
Light sea green #20B2AA 13% 70% 67% 175° 70% 41% 40% 75%
Light sky blue #87CEFA 53% 81% 98% 203° 92% 76% 46% 98%
Light slate gray #778899 47% 53% 60% 210° 14% 53% 22% 60%
Light steel blue #B0C4DE 69% 77% 87% 214° 41% 78% 21% 87%
Light taupe #B38B6D 70% 55% 43% 26° 32% 57% 39% 70%
Light Thulian pink #E68FAC 90% 56% 67% 330° 64% 73% 72% 94%
Light yellow #FFFFE0 100% 100% 88% 60° 100% 94% 7% 100%
Lilac #C8A2C8 78% 64% 78% 300° 26% 71% 19% 78%
Lime (color wheel) #BFFF00 75% 100% 0% 75° 100% 50% 100% 100%
Lime (web) (X11 green) #00FF00 0% 100% 0% 120° 100% 50% 100% 100%
Lime green #32CD32 20% 80% 20% 120° 61% 50% 67% 40%
Limerick #9DC209 62% 76% 4% 72° 91% 40% 95% 76%
Lincoln green #195905 10% 35% 2% 106° 89% 18% 94% 35%
Linen #FAF0E6 98% 94% 90% 30° 67% 94% 8% 98%
Lion #C19A6B 76% 60% 42% 33° 41% 59% 45% 76%
Little boy blue #6CA0DC 42% 63% 86% 212° 62% 64% 51% 86%
Liver #674C47 40% 30% 28% 9° 18% 34% 31% 30%
Liver (dogs) #B86D29 72% 43% 16% 28° 64% 44% 77% 72%
Liver (organ) #6C2E1F 42% 18% 12% 12° 55% 27% 71% 42%
Liver chestnut #987456 60% 45% 34% 27° 28% 47% 43% 60%
Lumber #FFE4CD 100% 89% 80% 8° 100% 90% 20% 100%
Lust #E62020 90% 13% 13% 0° 80% 51% 86% 90%
Magenta #FF00FF 100% 0% 100% 300° 100% 50% 100% 100%
Magenta (Crayola) #FF55A3 100% 33% 64% 332° 100% 67% 67% 100%
Magenta (dye) #CA1F7B 79% 12% 48% 326° 73% 46% 90% 79%
Magenta (Pantone) #D0417E 82% 25% 49% 334° 60% 54% 69% 82%
Magenta (process) #FF0090 100% 0% 56% 326° 100% 50% 100% 100%
Magic mint #AAF0D1 67% 94% 82% 150° 70% 80% 84% 80%
Magnolia #F8F4FF 97% 96% 100% 247° 100% 98% 94% 92%
Mahogany #C04000 75% 25% 0% 20° 100% 38% 100% 75%
Maize #FBEC5D 98% 93% 36% 54° 95% 68% 63% 98%
Majorelle Blue #6050DC 38% 31% 86% 247° 67% 59% 67% 59%
Malachite #0BDA51 4% 85% 32% 140° 90% 45% 95% 85%
Manatee #979AAA 59% 60% 67% 231° 10% 63% 11% 67%
Mango Tango #FF8243 100% 51% 26% 20° 100% 63% 74% 100%
Mantis #74C365 45% 76% 40% 110° 44% 58% 48% 77%
Mardi Gras #880085 53% 0% 54% 301° 100% 27% 100% 53%
Maroon (Crayola) #C32148 76% 13% 28% 345° 71% 45% 75% 38%
Maroon (HTML/CSS) #800000 50% 0% 0% 0° 100% 25% 100% 50%
Maroon (X11) #B03060 69% 19% 38% 333° 57% 44% 65% 42%
Mauve #E0B0FF 88% 69% 100% 276° 100% 85% 31% 100%
Mauve taupe #915F6D 57% 37% 43% 285° 21% 47% 37% 54%
Mauvelous #EF98AA 94% 60% 67% 348° 73% 77% 37% 94%
Maya blue #73C2FB 45% 76% 98% 210° 94% 72% 96% 87%
Meat brown #E5B73B 90% 72% 23% 44° 77% 57% 74% 90%
Medium aquamarine #66DDAA 40% 87% 67% 154° 64% 63% 54% 87%
Medium blue #0000CD 0% 0% 80% 240° 100% 40% 100% 80%
Medium candy apple red #E2062C 89% 2% 17% 350° 95% 46% 97% 89%
Medium carmine #AF4035 69% 25% 21% 5° 54% 45% 69% 68%
Medium champagne #F3E5AB 95% 90% 67% 48° 75% 81% 30% 95%
Medium electric blue #035096 1% 31% 59% 180° 96% 30% 30% 60%
Medium jungle green #1C352D 11% 21% 18% 120° 31% 16% 20% 20%
Medium lavender magenta #DDA0DD 87% 63% 87% 200° 47% 75% 28% 87%
Medium orchid #BA55D3 73% 33% 83% 288° 59% 58% 60% 83%
Medium Persian blue #0067A5 0% 40% 65% 248° 100% 32% 75% 48%
Medium purple #9370DB 58% 44% 86% 270° 60% 65% 68% 72%
Medium red-violet #BB3385 73% 20% 52% 322° 57% 47% 79% 83%
Medium ruby #AA4069 67% 25% 41% 337° 45% 46% 62% 67%
Medium sea green #3CB371 24% 70% 44% 150° 50% 47% 42% 30%
Medium sky blue #80DAEB 50% 85% 92% 190° 73% 71% 46% 92%
Medium slate blue #7B68EE 48% 41% 93% 249° 80% 67% 56% 93%
Medium spring bud #C9DC87 79% 86% 53% 80° 55% 70% 70% 80%
Medium spring green #00FA9A 0% 98% 60% 150° 100% 49% 97% 97%
Medium taupe #674C47 40% 30% 28% 9° 18% 34% 31% 40%
Medium turquoise #48D1CC 28% 82% 80% 175° 60% 55% 55% 50%
Medium Tuscan red #79443B 47% 27% 23% 9° 34% 35% 51% 53%
Medium vermilion #D9603B 85% 38% 23% 14° 68% 54% 73% 85%
Medium violet-red #C71585 78% 8% 52% 322° 81% 43% 89% 78%
Mellow apricot #F8B878 97% 72% 47% 30° 90% 72% 52% 97%
Mellow yellow #F8DE7E 97% 87% 49% 47° 90% 73% 49% 97%
Melon #FDBCB4 99% 74% 71% 7° 95% 85% 29% 99%
Metallic Seaweed #0A7E8C 3% 49% 55% 186° 89% 29% 94% 55%
Metallic Sunburst #9C7C38 61% 49% 22% 41° 47% 42% 64% 61%
Mexican pink #E4007C 89% 0% 49% 327° 100% 45% 100% 89%
Midnight blue #191970 10% 10% 44% 240° 64% 27% 78% 44%
Midnight green (eagle green) #004953 0% 29% 33% 187° 100% 16% 100% 33%
Midori #E3F988 89% 98% 53% 72° 90% 76% 45% 98%
Mikado yellow #FFC40C 100% 77% 5% 45° 100% 52% 95% 100%
Mint #3EB489 24% 71% 54% 158° 49% 48% 66% 71%
Mint cream #F5FFFA 96% 100% 98% 150° 100% 98% 4% 100%
Mint green #98FF98 60% 100% 60% 140° 100% 80% 40% 100%
Misty rose #FFE4E1 100% 89% 88% 6° 100% 94% 12% 100%
Moccasin #FAEBD7 98% 92% 84% 34° 78% 91% 14% 98%
Mode beige #967117 59% 44% 9% 43° 73% 34% 85% 59%
Moonstone blue #73A9C2 45% 66% 76% 199° 39% 61% 41% 76%
Mordant red 19 #AE0C00 68% 5% 0% 4° 100% 34% 100% 68%
Moss green #8A9A5B 54% 60% 36% 75° 26% 48% 41% 60%
Mountain Meadow #30BA8F 19% 73% 56% 161° 59% 46% 74% 73%
Mountbatten pink #997A8D 60% 48% 55% 323° 13% 54% 20% 60%
MSU Green #18453B 9% 27% 23% 167° 48% 18% 65% 27%
Mughal green #306030 19% 38% 19% 120° 33% 28% 50% 38%
Mulberry #C54B8C 77% 29% 55% 285° 51% 53% 67% 70%
Mustard #FFDB58 100% 86% 35% 47° 100% 67% 65% 100%
Myrtle green #317873 19% 47% 45% 176° 42% 33% 59% 47%"""
# Parse the tab-separated table above: column 0 is the color name,
# column 1 its hex code; the remaining columns (RGB/HSL percentages)
# are ignored here.
fields = []
for record in color_table.split('\n'):
    fields.append(record.split('\t'))
color_names = [entry[0].lower() for entry in fields]
hex_codes = [entry[1] for entry in fields]
# e.g. rgb['amber'] == '#FFBF00'
rgb = {name: code for name, code in zip(color_names, hex_codes)}
|
jkitchin/pycse
|
pycse/colors.py
|
Python
|
gpl-2.0
| 34,118
|
[
"Amber",
"BLAST"
] |
4d7e9fc5be34dcbc7b2d03e8c8c0053bea2076538132da8ab006256f31ef5732
|
from flask import Flask, render_template, session, request, redirect
import random
app = Flask(__name__)
# a secret key is required for Flask's signed session cookies
app.secret_key = 'my_secret_key'
@app.route('/')
def index():
    # initialize session counters on the first visit
    if not 'gold' in session:
        session['gold'] = 0
    if not 'activities' in session:
        session['activities'] = []
    return render_template('index.html')
@app.route('/process', methods = ['POST'])
def process():
    # NOTE(review): the randomized payouts below are computed but never
    # applied to session['gold'] or session['activities'] - the trailing
    # module string suggests this is deliberately left incomplete.
    buildings = {
        'farm':random.randint(5,10),
        'casino':random.randint(-50,50),
        'cave':random.randint(0,30),
        'house':random.randint(0,5)
    }
    return redirect('/')
if __name__ == '__main__':
    app.run(debug = True)
"""
Will this work?
at first we would import the random from function to work, but nothing would happen to session data.
"""
|
jiobert/python
|
Francisco_Trujillo/Assignments/flaskolympics/olympics6/server.py
|
Python
|
mit
| 777
|
[
"CASINO"
] |
4aa50803f2600695533031e6cc8b7f5ed6955fe2abf84f55153da4d3463d4e52
|
"""
Generate artificial waveforms of weakly electric fish.
The two main functions are
generate_wavefish()
generate_pulsefish()
for generating EODs of wave-type and pulse_type electric fish, respectively.
The following functions use the two functions to generate waveforms of specific fishes:
generate_alepto(): mimicks the wave-type fish Apteronotus leptorhynchus,
generate_eigenmannia(): mimicks the wave-type fish Eigenmannia,
generate_monophasic_pulses(): mimicks a monophasic pulsefish,
generate_biphasic_pulses(): mimicks a biphasic pulsefish,
generate_triphasic_pulses(): mimicks a triphasic pulsefish.
The frequency traces of communication signals are generated by
chirps_frequency() for chirps, and
rises_frequency() for rises.
The returned frequency of these functions can then be directly passed on
to generate_wavefish() for generating a frequency modulated EOD waveform.
"""
import numpy as np
def generate_wavefish(frequency=100.0, samplerate=44100., duration=1., noise_std=0.05,
                      amplitudes=1.0, phases=0.0):
    """
    Generate the EOD waveform of a wave-type fish.

    The waveform is built by adding up sine waves at integer multiples of the
    fundamental frequency, i.e. the fundamental and its harmonics.  The
    amplitude of the fundamental is the first element of amplitudes; further
    elements of amplitudes and phases specify the higher harmonics.  Gaussian
    white noise with standard deviation noise_std is added on top.

    Parameters
    ----------
    frequency: float or array of floats
        EOD frequency of the fish in Hz.  Either a fixed number or an array
        for time-dependent frequencies.
    samplerate: float
        Sampling rate in Hz.
    duration: float
        Duration of the generated data in seconds.  Only used if frequency
        is a scalar.
    noise_std: float
        Standard deviation of additive Gaussian white noise.
    amplitudes: float or list of floats
        Amplitudes of fundamental and optional harmonics.
    phases: float or list of floats
        Relative phases of fundamental and optional harmonics in radians.

    Returns
    -------
    data: array of floats
        Generated data of a wave-type fish.

    Raises
    ------
    IndexError: amplitudes and phases differ in length.
    """
    # phase trace (in cycles) of the fundamental:
    if np.isscalar(frequency):
        phase = frequency * np.arange(0, duration, 1./samplerate)
    else:
        # integrate a time-dependent frequency trace:
        phase = np.cumsum(frequency)/samplerate
    # normalize the harmonic specifications to lists:
    if np.isscalar(amplitudes):
        amplitudes = [amplitudes]
    if np.isscalar(phases):
        phases = [phases]
    if len(amplitudes) != len(phases):
        raise IndexError('need exactly as many phases as amplitudes')
    # superimpose fundamental (k=1) and harmonics (k=2,3,...):
    data = np.zeros(len(phase))
    for k, (ampl, phi) in enumerate(zip(amplitudes, phases), start=1):
        data += ampl * np.sin(2.0*np.pi*k*phase + phi)
    # additive Gaussian white noise:
    data += noise_std * np.random.randn(len(data))
    return data
def generate_alepto(frequency=100.0, samplerate=44100., duration=1., noise_std=0.01):
    """Generate the EOD waveform of an Apteronotus leptorhynchus.

    A thin wrapper around generate_wavefish() with fixed harmonic
    amplitudes and all-zero phases; see there for the parameters.
    """
    harmonic_amplitudes = [1.0, 0.5, 0.1, 0.01, 0.001]
    harmonic_phases = [0.0]*5
    return generate_wavefish(frequency=frequency, samplerate=samplerate,
                             duration=duration, noise_std=noise_std,
                             amplitudes=harmonic_amplitudes,
                             phases=harmonic_phases)
def generate_eigenmannia(frequency=100.0, samplerate=44100., duration=1., noise_std=0.01):
    """Generate the EOD waveform of an Eigenmannia.

    A thin wrapper around generate_wavefish() with fixed harmonic
    amplitudes and phases; see there for the parameters.
    """
    harmonic_amplitudes = [1.0, 0.25, 0.0, 0.01]
    harmonic_phases = [0.0, 0.5*np.pi, 0.0, 0.0]
    return generate_wavefish(frequency=frequency, samplerate=samplerate,
                             duration=duration, noise_std=noise_std,
                             amplitudes=harmonic_amplitudes,
                             phases=harmonic_phases)
def chirps_frequency(eodf=100.0, samplerate=44100., duration=1.,
                     chirp_freq=5.0, chirp_size=100.0, chirp_width=0.01, chirp_kurtosis=1.0):
    """
    Generate a frequency trace with chirps.

    A chirp is modeled as a Gaussian-shaped frequency excursion on top of a
    constant baseline EOD frequency.

    Parameters
    ----------
    eodf: float
        EOD frequency of the fish in Hz.
    samplerate: float
        Sampling rate in Hz.
    duration: float
        Duration of the generated data in seconds.
    chirp_freq: float
        Frequency of occurrence of chirps in Hertz.
    chirp_size: float
        Size of the chirp (frequency increase above eodf) in Hertz.
    chirp_width: float
        Width of the chirp at 10% height in seconds.
    chirp_kurtosis (float):
        Shape of the chirp. =1: Gaussian, >1: more rectangular, <1: more peaked.

    Returns
    -------
    data: array of floats
        Generated frequency trace that can be passed on to generate_wavefish().
    """
    # constant baseline EOD frequency:
    freq = np.full(int(duration*samplerate), eodf)
    # regularly spaced chirp onsets, starting at half a period:
    period = 1.0/chirp_freq
    onsets = np.arange(0.5*period, duration, period)
    # shape of one chirp (generalized Gaussian):
    t = np.arange(-2.0*chirp_width, 2.0*chirp_width, 1./samplerate)
    sig = 0.5*chirp_width / (2.0*np.log(10.0))**(0.5/chirp_kurtosis)
    kernel = chirp_size * np.exp(-0.5*((t/sig)**2.0)**chirp_kurtosis)
    # superimpose the chirps onto the baseline:
    for onset in onsets:
        i0 = int(onset*samplerate)
        i1 = i0 + len(kernel)
        if i1 > len(freq):
            # remaining chirps would run past the end of the trace:
            break
        freq[i0:i1] += kernel
    return freq
def rises_frequency(eodf=100.0, samplerate=44100., duration=1.,
                    rise_freq=0.1, rise_size=10.0, rise_tau=1.0, decay_tau=10.0):
    """
    Generate a frequency trace with rises.

    A rise is modeled as a double-exponential frequency excursion on top of a
    constant baseline EOD frequency.

    Parameters
    ----------
    eodf: float
        EOD frequency of the fish in Hz.
    samplerate: float
        Sampling rate in Hz.
    duration: float
        Duration of the generated data in seconds.
    rise_freq: float
        Frequency of occurrence of rises in Hertz.
    rise_size: float
        Size of the rise (frequency increase above eodf) in Hertz.
    rise_tau: float
        Time constant of the frequency increase of the rise in seconds.
    decay_tau: float
        Time constant of the frequency decay of the rise in seconds.

    Returns
    -------
    data: array of floats
        Generated frequency trace that can be passed on to generate_wavefish().
    """
    # constant baseline EOD frequency:
    freq = np.full(int(duration*samplerate), eodf)
    # regularly spaced rise onsets, starting at half a period:
    period = 1.0/rise_freq
    onsets = np.arange(0.5*period, duration, period)
    # shape of one rise (fast exponential onset, slow exponential decay):
    t = np.arange(0.0, 5.0*decay_tau, 1./samplerate)
    kernel = rise_size * (1.0-np.exp(-t/rise_tau)) * np.exp(-t/decay_tau)
    # superimpose the rises onto the baseline:
    for onset in onsets:
        i0 = int(onset*samplerate)
        i1 = i0 + len(kernel)
        if i1 > len(freq):
            # last rise: clip the kernel at the end of the trace and stop:
            freq[i0:] += kernel[:len(freq)-i0]
            break
        freq[i0:i1] += kernel
    return freq
def generate_pulsefish(frequency=100.0, samplerate=44100., duration=1., noise_std=0.01,
                       jitter_cv=0.1, peak_stds=0.001, peak_amplitudes=1.0, peak_times=0.0):
    """
    Generate EOD of a pulse-type fish.

    Pulses are spaced by 1/frequency, jittered as determined by jitter_cv. Each pulse is
    a combination of Gaussian peaks, whose widths, amplitudes, and positions are given by
    their standard deviation peak_stds, peak_amplitudes, and peak_times, respectively.

    The generated waveform is duration seconds long and is sampled with samplerate Hertz.
    Gaussian white noise with a standard deviation of noise_std is added to the generated
    pulse train.

    Parameters
    ----------
    frequency: float
        EOD frequency of the fish in Hz.
    samplerate: float
        Sampling Rate in Hz.
    duration: float
        Duration of the generated data in seconds.
    noise_std: float
        Standard deviation of additive Gaussian white noise.
    jitter_cv: float
        Gaussian distributed jitter of pulse times as coefficient of variation of inter-pulse intervals.
    peak_stds: float or list of floats
        Standard deviation of Gaussian shaped peaks in seconds.
    peak_amplitudes: float or list of floats
        Amplitude of each peak (positive and negative).
    peak_times: float or list of floats
        Position of each Gaussian peak in seconds.

    Returns
    -------
    data: array of floats
        Generated data of a pulse-type fish.

    Raises
    ------
    IndexError: peak_stds or peak_amplitudes or peak_times differ in length.
    """
    # make sure peak properties are in a list:
    if np.isscalar(peak_stds):
        peak_stds = [peak_stds]
    if np.isscalar(peak_amplitudes):
        peak_amplitudes = [peak_amplitudes]
    if np.isscalar(peak_times):
        peak_times = [peak_times]
    if len(peak_stds) != len(peak_amplitudes) or len(peak_stds) != len(peak_times):
        raise IndexError('need exactly as many peak_stds as peak_amplitudes and peak_times')
    # time axis for a single pulse, covering +-4 standard deviations around
    # the earliest and latest peak:
    min_time_inx = np.argmin(peak_times)
    max_time_inx = np.argmax(peak_times)
    x = np.arange(-4.*peak_stds[min_time_inx] + peak_times[min_time_inx],
                  4.*peak_stds[max_time_inx] + peak_times[max_time_inx], 1.0/samplerate)
    pulse_duration = x[-1] - x[0]
    # generate a single pulse as a sum of Gaussian peaks:
    pulse = np.zeros(len(x))
    for time, ampl, std in zip(peak_times, peak_amplitudes, peak_stds):
        pulse += ampl * np.exp(-0.5*((x-time)/std)**2)
    # noise floor into which the pulses are pasted:
    time = np.arange(0, duration, 1. / samplerate)
    data = np.random.randn(len(time)) * noise_std
    period = 1.0/frequency
    jitter_std = period * jitter_cv
    # first pulse starts late enough for the pulse and its jitter to fit in.
    # Bug fix: use the builtin max() - np.max(a, b) interprets the second
    # argument as the `axis` keyword, not as a second value to compare.
    first_pulse = max(pulse_duration, 3.0*jitter_std)
    pulse_times = np.arange(first_pulse, duration, period)
    pulse_times += np.random.randn(len(pulse_times)) * jitter_std
    # np.int was removed from NumPy >= 1.24; the builtin int is the
    # documented replacement:
    pulse_indices = np.round(pulse_times * samplerate).astype(int)
    # paste pulses that fully fit into the data array:
    for inx in pulse_indices[(pulse_indices >= 0) & (pulse_indices < len(data)-len(pulse)-1)]:
        data[inx:inx + len(pulse)] += pulse
    return data
def generate_monophasic_pulses(frequency=100.0, samplerate=44100., duration=1.,
                               noise_std=0.01, jitter_cv=0.1):
    """Generate the EOD of a monophasic pulse-type fish.

    A thin wrapper around generate_pulsefish() with a single positive
    Gaussian peak; see there for the parameters.
    """
    return generate_pulsefish(frequency=frequency, samplerate=samplerate,
                              duration=duration, noise_std=noise_std,
                              jitter_cv=jitter_cv, peak_stds=0.0003,
                              peak_amplitudes=1.0, peak_times=0.0)
def generate_biphasic_pulses(frequency=100.0, samplerate=44100., duration=1.,
                             noise_std=0.01, jitter_cv=0.1):
    """Generate the EOD of a biphasic pulse-type fish.

    A thin wrapper around generate_pulsefish() with one positive and one
    negative Gaussian peak; see there for the parameters.
    """
    return generate_pulsefish(frequency=frequency, samplerate=samplerate,
                              duration=duration, noise_std=noise_std,
                              jitter_cv=jitter_cv,
                              peak_stds=[0.0001, 0.0002],
                              peak_amplitudes=[1.0, -0.3],
                              peak_times=[0.0, 0.0003])
def generate_triphasic_pulses(frequency=100.0, samplerate=44100., duration=1.,
                              noise_std=0.01, jitter_cv=0.1):
    """Generate the EOD of a triphasic pulse-type fish.

    A thin wrapper around generate_pulsefish() with three alternating
    Gaussian peaks; see there for the parameters.
    """
    return generate_pulsefish(frequency=frequency, samplerate=samplerate,
                              duration=duration, noise_std=noise_std,
                              jitter_cv=jitter_cv,
                              peak_stds=[0.0001, 0.0001, 0.0002],
                              peak_amplitudes=[1.0, -0.8, 0.1],
                              peak_times=[0.0, 0.00015, 0.0004])
def main():
    """Demo and command-line interface for fakefish.

    Without command-line arguments, demo figures illustrating wave-type and
    pulse-type EODs, chirps, and rises are shown.  With '-s audiofile', an
    audio file with user-defined simulated electric fish is written.
    """
    import sys
    import os
    import matplotlib.pyplot as plt
    from audioio import write_audio
    from .consoleinput import read, select, save_inputs
    if len(sys.argv) > 1:
        if len(sys.argv) == 2 or sys.argv[1] != '-s':
            print('usage: fakefish [-h|--help] [-s audiofile]')
            print('')
            print('Without arguments, run a demo for illustrating fakefish functionality.')
            print('')
            print('-s audiofile: writes audiofile with user defined simulated electric fishes.')
            print('')
            print('by bendalab (2017)')
        else:
            # generate file:
            audiofile = sys.argv[2]
            samplerate = read('Sampling rate in Hz', '44100', float, 1.0)
            duration = read('Duration in seconds', '10', float, 0.001)
            nfish = read('Number of fish', '1', int, 1)
            ndata = read('Number of electrodes', '1', int, 1)
            fish_spread = 1
            if ndata > 1:
                fish_spread = read('Number of electrodes fish are spread over', '2', int, 1)
            # noise floor on all electrodes:
            data = np.random.randn(int(duration*samplerate), ndata)*0.01
            fish_indices = np.random.randint(ndata, size=nfish)
            # defaults; kept across fish so previous answers carry over:
            eodt = 'a'
            eodf = 800.0
            eoda = 1.0
            eodsig = 'n'
            pulse_jitter = 0.1
            chirp_freq = 5.0
            chirp_size = 100.0
            chirp_width = 0.01
            chirp_kurtosis = 1.0
            rise_freq = 0.1
            rise_size = 10.0
            rise_tau = 1.0
            rise_decay_tau = 10.0
            for k in range(nfish):
                print('')
                fish = 'Fish %d: ' % (k+1)
                eodt = select(fish + 'EOD type', eodt, ['a', 'e', '1', '2', '3'],
                              ['Apteronotus', 'Eigenmannia',
                               'monophasic pulse', 'biphasic pulse', 'triphasic pulse'])
                eodf = read(fish + 'EOD frequency in Hz', '%g'%eodf, float, 1.0, 3000.0)
                eoda = read(fish + 'EOD amplitude', '%g'%eoda, float, 0.0, 10.0)
                if eodt in 'ae':
                    # wave-type fish: optionally add communication signals:
                    eodsig = select(fish + 'Add communication signals', eodsig, ['n', 'c', 'r'],
                                    ['fixed EOD', 'chirps', 'rises'])
                    eodfreq = eodf
                    if eodsig == 'c':
                        chirp_freq = read('Number of chirps per second', '%g'%chirp_freq, float, 0.001)
                        chirp_size = read('Size of chirp in Hz', '%g'%chirp_size, float, 1.0)
                        chirp_width = 0.001*read('Width of chirp in ms', '%g'%(1000.0*chirp_width), float, 1.0)
                        eodfreq = chirps_frequency(eodf, samplerate, duration,
                                                   chirp_freq, chirp_size, chirp_width, chirp_kurtosis)
                    elif eodsig == 'r':
                        rise_freq = read('Number of rises per second', '%g'%rise_freq, float, 0.00001)
                        rise_size = read('Size of rise in Hz', '%g'%rise_size, float, 0.01)
                        rise_tau = read('Time-constant of rise onset in seconds', '%g'%rise_tau, float, 0.01)
                        rise_decay_tau = read('Time-constant of rise decay in seconds', '%g'%rise_decay_tau, float, 0.01)
                        eodfreq = rises_frequency(eodf, samplerate, duration,
                                                  rise_freq, rise_size, rise_tau, rise_decay_tau)
                    if eodt == 'a':
                        fishdata = eoda*generate_alepto(eodfreq, samplerate, duration=duration,
                                                        noise_std=0.0)
                    elif eodt == 'e':
                        fishdata = eoda*generate_eigenmannia(eodfreq, samplerate, duration=duration,
                                                             noise_std=0.0)
                else:
                    # pulse-type fish:
                    pulse_jitter = read(fish + 'CV of pulse jitter', '%g'%pulse_jitter, float, 0.0, 2.0)
                    if eodt == '1':
                        fishdata = eoda*generate_monophasic_pulses(eodf, samplerate, duration,
                                                                   jitter_cv=pulse_jitter,
                                                                   noise_std=0.0)
                    elif eodt == '2':
                        fishdata = eoda*generate_biphasic_pulses(eodf, samplerate, duration,
                                                                 jitter_cv=pulse_jitter,
                                                                 noise_std=0.0)
                    elif eodt == '3':
                        fishdata = eoda*generate_triphasic_pulses(eodf, samplerate, duration,
                                                                  jitter_cv=pulse_jitter,
                                                                  noise_std=0.0)
                # spread the fish over neighboring electrodes with decaying amplitude:
                i = fish_indices[k]
                for j in range(fish_spread):
                    data[:, (i+j)%ndata] += fishdata*(0.2**j)
            # normalize to 90% full scale and write out:
            maxdata = np.max(np.abs(data))
            write_audio(audiofile, 0.9*data/maxdata, samplerate)
            input_file = os.path.splitext(audiofile)[0] + '.inp'
            save_inputs(input_file)
            print('\nWrote fakefish data to file "%s".' % audiofile)
    else:
        # demo:
        samplerate = 40000. # in Hz
        duration = 10.0     # in sec
        inset_len = 0.01    # in sec
        inset_indices = int(inset_len*samplerate)
        ws_fac = 0.1        # whitespace factor or ylim (between 0. and 1.; preferably a small number)
        # generate data:
        time = np.arange(0, duration, 1./samplerate)
        eodf = 400.0
        #eodf = 500.0 - time/duration*400.0
        wavefish = generate_wavefish(eodf, samplerate, duration=duration, noise_std=0.02,
                                     amplitudes=[1.0, 0.5, 0.1, 0.0001],
                                     phases=[0.0, 0.0, 0.0, 0.0])
        eodf = 650.0
        # wavefish = generate_alepto(eodf, samplerate, duration=duration)
        wavefish += 0.5*generate_eigenmannia(eodf, samplerate, duration=duration)
        pulsefish = generate_pulsefish(80., samplerate, duration=duration,
                                       noise_std=0.02, jitter_cv=0.1,
                                       peak_stds=[0.0001, 0.0002],
                                       peak_amplitudes=[1.0, -0.3],
                                       peak_times=[0.0, 0.0003])
        # pulsefish = generate_monophasic_pulses(80., samplerate, duration=duration)
        # pulsefish = generate_biphasic_pulses(80., samplerate, duration=duration)
        # pulsefish = generate_triphasic_pulses(80., samplerate, duration=duration)
        fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(19, 10))
        # get proper wavefish ylim
        ymin = np.min(wavefish)
        ymax = np.max(wavefish)
        dy = ws_fac*(ymax - ymin)
        ymin -= dy
        ymax += dy
        # complete wavefish:
        ax[0][0].set_title('Wavefish')
        ax[0][0].set_ylim(ymin, ymax)
        ax[0][0].plot(time, wavefish)
        # wavefish zoom in:
        ax[0][1].set_title('Wavefish ZOOM IN')
        ax[0][1].set_ylim(ymin, ymax)
        ax[0][1].plot(time[:inset_indices], wavefish[:inset_indices], '-o')
        # get proper pulsefish ylim
        ymin = np.min(pulsefish)
        ymax = np.max(pulsefish)
        dy = ws_fac*(ymax - ymin)
        ymin -= dy
        ymax += dy
        # complete pulsefish:
        ax[1][0].set_title('Pulsefish')
        ax[1][0].set_ylim(ymin, ymax)
        ax[1][0].plot(time, pulsefish)
        # pulsefish zoom in:
        # bug fix: use integer division - float slice indices raise a
        # TypeError in Python 3:
        ax[1][1].set_title('Pulsefish ZOOM IN')
        ax[1][1].set_ylim(ymin, ymax)
        ax[1][1].plot(time[:inset_indices//2], pulsefish[:inset_indices//2], '-o')
        for row in ax:
            for c_ax in row:
                c_ax.set_xlabel('Time [sec]')
                c_ax.set_ylabel('Amplitude [a.u.]')
        plt.tight_layout()
        # chirps:
        chirps_freq = chirps_frequency(600.0, samplerate, duration=duration, chirp_kurtosis=1.0)
        chirps_data = generate_alepto(chirps_freq, samplerate)
        # rises:
        rises_freq = rises_frequency(600.0, samplerate, duration=duration, rise_size=20.0)
        rises_data = generate_alepto(rises_freq, samplerate)
        nfft = 256
        fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(19, 10))
        ax[0].set_title('Chirps')
        ax[0].specgram(chirps_data, Fs=samplerate, NFFT=nfft, noverlap=nfft//16)
        # shift the frequency trace by half an FFT window to align it with the
        # spectrogram (bug fix: // keeps the slice indices integral in Python 3):
        time = np.arange(len(chirps_freq))/samplerate
        ax[0].plot(time[:-nfft//2], chirps_freq[nfft//2:], '-k', lw=2)
        ax[0].set_ylim(0.0, 3000.0)
        ax[0].set_ylabel('Frequency [Hz]')
        nfft = 4096
        ax[1].set_title('Rises')
        ax[1].specgram(rises_data, Fs=samplerate, NFFT=nfft, noverlap=nfft//2)
        time = np.arange(len(rises_freq))/samplerate
        ax[1].plot(time[:-nfft//4], rises_freq[nfft//4:], '-k', lw=2)
        ax[1].set_ylim(500.0, 700.0)
        ax[1].set_ylabel('Frequency [Hz]')
        ax[1].set_xlabel('Time [s]')
        plt.tight_layout()
        plt.show()
|
jfsehuanes/thunderfish
|
thunderfish/fakefish.py
|
Python
|
gpl-3.0
| 21,706
|
[
"Gaussian"
] |
acd6a041bff798e3f5bfc8babbd9b5944098ebc40aa2f172886b9b401599738e
|
# Copyright 2014 Roberto Brian Sarrionandia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import webapp2
import jinja2
import os
import datetime
import tusers
from google.appengine.ext import ndb
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class GridHandler(webapp2.RequestHandler):
    """Request handler for the room-tracking grid of a tournament.

    GET renders the grid (full page, or a raw fragment for AJAX updates);
    POST updates the status and comment of a single room.  Both verbs require
    the current user to be among the owners of the requested tournament.
    """
    def get(self):
        """Render the tracking grid for the tournament given by the 't' query parameter."""
        user = tusers.get_current_user()
        #Get the requested tournament
        tid = self.request.get('t')
        t_key = ndb.Key('Tournament', int(tid))
        t = t_key.get()
        # only tournament owners may view the grid:
        if (user and user.key in t.owner):
            #Get all of the rooms whose parent is the current tournament
            q = t.rooms()
            template_values = {
                'user' : user,
                't' : t,
                'rooms' : q,
                'logout' : tusers.create_logout_url('/'),
            }
            #Check if this is an AJAX request for the raw table
            if (self.request.get('raw') == '1'):
                template = JINJA_ENVIRONMENT.get_template('view/rawgrid.html')
            else:
                template = JINJA_ENVIRONMENT.get_template('view/trackgrid.html')
            self.response.write(template.render(template_values))
        else:
            # not logged in or not an owner: send through the login flow
            self.redirect(tusers.create_login_url(self.request.uri))
    def post(self):
        """Update status/comment of room 'r' in tournament 't', then redirect to the grid."""
        user = tusers.get_current_user()
        #Get the requested tournament
        tid = self.request.get('t')
        t_key = ndb.Key('Tournament', int(tid))
        t = t_key.get()
        if (user and user.key in t.owner):
            rid = self.request.get('r')
            r_key = ndb.Key('Tournament', int(tid), 'Room', int(rid))
            room = r_key.get()
            room.status = self.request.get('status')
            room.comment = self.request.get('comment')
            # NOTE(review): stores only the time of day, not the date -
            # confirm that losing the date component is intended.
            room.changed = datetime.datetime.now().time()
            room.put()
            self.redirect('/trackgrid?t=' + str(t_key.id()))
        else:
            self.redirect(tusers.create_login_url(self.request.uri))
# route the tracking grid URL to the handler above:
app = webapp2.WSGIApplication([
    ('/trackgrid', GridHandler)
], debug=True)
|
sarrionandia/tournatrack
|
trackgrid.py
|
Python
|
apache-2.0
| 2,395
|
[
"Brian"
] |
2ca9eab363931ac4c17681ff1cac4fae4e1f15e7bb2a57d66b2027e64503b52d
|
import espressopp
# create default Lennard Jones (WCA) system with 0 particles and cubic box (L=10)
system, integrator = espressopp.standard_system.LennardJones(0, (10*1.12, 10*1.12, 10*1.12))
# mask values: per the names, 1 marks a coordinate as fixed, 0 as free
C_FIXED = 1
C_FREE = 0
# fix only the y coordinate; particles in the group may still move in x and z
fixMask = espressopp.Int3D(C_FREE, C_FIXED, C_FREE)
# create a particle group that will contain the fixed particles
fixedWall = espressopp.ParticleGroup(system.storage)
# add a 10x10 particle wall in the y=5 plane (1.12 approximates the WCA cutoff spacing)
pid = 1
for k in range(10):
    for l in range(10):
        system.storage.addParticle(pid, espressopp.Real3D(k*1.12, 5, l*1.12))
        fixedWall.add(pid)
        pid += 1
# add also one free particle above the wall, moving down towards it
system.storage.addParticle(0, espressopp.Real3D(5.8,9,5.5))
system.storage.modifyParticle(0, 'v', espressopp.Real3D(0, -0.1, 0))
# don't forget do decompose !
system.storage.decompose()
# create FixPositions Extension and add it to the integrator
fixpositions = espressopp.integrator.FixPositions(system, fixedWall, fixMask)
integrator.addExtension(fixpositions)
# run the simulation, streaming coordinates to a connected VMD instance
sock = espressopp.tools.vmd.connect(system)
for i in range(10000):
    integrator.run(100)
    espressopp.tools.vmd.imd_positions(system, sock)
|
fedepad/espressopp
|
examples/fix_particles/fix_particles.py
|
Python
|
gpl-3.0
| 1,157
|
[
"VMD"
] |
bc9f1557deb43c923a039ecd0bfd69e656fbab6288694b805cb25d46c9bc9f56
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from functools import reduce
import numpy
from pyscf import gto
from pyscf import scf
from pyscf import ao2mo
from pyscf import mcscf
from pyscf import fci
from pyscf.fci import fci_slow
# six hydrogen atoms in a 6-31G basis serve as a small FCI test system:
mol = gto.Mole()
mol.verbose = 0
mol.output = None#"out_h2o"
mol.atom = [
    ['H', ( 1.,-1.    , 0.   )],
    ['H', ( 0.,-1.    ,-1.   )],
    ['H', ( 0.,-0.5   ,-0.   )],
    ['H', ( 0.,-0.    ,-1.   )],
    ['H', ( 1.,-0.5   , 0.   )],
    ['H', ( 0., 1.    , 1.   )],
]
mol.basis = {'H': '6-31g'}
mol.build()
# tightly converged RHF reference wavefunction:
m = scf.RHF(mol)
m.conv_tol = 1e-15
m.conv_tol_grad = 1e-7
ehf = m.scf()
norb = m.mo_coeff.shape[1]
nelec = mol.nelectron
# MO-basis one- and two-electron integrals, rounded for reproducibility:
h1e = reduce(numpy.dot, (m.mo_coeff.T, m.get_hcore(), m.mo_coeff)).round(9)
g2e = ao2mo.incore.general(m._eri, (m.mo_coeff,)*4, compact=False).round(9)
na = fci.cistring.num_strings(norb, nelec//2)
# two fixed (seeded) random symmetric, normalized CI vectors shared by the tests:
numpy.random.seed(15)
ci0 = numpy.random.random((na,na))
ci0 = ci0 + ci0.T
ci0 /= numpy.linalg.norm(ci0)
ci1 = numpy.random.random((na,na))
ci1 = ci1 + ci1.T
ci1 /= numpy.linalg.norm(ci1)
def tearDownModule():
    """Release the module-level fixtures once all tests in this module ran."""
    global mol, m, h1e, g2e, ci0, ci1
    del mol, m, h1e, g2e, ci0, ci1
class KnownValues(unittest.TestCase):
    """Regression tests for fci.direct_spin0 against fci.direct_spin1,
    fci_slow, and hard-coded reference values."""
    def test_contract(self):
        # 1e- and 2e- Hamiltonian contractions must match direct_spin1:
        ci1 = fci.direct_spin0.contract_1e(h1e, ci0, norb, nelec)
        ci1ref = fci.direct_spin1.contract_1e(h1e, ci0, norb, nelec)
        self.assertTrue(numpy.allclose(ci1ref, ci1))
        self.assertAlmostEqual(numpy.linalg.norm(ci1), 9.1191973750140729, 8)
        ci1 = fci.direct_spin0.contract_2e(g2e, ci0, norb, nelec)
        ci1ref = fci.direct_spin1.contract_2e(g2e, ci0, norb, nelec)
        self.assertTrue(numpy.allclose(ci1ref, ci1))
        self.assertAlmostEqual(numpy.linalg.norm(ci1), 15.076640155228787, 7)
    def test_kernel(self):
        # FCI ground-state energy and the energy recomputed from the CI vector
        # must agree with the stored reference:
        e, c = fci.direct_spin0.kernel(h1e, g2e, norb, nelec)
        self.assertAlmostEqual(e, -9.1491239851241737, 8)
        e = fci.direct_spin0.energy(h1e, g2e, c, norb, nelec)
        self.assertAlmostEqual(e, -9.1491239851241737, 8)
    def test_rdm1(self):
        # one-particle density matrix vs direct_spin1 and fci_slow:
        dm1ref = fci.direct_spin1.make_rdm1(ci0, norb, nelec)
        dm1 = fci.direct_spin0.make_rdm1(ci0, norb, nelec)
        self.assertTrue(numpy.allclose(dm1ref, dm1))
        self.assertAlmostEqual(numpy.linalg.norm(dm1), 2.7059849569286722, 10)
        norb1 = nelec
        na = fci.cistring.num_strings(norb1, nelec//2)
        ci1 = numpy.random.random((na,na))
        ci1 = ci1 + ci1.T
        dm1 = fci.direct_spin0.make_rdm1(ci1, norb1, nelec)
        ref1 = fci_slow.make_rdm1(ci1, norb1, nelec)
        self.assertAlmostEqual(abs(ref1-dm1).max(), 0, 10)
    def test_rdm12(self):
        # one- and two-particle density matrices vs direct_spin1 and fci_slow:
        dm1ref, dm2ref = fci.direct_spin1.make_rdm12(ci0, norb, nelec)
        dm1, dm2 = fci.direct_spin0.make_rdm12(ci0, norb, nelec)
        self.assertTrue(numpy.allclose(dm1ref, dm1))
        self.assertTrue(numpy.allclose(dm2ref, dm2))
        self.assertAlmostEqual(numpy.linalg.norm(dm1), 2.7059849569286731, 10)
        self.assertAlmostEqual(numpy.linalg.norm(dm2), 7.8811473403497736, 10)
        norb1 = nelec
        na = fci.cistring.num_strings(norb1, nelec//2)
        ci1 = numpy.random.random((na,na))
        ci1 = ci1 + ci1.T
        dm1, dm2 = fci.direct_spin0.make_rdm12(ci1, norb1, nelec)
        ref1, ref2 = fci_slow.make_rdm12(ci1, norb1, nelec)
        self.assertAlmostEqual(abs(ref1-dm1).max(), 0, 10)
        self.assertAlmostEqual(abs(ref2-dm2).max(), 0, 10)
    def test_trans_rdm1(self):
        # transition 1-RDM; also check trans_rdm1(ci0, ci0) == make_rdm1(ci0):
        dm1ref = fci.direct_spin1.trans_rdm1(ci0, ci1, norb, nelec)
        dm1 = fci.direct_spin0.trans_rdm1(ci0, ci1, norb, nelec)
        self.assertTrue(numpy.allclose(dm1ref, dm1))
        self.assertAlmostEqual(numpy.linalg.norm(dm1), 2.5485017426647461, 10)
        dm0 = fci.direct_spin0.make_rdm1(ci0, norb, nelec)
        dm1 = fci.direct_spin0.trans_rdm1(ci0, ci0, norb, nelec)
        self.assertTrue(numpy.allclose(dm1, dm0))
    def test_trans_rdm12(self):
        # transition 1- and 2-RDMs; same self-consistency check as above:
        dm1ref, dm2ref = fci.direct_spin1.trans_rdm12(ci0, ci1, norb, nelec)
        dm1, dm2 = fci.direct_spin0.trans_rdm12(ci0, ci1, norb, nelec)
        self.assertTrue(numpy.allclose(dm1ref, dm1))
        self.assertTrue(numpy.allclose(dm2ref, dm2))
        self.assertAlmostEqual(numpy.linalg.norm(dm1), 2.5485017426647461, 10)
        self.assertAlmostEqual(numpy.linalg.norm(dm2), 7.7327573770739235, 10)
        _,dm0 = fci.direct_spin0.make_rdm12(ci0, norb, nelec)
        _,dm2 = fci.direct_spin0.trans_rdm12(ci0, ci0, norb, nelec)
        self.assertTrue(numpy.allclose(dm2,dm0))
    def test_davidson_only(self):
        # CASCI on BeH2 with the Davidson solver forced on, plus the
        # symmetry-adapted solver with its default initial guess disabled:
        x = 3.0 * 0.529177249
        y = (2.54 - 0.46 * 3.0) * 0.529177249
        mol = gto.M(
            verbose = 0,
            atom = [
                ['Be',( 0., 0.    , 0.   )],
                ['H', ( x, -y    , 0.   )],
                ['H', ( x,  y    , 0.   )],],
            symmetry = True,
            basis = '6-311g')
        mf = scf.RHF(mol)
        mf.scf()
        mf._scf = mf
        h1e = mcscf.casci.h1e_for_cas(mf, mf.mo_coeff, ncas=2, ncore=2)[0]
        eri = ao2mo.incore.full(mf._eri, mf.mo_coeff[:,2:4])
        cis = fci.direct_spin0.FCISolver(mol)
        cis.davidson_only = True
        ci0 = numpy.zeros((2,2))
        ci0[0,0] = 1
        e, c = cis.kernel(h1e, eri, 2, 2, ci0)
        self.assertAlmostEqual(e, -0.80755526695538049, 10)
        cis = fci.direct_spin0_symm.FCISolver(mol)
        # Test the default initial guess. It should give "0" in the results
        cis.get_init_guess = None
        cis.dump_flags()
        e, c = cis.kernel(h1e, eri, 2, 2, orbsym=mf.mo_coeff.orbsym[2:4])
        self.assertAlmostEqual(e, 0, 10)
    def test_gen_linkstr(self):
        # tril-compressed link index must match the reformatted full one:
        sol = fci.direct_spin0.FCI(mol)
        link1 = sol.gen_linkstr(7, 6, tril=True)
        link1[:,:,1] = 0
        link2 = sol.gen_linkstr(7, (3,3), tril=False)
        self.assertAlmostEqual(abs(link1 - fci.cistring.reform_linkstr_index(link2)).max(), 0, 12)
    def test_small_system(self):
        # small random Hamiltonian: compare against dense diagonalization of
        # the pspace Hamiltonian, for one root and for several roots:
        sol = fci.direct_spin0.FCI()
        norb = 6
        nelec = (3,3)
        numpy.random.seed(9)
        h1e = numpy.random.random((norb,norb))
        h1e = h1e + h1e.T
        g2e = numpy.random.random((norb,norb,norb,norb))
        eri = .5* ao2mo.restore(1, ao2mo.restore(8, g2e, norb), norb)
        h = fci.direct_spin1.pspace(h1e, eri, norb, nelec, np=5000)[1]
        eref, c0 = numpy.linalg.eigh(h)
        e, c1 = sol.kernel(h1e, eri, norb, (norb,norb))
        self.assertAlmostEqual(e, 20.52279077686709, 12)
        e, c1 = sol.kernel(h1e, eri, norb, nelec, nroots=4)
        self.assertAlmostEqual(abs(eref[[0,1,3,5]] - e).max(), 0, 8)
# allow running this test module directly:
if __name__ == "__main__":
    print("Full Tests for spin0")
    unittest.main()
|
gkc1000/pyscf
|
pyscf/fci/test/test_spin0.py
|
Python
|
apache-2.0
| 7,320
|
[
"PySCF"
] |
007e83a4f55a0265d73a21a8ccc8d317db2309aa1cfddc67cc8abd23ddbbc532
|
# Orca
#
# Copyright 2005-2008 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Custom script for basic switchers like Metacity."""
from .script import Script
from .script_utilities import Utilities
|
GNOME/orca
|
src/orca/scripts/switcher/__init__.py
|
Python
|
lgpl-2.1
| 905
|
[
"ORCA"
] |
39e6edece30263d6d9955eccc37d16b79b7e88407ef5acec630667280fc178fc
|
# coding=utf-8
# Copyright 2022 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Languaeg modeling experiments in mtf."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.models import mtf_transformer
from tensor2tensor.models import mtf_transformer2
from tensor2tensor.models.research import moe
from tensor2tensor.utils import registry
@registry.register_hparams
def xmoe_tr_dense_2k():
  """Baseline dense bitransformer for the translation experiments.

  Intended for an 8-core setup; 119M params, einsum=0.95e13.

  Returns:
    a hparams
  """
  hparams = mtf_transformer2.mtf_bitransformer_base()
  hparams.batch_size = 64
  hparams.shared_embedding_and_softmax_weights = True
  hparams.mesh_shape = "batch:8"
  # four blocks of (self-attention, dense-relu-dense) on each side:
  hparams.encoder_layers = ["self_att", "drd"] * 4
  hparams.decoder_layers = ["self_att", "enc_att", "drd"] * 4
  return hparams
@registry.register_hparams
def xmoe_tr_dense_32k():
  """Translation baseline with a much wider feed-forward layer.

  623M params, einsum=3.42e13

  Returns:
    a hparams
  """
  hparams = xmoe_tr_dense_2k()
  hparams.d_ff = 32768  # 16x the d_ff of xmoe_tr_dense_2k
  return hparams
@registry.register_hparams
def xmoe_tr_1d():
  """Translation model with 1d mixture-of-experts layers (16 experts).

  623M Params, einsum=1.09e13

  Returns:
    a hparams
  """
  hparams = xmoe_tr_dense_2k()
  # MoE configuration:
  hparams.moe_hidden_size = 2048
  hparams.moe_num_experts = 16
  hparams.layout = "batch:batch;experts:batch"
  # replace the dense-relu-dense layers by 1d MoE layers:
  hparams.encoder_layers = ["self_att", "moe_1d"] * 4
  hparams.decoder_layers = ["self_att", "enc_att", "moe_1d"] * 4
  return hparams
@registry.register_hparams
def xmoe_tr_2d():
  """Translation model with 2d mixture-of-experts layers (4x4 experts).

  623M Params, einsum=1.09e13

  Returns:
    a hparams
  """
  hparams = xmoe_tr_dense_2k()
  # 2d mesh and batch splitting:
  hparams.mesh_shape = "b0:2;b1:4"
  hparams.outer_batch_size = 4
  hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0"
  # MoE configuration (4x4 grid of experts):
  hparams.moe_hidden_size = 2048
  hparams.moe_experts_x = 4
  hparams.moe_experts_y = 4
  # replace the dense-relu-dense layers by 2d MoE layers:
  hparams.encoder_layers = ["self_att", "moe_2d"] * 4
  hparams.decoder_layers = ["self_att", "enc_att", "moe_2d"] * 4
  return hparams
@registry.register_hparams
def xmoe_dense_4k():
  """Base config for architectural experiments on cheap language models.

  All of these architectures are run on languagemodel_lm1b8k_packed for
  32000 steps.  Log-perplexities are per-token; multiply by 1.298 for
  per-word numbers.

  Results:
           model        params(M)  einsum  alltoall  mxu-util  log-ppl
  xmoe_dense_4k             30     3.0e12        0      45%      3.31
  xmoe_dense_8k             46     4.7e12        0      49%      3.24
  xmoe_dense_64k           282     2.8e13        0               3.06
  xmoe_top_2               282     4.0e12   3.4e8      36%      3.07
  xmoe_top_2_c15           282     4.5e12   4.0e8      38%      3.07
  xmoe_2d                  282     5.3e12   7.6e8      34%      3.06

  Trained at 4x the batch size:
  xmoe_2d_88              1090     2.1e13   3.0e9      24%      3.07

  Note: configurations and code are likely to change without notice.

  Returns:
    a hparams
  """
  hparams = mtf_transformer.mtf_transformer_base_lm()
  # no dropout anywhere:
  hparams.attention_dropout = 0.0
  hparams.relu_dropout = 0.0
  hparams.layer_prepostprocess_dropout = 0.0
  # constant across all of these experiments:
  hparams.batch_size = 128
  hparams.d_model = 512
  hparams.d_kv = 128
  hparams.num_heads = 4
  hparams.decoder_layers = ["att", "drd"] * 4
  hparams.shared_embedding_and_softmax_weights = False
  hparams.learning_rate_schedule = "rsqrt_decay"
  # varied between the experiments: ffn/moe layer configuration:
  hparams.d_ff = 4096
  hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model"
  hparams.mesh_shape = "batch:8"
  return hparams
@registry.register_hparams
def xmoe_dense_8k():
  """Baseline with twice the feed-forward width (d_ff=8192)."""
  hparams = xmoe_dense_4k()
  hparams.d_ff = 8192
  return hparams


@registry.register_hparams
def xmoe_dense_64k():
  """Very wide layer- run on 4x4."""
  hparams = xmoe_dense_4k()
  hparams.d_ff = 65536
  # The wide feed-forward layer is additionally model-parallel over 4 cores.
  hparams.mesh_shape = "model:4,batch:8"
  return hparams
@registry.register_hparams
def xmoe_top_2():
  """Flat (one-dimensional) mixture of experts with 16 experts."""
  # Start from the dense baseline, then switch on the MoE defaults.
  hparams = xmoe_dense_4k()
  moe.set_default_moe_hparams(hparams)
  # A single mesh dimension ("all") carries both batch and expert
  # parallelism.
  hparams.layout = "batch:all;experts:all"
  hparams.mesh_shape = "all:8"
  return hparams
@registry.register_hparams
def xmoe_top_2_c15():
  """Mixture of experts."""
  hparams = xmoe_top_2()
  # Allow each expert 1.5x its even share of tokens during training.
  hparams.moe_capacity_factor_train = 1.5
  return hparams


@registry.register_hparams
def xmoe_2d():
  """Two-dimensional hierarchical mixture of 16 experts."""
  hparams = xmoe_top_2()
  hparams.decoder_layers = ["att", "hmoe"] * 4
  # 2x4 mesh; the 4x4 expert grid is split across both mesh dimensions.
  hparams.mesh_shape = "b0:2;b1:4"
  hparams.outer_batch_size = 4
  hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0"
  hparams.moe_num_experts = [4, 4]
  return hparams


@registry.register_hparams
def xmoe_2d_debug():
  """For debugging.

  Running this model on TPU without the hack of casting to bfloat16 for
  alltoall results in nan on the first step.
  TODO(noam): debug

  Returns:
    a hparams
  """
  hparams = xmoe_2d()
  # A single hierarchical-MoE layer keeps the repro minimal.
  hparams.decoder_layers = ["hmoe"] * 1
  hparams.activation_dtype = "float32"
  return hparams


@registry.register_hparams
def xmoe_2d_c15():
  """Mixture of experts."""
  hparams = xmoe_2d()
  hparams.moe_capacity_factor_train = 1.5
  return hparams


@registry.register_hparams
def xmoe_2d_x64():
  """Two-dimensional hierarchical mixture of 64 experts."""
  hparams = xmoe_2d()
  # hparams.mesh_shape = "b0:4;b1:8"
  hparams.outer_batch_size = 4
  hparams.moe_num_experts = [8, 8]
  return hparams
@registry.register_hparams
def xmoe2_dense(sz):
  """Series of architectural experiments on language modeling.

  Larger models than the ones above.
  All models are trained on sequences of 1024 tokens.

  We assume infinite training data, so no dropout necessary.
  We process 2^36 tokens in training = 524288 steps at batch size 128

  TODO(noam): find a large enough dataset for these experiments.
  You can use languagemodel_wiki_noref_v32k_l1k, but this is too small,
  (1 epoch = ~46000 steps) so training will cover about 11 epochs.

  Note: configurations and code are likely to change without notice.

  Run on TPU 4x4 for 524288 steps unless otherwise indicated.

  Args:
    sz: an integer

  Returns:
    a hparams
  """
  hparams = mtf_transformer.mtf_transformer_paper_lm(sz)
  # Infinite-data regime: disable all dropout.
  hparams.attention_dropout = 0.0
  hparams.relu_dropout = 0.0
  hparams.layer_prepostprocess_dropout = 0.0
  hparams.max_length = 1024
  hparams.batch_size = 128
  # rsqrt warmup/decay multiplied by a final linear decay phase.
  hparams.learning_rate_schedule = "rsqrt_decay*linear_decay"
  hparams.learning_rate_decay_steps = 65536
  hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model"
  hparams.mesh_shape = "batch:32"
  return hparams
@registry.register_hparams
def xmoe2_dense_0():
  """Dense baseline at size 0 (see xmoe2_dense)."""
  return xmoe2_dense(0)


@registry.register_hparams
def xmoe2_dense_1():
  """Dense baseline at size 1 (see xmoe2_dense)."""
  return xmoe2_dense(1)


@registry.register_hparams
def xmoe2_dense_2():
  """Dense baseline at size 2 (see xmoe2_dense)."""
  return xmoe2_dense(2)


@registry.register_hparams
def xmoe2_dense_3():
  """Dense baseline at size 3 (see xmoe2_dense)."""
  return xmoe2_dense(3)
@registry.register_hparams
def xmoe2_v1():
  """Model incorporating mixture-of-experts and local-attention.

  ~6B parameters
  32 experts in 3 hierarchichal moe layers.

  Returns:
    a hparams
  """
  hparams = xmoe2_dense(0)
  moe.set_default_moe_hparams(hparams)
  # Repeat an 8-layer block four times, then drop the trailing "hmoe" so
  # the stack ends on a local-attention layer (3 hmoe layers remain).
  block = ["local_att", "local_att", "drd",
           "att", "drd", "local_att", "local_att", "hmoe"]
  hparams.decoder_layers = (block * 4)[:-1]
  overrides = {
      "d_ff": 2048,
      "d_kv": 128,
      "moe_hidden_size": 32768,
      "mesh_shape": "b0:4;b1:8",
      "layout": "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0",
      "outer_batch_size": 4,
      "moe_num_experts": [8, 4],
      "num_heads": 4,
  }
  for name, value in overrides.items():
    setattr(hparams, name, value)
  return hparams
@registry.register_hparams
def xmoe2_v1_x128():
  """128 experts, ~25B params - Train for 131072 steps on 8x8."""
  hparams = xmoe2_v1()
  hparams.moe_num_experts = [16, 8]
  hparams.outer_batch_size = 8
  hparams.mesh_shape = "b0:8;b1:16"
  hparams.batch_size = 512
  hparams.learning_rate_decay_steps = 16384
  return hparams


@registry.register_hparams
def xmoe2_tiny():
  """Test on local cpu."""
  hparams = xmoe2_v1()
  # One layer of each type, at toy sizes.
  hparams.decoder_layers = [
      "local_att", "att", "compressed_att", "drd", "hmoe"]
  hparams.d_model = 128
  hparams.moe_hidden_size = 512
  # NOTE(review): outer_batch_size=0 presumably disables the outer-batch
  # split for single-host runs -- confirm against moe.py.
  hparams.outer_batch_size = 0
  hparams.batch_size = 2
  hparams.mesh_shape = ""
  hparams.activation_dtype = "float32"
  return hparams


@registry.register_hparams
def xmoe2_v1_l4k():
  """With sequence length 4096."""
  hparams = xmoe2_v1()
  # Batch size is reduced 4x to compensate for the 4x longer sequences.
  hparams.batch_size = 32
  hparams.max_length = 4096
  hparams.split_to_length = 4096
  hparams.reshape_logits_hack = True
  return hparams
@registry.register_hparams
def xmoe2_v1_l4k_local_only():
  """With sequence length 4096; every global-attention layer made local."""
  hparams = xmoe2_v1_l4k()
  localized = []
  for layer in hparams.decoder_layers:
    localized.append("local_att" if layer == "att" else layer)
  hparams.decoder_layers = localized
  return hparams
@registry.register_hparams
def xmoe2_v1_l4k_global_only():
  """With sequence length 4096; every local-attention layer made global."""
  hparams = xmoe2_v1_l4k()
  hparams.decoder_layers = [
      "att" if l == "local_att" else l for l in hparams.decoder_layers]
  return hparams


@registry.register_hparams
def xmoe2_v1_l4k_compressed_c4():
  """With compressed attention."""
  hparams = xmoe2_v1_l4k()
  # Swap global attention for memory-compressed attention (factor 4).
  hparams.decoder_layers = [
      "compressed_att" if l == "att" else l for l in hparams.decoder_layers]
  hparams.compression_factor = 4
  return hparams


@registry.register_hparams
def xmoe2_v1_l4k_compressed_c8():
  """With compressed attention (compression factor 8)."""
  hparams = xmoe2_v1_l4k_compressed_c4()
  hparams.compression_factor = 8
  return hparams
@registry.register_hparams
def wiki_2x2_base():
  """Set of architectural experiments - language model on wikipedia on a 2x2.

  1 epoch = ~180k steps at batch size 32 - we may never finish an epoch!

  Returns:
    a hparams
  """
  hparams = mtf_transformer.mtf_transformer_base_lm()
  hparams.shared_embedding_and_softmax_weights = False
  # no dropout - dataset is big enough to avoid overfitting.
  hparams.attention_dropout = 0.0
  hparams.relu_dropout = 0.0
  hparams.layer_prepostprocess_dropout = 0.0
  hparams.max_length = 1024
  # 4 sequences per core
  hparams.batch_size = 32
  # We don't use linear decay in these experiments, since we don't want
  # a sharp jump in quality at the end of the training schedule.
  # You can insert this once you find the right architecture.
  hparams.learning_rate_schedule = "rsqrt_decay"
  hparams.mesh_shape = "all:8"
  hparams.layout = "batch:all;experts:all"

  # parameters for mixture-of-experts
  moe.set_default_moe_hparams(hparams)
  hparams.moe_num_experts = 16
  hparams.moe_hidden_size = 8192

  # Dense att/ffn stack by default; variants below swap layers in.
  hparams.decoder_layers = ["att", "drd"] * 6
  hparams.d_model = 1024
  hparams.d_ff = 2048
  hparams.d_kv = 128
  hparams.num_heads = 4
  return hparams
@registry.register_hparams
def wiki_2x2_v1():
  """Mostly-local attention stack with one MoE layer per 8-layer block."""
  hparams = wiki_2x2_base()
  # The trailing "moe" of the last repeated block is dropped ([:-1]).
  hparams.decoder_layers = (
      ["local_att", "local_att", "drd",
       "att", "drd", "local_att", "local_att", "moe"] * 4)[:-1]
  return hparams


@registry.register_hparams
def wiki_2x2_local():
  """Baseline variant using only local attention."""
  hparams = wiki_2x2_base()
  hparams.decoder_layers = ["local_att", "drd"] * 6
  return hparams
@registry.register_hparams
def denoise_m15():
  """Denoising experiment."""
  hparams = xmoe2_dense_0()
  hparams.decoder_type = "denoising"
  # Mask 15% of input tokens during training.
  hparams.noising_spec_train = {"type": "mask", "prob": 0.15}
  return hparams


@registry.register_hparams
def denoise_m30():
  """More masking during training."""
  hparams = xmoe2_dense_0()
  hparams.decoder_type = "denoising"
  hparams.noising_spec_train = {"type": "mask", "prob": 0.3}
  return hparams


@registry.register_hparams
def denoise_dense_2_m30():
  """More masking during training."""
  hparams = xmoe2_dense_2()
  hparams.decoder_type = "denoising"
  hparams.noising_spec_train = {"type": "mask", "prob": 0.3}
  return hparams


@registry.register_hparams
def denoise_z15():
  """Replace tokens instead of masking."""
  hparams = xmoe2_dense_0()
  hparams.decoder_type = "denoising"
  hparams.noising_spec_train = {"type": "random_zipfian", "prob": 0.15}
  # NOTE(review): presumably applies the eval-time noising spec to 25% of
  # training examples -- confirm against the decoder implementation.
  hparams.noising_use_eval_during_train = 0.25
  return hparams
@registry.register_hparams
def denoise_t15():
  """Noise up with dropout and a little transformer."""
  hparams = xmoe2_dense_0()
  hparams.decoder_type = "denoising"
  # The noise source is itself a small 8-layer transformer (configured by
  # "overrides"), fed 15%-masked input.
  hparams.noising_spec_train = {
      "type": "transformer",
      "overrides": {
          "noising_spec_train": {"type": "mask", "prob": 0.15},
          "noising_use_eval_during_train": 0.0,
          "decoder_layers": ["att", "drd"] * 4,
          "num_heads": 4,
          "d_model": 512,
          "d_ff": 2048,
      }
  }
  return hparams
@registry.register_hparams
def denoise_v1_m15():
  """Denoising experiment."""
  hparams = xmoe2_v1()
  # no local attention
  # TODO(noam): non-masked version of local-attention
  globalized = []
  for layer in hparams.decoder_layers:
    globalized.append("att" if layer == "local_att" else layer)
  hparams.decoder_layers = globalized
  hparams.decoder_type = "denoising"
  hparams.noising_spec_train = {"type": "mask", "prob": 0.15}
  return hparams
@registry.register_hparams
def denoise_v1_m30():
  """More masking during training."""
  hparams = denoise_v1_m15()
  hparams.noising_spec_train = {"type": "mask", "prob": 0.3}
  return hparams


@registry.register_hparams
def denoise_v1_m50():
  """More masking during training."""
  hparams = denoise_v1_m15()
  hparams.noising_spec_train = {"type": "mask", "prob": 0.5}
  return hparams


@registry.register_hparams
def denoise_v1_z15():
  """Replace tokens instead of masking."""
  hparams = denoise_v1_m15()
  # Corrupt tokens by sampling replacements from a zipfian distribution.
  hparams.noising_spec_train = {"type": "random_zipfian", "prob": 0.15}
  return hparams


@registry.register_hparams
def denoise_v1_t15():
  """Noise up with dropout and a little transformer."""
  hparams = denoise_v1_m15()
  # The noise source is a small transformer (see "overrides") fed
  # 15%-masked input.
  hparams.noising_spec_train = {
      "type": "transformer",
      "overrides": {
          "noising_spec_train": {"type": "mask", "prob": 0.15},
          "noising_use_eval_during_train": 0.0,
          "decoder_layers": ["att", "drd"] * 4,
          "num_heads": 4,
          "d_model": 512,
          "d_ff": 2048,
      }
  }
  return hparams
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/moe_experiments.py
|
Python
|
apache-2.0
| 14,773
|
[
"MOE"
] |
210f8bd437be992eb92be019e94aa50020e798de7fa1306564e9f789fe8b3cc5
|
# Copyright 2007 by Michiel de Hoon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
This module provides code to work with the sprotXX.dat file from
SwissProt.
http://www.expasy.ch/sprot/sprot-top.html
Tested with:
Release 56.9, 03-March-2009.
Classes:
Record Holds SwissProt data.
Reference Holds reference data from a SwissProt record.
Functions:
read Read one SwissProt record
parse Read multiple SwissProt records
"""
from Bio._py3k import _as_string
class Record(object):
    """Holds information from a SwissProt record.

    Members:
    entry_name        Name of this entry, e.g. RL1_ECOLI.
    data_class        Either 'STANDARD' or 'PRELIMINARY'.
    molecule_type     Type of molecule, 'PRT',
    sequence_length   Number of residues.

    accessions        List of the accession numbers, e.g. ['P00321']
    created           A tuple of (date, release).
    sequence_update   A tuple of (date, release).
    annotation_update A tuple of (date, release).

    description       Free-format description.
    gene_name         Gene name.  See userman.txt for description.
    organism          The source of the sequence.
    organelle         The origin of the sequence.
    organism_classification  The taxonomy classification.  List of strings.
                      (http://www.ncbi.nlm.nih.gov/Taxonomy/)
    taxonomy_id       A list of NCBI taxonomy id's.
    host_organism     A list of names of the hosts of a virus, if any.
    host_taxonomy_id  A list of NCBI taxonomy id's of the hosts, if any.
    references        List of Reference objects.
    comments          List of strings.
    cross_references  List of tuples (db, id1[, id2][, id3]).  See the docs.
    keywords          List of the keywords.
    features          List of tuples (key name, from, to, description).
                      from and to can be either integers for the residue
                      numbers, '<', '>', or '?'

    seqinfo           tuple of (length, molecular weight, CRC32 value)
    sequence          The sequence.
    """
    def __init__(self):
        self.entry_name = None
        self.data_class = None
        self.molecule_type = None
        self.sequence_length = None

        self.accessions = []
        self.created = None
        self.sequence_update = None
        self.annotation_update = None

        # description and organism accumulate one line at a time during
        # parsing and are joined into single strings when the terminating
        # "//" line is reached (see _read).
        self.description = []
        self.gene_name = ''
        self.organism = []
        self.organelle = ''
        self.organism_classification = []
        self.taxonomy_id = []
        self.host_organism = []
        self.host_taxonomy_id = []
        self.references = []
        self.comments = []
        self.cross_references = []
        self.keywords = []
        self.features = []

        self.seqinfo = None
        self.sequence = ''
class Reference(object):
    """Holds information from one reference in a SwissProt entry.

    Members:
    number      Number of reference in an entry.
    positions   Describes extent of work.  List of strings.
    comments    Comments.  List of (token, text).
    references  References.  List of (dbname, identifier).
    authors     The authors of the work.
    title       Title of the work.
    location    A citation for the work.
    """
    def __init__(self):
        self.number = None
        self.positions = []
        self.comments = []
        self.references = []
        # authors, title and location accumulate one line at a time while
        # parsing and are joined into single strings when the record is
        # finalized (see _read).
        self.authors = []
        self.title = []
        self.location = []
def parse(handle):
    """Yield SwissProt Record objects parsed from the handle."""
    # _read returns None once the handle is exhausted, which serves as
    # the sentinel terminating the iterator.
    for record in iter(lambda: _read(handle), None):
        yield record
def read(handle):
    """Read exactly one SwissProt record from the handle.

    Raises ValueError if the handle holds no record, or more than one.
    """
    record = _read(handle)
    if not record:
        raise ValueError("No SwissProt record found")
    # We should have reached the end of the record by now.
    if handle.read():
        raise ValueError("More than one SwissProt record found")
    return record
# Everything below is considered private
def _read(handle):
    """Read and parse one SwissProt record from the handle (private).

    Returns a Record, or None when the handle is exhausted.  Dispatches on
    the two-letter line-type code; multi-line values are accumulated in
    lists and joined when the terminating "//" line is reached.
    """
    record = None
    unread = ""
    for line in handle:
        # This is for Python 3 to cope with a binary handle (byte strings),
        # or a text handle (unicode strings):
        line = _as_string(line)
        # Two-letter line code; the value starts at column 5.
        key, value = line[:2], line[5:].rstrip()
        if unread:
            # A previous RC line ended mid-entry; prepend its remainder.
            value = unread + " " + value
            unread = ""
        if key == '**':
            # See Bug 2353, some files from the EBI have extra lines
            # starting "**" (two asterisks/stars).  They appear
            # to be unofficial automated annotations. e.g.
            # **
            # **   ###############  INTERNAL SECTION  ##################
            # **HA SAM; Annotated by PicoHamap 1.88; MF_01138.1; 09-NOV-2003.
            pass
        elif key == 'ID':
            record = Record()
            _read_id(record, line)
            _sequence_lines = []
        elif key == 'AC':
            accessions = [word for word in value.rstrip(";").split("; ")]
            record.accessions.extend(accessions)
        elif key == 'DT':
            _read_dt(record, line)
        elif key == 'DE':
            record.description.append(value.strip())
        elif key == 'GN':
            if record.gene_name:
                record.gene_name += " "
            record.gene_name += value
        elif key == 'OS':
            record.organism.append(value)
        elif key == 'OG':
            record.organelle += line[5:]
        elif key == 'OC':
            cols = [col for col in value.rstrip(";.").split("; ")]
            record.organism_classification.extend(cols)
        elif key == 'OX':
            _read_ox(record, line)
        elif key == 'OH':
            _read_oh(record, line)
        elif key == 'RN':
            # RN starts a new reference block.
            reference = Reference()
            _read_rn(reference, value)
            record.references.append(reference)
        elif key == 'RP':
            assert record.references, "RP: missing RN"
            record.references[-1].positions.append(value)
        elif key == 'RC':
            assert record.references, "RC: missing RN"
            reference = record.references[-1]
            # _read_rc may return a trailing fragment to carry over to the
            # next line (see the `unread` handling above).
            unread = _read_rc(reference, value)
        elif key == 'RX':
            assert record.references, "RX: missing RN"
            reference = record.references[-1]
            _read_rx(reference, value)
        elif key == 'RL':
            assert record.references, "RL: missing RN"
            reference = record.references[-1]
            reference.location.append(value)
        # In UniProt release 1.12 of 6/21/04, there is a new RG
        # (Reference Group) line, which references a group instead of
        # an author.  Each block must have at least 1 RA or RG line.
        elif key == 'RA':
            assert record.references, "RA: missing RN"
            reference = record.references[-1]
            reference.authors.append(value)
        elif key == 'RG':
            assert record.references, "RG: missing RN"
            reference = record.references[-1]
            reference.authors.append(value)
        elif key == "RT":
            assert record.references, "RT: missing RN"
            reference = record.references[-1]
            reference.title.append(value)
        elif key == 'CC':
            _read_cc(record, line)
        elif key == 'DR':
            _read_dr(record, value)
        elif key == 'PE':
            # TODO - Record this information?
            pass
        elif key == 'KW':
            cols = value.rstrip(";.").split('; ')
            record.keywords.extend(cols)
        elif key == 'FT':
            _read_ft(record, line)
        elif key == 'SQ':
            cols = value.split()
            assert len(cols) == 7, "I don't understand SQ line %s" % line
            # Do more checking here?
            record.seqinfo = int(cols[1]), int(cols[3]), cols[5]
        elif key == '  ':
            # Sequence data lines begin with five blanks, so line[:2] is
            # two spaces here.
            _sequence_lines.append(value.replace(" ", "").rstrip())
        elif key == '//':
            # End of record: join multiline data into single strings.
            record.description = " ".join(record.description)
            record.organism = " ".join(record.organism)
            record.organelle = record.organelle.rstrip()
            for reference in record.references:
                reference.authors = " ".join(reference.authors).rstrip(";")
                reference.title = " ".join(reference.title).rstrip(";")
                if reference.title.startswith('"') and reference.title.endswith('"'):
                    reference.title = reference.title[1:-1]  # remove quotes
                reference.location = " ".join(reference.location)
            record.sequence = "".join(_sequence_lines)
            return record
        else:
            raise ValueError("Unknown keyword '%s' found" % key)
    if record:
        # Ran out of lines before the terminating "//".
        raise ValueError("Unexpected end of stream.")
def _read_id(record, line):
cols = line[5:].split()
#Prior to release 51, included with MoleculeType:
#ID EntryName DataClass; MoleculeType; SequenceLength AA.
#
#Newer files lack the MoleculeType:
#ID EntryName DataClass; SequenceLength AA.
if len(cols) == 5:
record.entry_name = cols[0]
record.data_class = cols[1].rstrip(";")
record.molecule_type = cols[2].rstrip(";")
record.sequence_length = int(cols[3])
elif len(cols) == 4:
record.entry_name = cols[0]
record.data_class = cols[1].rstrip(";")
record.molecule_type = None
record.sequence_length = int(cols[2])
else:
raise ValueError("ID line has unrecognised format:\n"+line)
# check if the data class is one of the allowed values
allowed = ('STANDARD', 'PRELIMINARY', 'IPI', 'Reviewed', 'Unreviewed')
if record.data_class not in allowed:
raise ValueError("Unrecognized data class %s in line\n%s" % \
(record.data_class, line))
# molecule_type should be 'PRT' for PRoTein
# Note that has been removed in recent releases (set to None)
if record.molecule_type not in (None, 'PRT'):
raise ValueError("Unrecognized molecule type %s in line\n%s" % \
(record.molecule_type, line))
def _read_dt(record, line):
    """Parse a DT (date) line (private).

    Sets record.created / sequence_update / annotation_update to a
    (date, version) tuple, handling both the pre-release-7.0 and the
    newer DT line formats.
    """
    value = line[5:]
    uprline = value.upper()
    cols = value.rstrip().split()
    if 'CREATED' in uprline \
       or 'LAST SEQUENCE UPDATE' in uprline \
       or 'LAST ANNOTATION UPDATE' in uprline:
        # Old style DT line
        # =================
        # e.g.
        # DT   01-FEB-1995 (Rel. 31, Created)
        # DT   01-FEB-1995 (Rel. 31, Last sequence update)
        # DT   01-OCT-2000 (Rel. 40, Last annotation update)
        #
        # or:
        # DT   08-JAN-2002 (IPI Human rel. 2.3, Created)
        # ...
        # find where the version information will be located
        # This is needed for when you have cases like IPI where
        # the release verison is in a different spot:
        # DT   08-JAN-2002 (IPI Human rel. 2.3, Created)
        uprcols = uprline.split()
        rel_index = -1
        # Keeps the LAST column containing "REL." (no break on purpose).
        for index in range(len(uprcols)):
            if uprcols[index].find("REL.") >= 0:
                rel_index = index
        assert rel_index >= 0, \
            "Could not find Rel. in DT line: %s" % line
        version_index = rel_index + 1
        # get the version information
        str_version = cols[version_index].rstrip(",")
        # no version number
        if str_version == '':
            version = 0
        # dot versioned
        elif str_version.find(".") >= 0:
            version = str_version
        # integer versioned
        else:
            version = int(str_version)
        date = cols[0]
        if 'CREATED' in uprline:
            record.created = date, version
        elif 'LAST SEQUENCE UPDATE' in uprline:
            record.sequence_update = date, version
        elif 'LAST ANNOTATION UPDATE' in uprline:
            record.annotation_update = date, version
        else:
            assert False, "Shouldn't reach this line!"
    elif 'INTEGRATED INTO' in uprline \
         or 'SEQUENCE VERSION' in uprline \
         or 'ENTRY VERSION' in uprline:
        # New style DT line
        # =================
        # As of UniProt Knowledgebase release 7.0 (including
        # Swiss-Prot release 49.0 and TrEMBL release 32.0) the
        # format of the DT lines and the version information
        # in them was changed - the release number was dropped.
        #
        # For more information see bug 1948 and
        # http://ca.expasy.org/sprot/relnotes/sp_news.html#rel7.0
        #
        # e.g.
        # DT   01-JAN-1998, integrated into UniProtKB/Swiss-Prot.
        # DT   15-OCT-2001, sequence version 3.
        # DT   01-APR-2004, entry version 14.
        #
        # This is a new style DT line...
        # The date should be in string cols[1]
        # Get the version number if there is one.
        # For the three DT lines above: 0, 3, 14
        try:
            version = int(cols[-1])
        except ValueError:
            version = 0
        date = cols[0].rstrip(",")
        # Re-use the historical property names, even though
        # the meaning has changed slighty:
        if "INTEGRATED" in uprline:
            record.created = date, version
        elif 'SEQUENCE VERSION' in uprline:
            record.sequence_update = date, version
        elif 'ENTRY VERSION' in uprline:
            record.annotation_update = date, version
        else:
            assert False, "Shouldn't reach this line!"
    else:
        raise ValueError("I don't understand the date line %s" % line)
def _read_ox(record, line):
# The OX line is in the format:
# OX DESCRIPTION=ID[, ID]...;
# If there are too many id's to fit onto a line, then the ID's
# continue directly onto the next line, e.g.
# OX DESCRIPTION=ID[, ID]...
# OX ID[, ID]...;
# Currently, the description is always "NCBI_TaxID".
# To parse this, I need to check to see whether I'm at the
# first line. If I am, grab the description and make sure
# it's an NCBI ID. Then, grab all the id's.
if record.taxonomy_id:
ids = line[5:].rstrip().rstrip(";")
else:
descr, ids = line[5:].rstrip().rstrip(";").split("=")
assert descr == "NCBI_TaxID", "Unexpected taxonomy type %s" % descr
record.taxonomy_id.extend(ids.split(', '))
def _read_oh(record, line):
# Line type OH (Organism Host) for viral hosts
assert line[5:].startswith("NCBI_TaxID="), "Unexpected %s" % line
line = line[16:].rstrip()
assert line[-1]=="." and line.count(";")==1, line
taxid, name = line[:-1].split(";")
record.host_taxonomy_id.append(taxid.strip())
record.host_organism.append(name.strip())
def _read_rn(reference, rn):
assert rn[0] == '[' and rn[-1] == ']', "Missing brackets %s" % rn
reference.number = int(rn[1:-1])
def _read_rc(reference, value):
cols = value.split(';')
if value[-1]==';':
unread = ""
else:
cols, unread = cols[:-1], cols[-1]
for col in cols:
if not col: # last column will be the empty string
return
# The token is everything before the first '=' character.
i = col.find("=")
if i>=0:
token, text = col[:i], col[i+1:]
comment = token.lstrip(), text
reference.comments.append(comment)
else:
comment = reference.comments[-1]
comment = "%s %s" % (comment, col)
reference.comments[-1] = comment
return unread
def _read_rx(reference, value):
# The basic (older?) RX line is of the form:
# RX MEDLINE; 85132727.
# but there are variants of this that need to be dealt with (see below)
# CLD1_HUMAN in Release 39 and DADR_DIDMA in Release 33
# have extraneous information in the RX line. Check for
# this and chop it out of the line.
# (noticed by katel@worldpath.net)
value = value.replace(' [NCBI, ExPASy, Israel, Japan]','')
# RX lines can also be used of the form
# RX PubMed=9603189;
# reported by edvard@farmasi.uit.no
# and these can be more complicated like:
# RX MEDLINE=95385798; PubMed=7656980;
# RX PubMed=15060122; DOI=10.1136/jmg 2003.012781;
# We look for these cases first and deal with them
warn = False
if "=" in value:
cols = value.split("; ")
cols = [x.strip() for x in cols]
cols = [x for x in cols if x]
for col in cols:
x = col.split("=")
if len(x) != 2 or x == ("DOI", "DOI"):
warn = True
break
assert len(x) == 2, "I don't understand RX line %s" % value
reference.references.append((x[0], x[1].rstrip(";")))
# otherwise we assume we have the type 'RX MEDLINE; 85132727.'
else:
cols = value.split("; ")
# normally we split into the three parts
if len(cols) != 2:
warn = True
else:
reference.references.append((cols[0].rstrip(";"), cols[1].rstrip(".")))
if warn:
import warnings
from Bio import BiopythonParserWarning
warnings.warn("Possibly corrupt RX line %r" % value,
BiopythonParserWarning)
def _read_cc(record, line):
key, value = line[5:8], line[9:].rstrip()
if key=='-!-': # Make a new comment
record.comments.append(value)
elif key==' ': # add to the previous comment
if not record.comments:
# TCMO_STRGA in Release 37 has comment with no topic
record.comments.append(value)
else:
record.comments[-1] += " " + value
def _read_dr(record, value):
# Remove the comments at the end of the line
i = value.find(' [')
if i >= 0:
value = value[:i]
cols = value.rstrip(".").split('; ')
record.cross_references.append(tuple(cols))
def _read_ft(record, line):
    """Parse an FT (feature table) line into record.features (private).

    Features are stored as (name, from, to, description, ft_id) tuples;
    a line with a blank name column continues the previous feature's
    description.  The fields are taken from fixed column positions.
    """
    line = line[5:]    # get rid of junk in front
    name = line[0:8].rstrip()
    # Residue positions may be integers or '<', '>', '?' markers.
    try:
        from_res = int(line[9:15])
    except ValueError:
        from_res = line[9:15].lstrip()
    try:
        to_res = int(line[16:22])
    except ValueError:
        to_res = line[16:22].lstrip()
    # if there is a feature_id (FTId), store it away
    if line[29:35] == r"/FTId=":
        ft_id = line[35:70].rstrip()[:-1]
        description = ""
    else:
        ft_id = ""
        description = line[29:70].rstrip()
    if not name:  # is continuation of last one
        assert not from_res and not to_res
        # Pop the previous feature and merge this description into it.
        name, from_res, to_res, old_description, old_ft_id = record.features[-1]
        del record.features[-1]
        description = ("%s %s" % (old_description, description)).strip()
        # special case -- VARSPLIC, reported by edvard@farmasi.uit.no
        if name == "VARSPLIC":
            # Remove unwanted spaces in sequences.
            # During line carryover, the sequences in VARSPLIC can get mangled
            # with unwanted spaces like:
            # 'DISSTKLQALPSHGLESIQT -> PCRATGWSPFRRSSPC LPTH'
            # We want to check for this case and correct it as it happens.
            descr_cols = description.split(" -> ")
            if len(descr_cols) == 2:
                first_seq, second_seq = descr_cols
                extra_info = ''
                # we might have more information at the end of the
                # second sequence, which should be in parenthesis
                extra_info_pos = second_seq.find(" (")
                if extra_info_pos != -1:
                    extra_info = second_seq[extra_info_pos:]
                    second_seq = second_seq[:extra_info_pos]
                # now clean spaces out of the first and second string
                first_seq = first_seq.replace(" ", "")
                second_seq = second_seq.replace(" ", "")
                # reassemble the description
                description = first_seq + " -> " + second_seq + extra_info
    record.features.append((name, from_res, to_res, description, ft_id))
if __name__ == "__main__":
print "Quick self test..."
example_filename = "../../Tests/SwissProt/sp008"
import os
if not os.path.isfile(example_filename):
print "Missing test file %s" % example_filename
else:
#Try parsing it!
handle = open(example_filename)
records = parse(handle)
for record in records:
print record.entry_name
print ",".join(record.accessions)
print record.keywords
print repr(record.organism)
print record.sequence[:20] + "..."
handle.close()
|
bryback/quickseq
|
genescript/Bio/SwissProt/__init__.py
|
Python
|
mit
| 20,735
|
[
"Biopython"
] |
a6862585be8d716ac32860dcc62c3a7192c5a748a04ae520ffa69c43b0a82660
|
# -*- coding: utf-8 -*-
__author__ = 'Sindre Nistad'
from pickle import load
import neurolab.train as train
import Database.connector as conn
import Classifier.neural_network as nn
def run():
    """Train and evaluate a soil-classification neural network end to end.

    Connects to the database, loads a pickled dataset from 'soil.ds.pkl',
    trains a ClassificationNet with plain gradient descent, saves the
    trained network to 'soil.neuralnetwork' and prints the test error.
    """
    # conn.bind()
    # conn.disconnect(True, True)
    conn.connect()
    # set_norm_points_table(True)
    # set_extended_point_table(True)
    # conn.export_to_csv(dataset="AVIRIS", k=0)
    # k: number of target classes for the classifier.
    k = 3
    # dataset = conn.get_dataset_sample('soil', k, normalizing_mode='gaussian', dataset='AVIRIS',
    #                                   number_of_samples=-1,
    #                                   background_target_ratio=1.5,
    #                                   use_stored_normalization_values=False)
    # The dataset was previously dumped to disk; load it instead of
    # re-sampling from the database.
    f = open('soil.ds.pkl', 'rb')
    # dump(dataset, f)
    dataset = load(f)
    # print(dataset)
    minimum = dataset.min()
    maximum = dataset.max()
    transfer_functions = None  # [nltf.TanSig(), nltf.TanSig()]
    net = nn.ClassificationNet(minimum, maximum, dataset, k, 'soil', transfer_functions=transfer_functions,
                               training_function=train.train_gd)
    # net.net.errorf = nle.MSE()
    net.net.init()
    # Hold out 25% of the data for testing.
    net.divide_dataset(0.25)
    net.train(1000, 0.01)
    net.net.save('soil.neuralnetwork')
    err = net.test()
    print(err)
    # Mean error over the test set.
    print(err.sum() / err.size)
    # pass
run()
|
cLupus/neighborhood_classifier
|
src/test.py
|
Python
|
gpl-3.0
| 1,334
|
[
"Gaussian"
] |
8b0fd0e5270a4d8507de7412912e151f735a5e6b6975b1336561d58df93846c7
|
#!/usr/bin/python3
# coding=utf-8
"""reads barometric pressure sensor and writes it to UDP socket with timestamp available
"""
import socket
from datetime import datetime
from struct import pack
from time import sleep
from time import time
from os import _exit as dumbnrun
import ms5637
__author__ = 'Moe'
__copyright__ = 'Copyright 2017-2018 Moe'
__license__ = 'MIT'
__version__ = '0.0.3'
# Bari sensor of MS5637
sensor = ms5637.Chip()

host = "192.168.0.2"  # The BIG machine for the number grinding
port = 6421  # bari port
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

# Main loop: sample the sensor roughly 10x per second and stream each
# pressure reading as one network-order double over UDP.
while True:
    try:
        epochtime = time()
        humantime = datetime.fromtimestamp(epochtime).strftime('%Y-%m-%dT%H:%M:%S')
        pressure, temperature = sensor.get_data()
        print(humantime, pressure)
        outstring = pack('!d', *[pressure])  # .pack('!d', )
        # outstring = pack('!2d',*[pressure, temperature])
        # outstring = pack('!2d',*[epochtime, pressure])
        sock.sendto(outstring, (host, port))
        sleep(.1)
        # print(humantime, pressure)
        # outstring = str(humantime) + ', ' + str(pressure)
    except OSError:
        # Sensor read failed (presumably a transient bus error):
        # re-initialize the chip and try one fresh read before looping.
        sensor.__init__()
        pressure, temperature = sensor.get_data()
    except KeyboardInterrupt:
        sock.close()  # from os import _exit as dumbnrun
        dumbnrun(0)  # https://bytes.com/topic/python/answers/156121-os-_exit-vs-sys-exit
#
# Someday a cleaner Python interface will live here
#
# End
|
wadda/Bari
|
udp_spitter.py
|
Python
|
mit
| 1,474
|
[
"MOE"
] |
0088b89d5258863ad1ee29cabd06a78b298f9dffcccdf7688dae167483fa00d6
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This examples adds a text add that uses upgraded URLs.
This will only work if your account is a test account or is whitelisted for this
feature.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: AdGroupAdService.mutate
"""
__author__ = 'Mark Saniscalchi'
from googleads import adwords
from googleads import errors
ADGROUP_ID = 'INSERT_ADGROUP_ID_HERE'
def main(client, adgroup_id):
  """Adds a text ad with upgraded URL fields to the given ad group.

  Args:
    client: An adwords.AdWordsClient instance with valid credentials.
    adgroup_id: The ID of the ad group that will receive the new text ad.

  Raises:
    errors.GoogleAdsError: If the mutate response contains no created ads.
  """
  # Initialize appropriate service.
  adgroup_ad_service = client.GetService('AdGroupAdService', version='v201406')
  # Create the text ad
  text_ad = {
      'xsi_type': 'TextAd',
      'headline': 'Luxury Cruise to Mars',
      'description1': 'Visit the Red Planet in style.',
      'description2': 'Low-gravity fun for everyone!',
      'displayUrl': 'www.example.com',
      # Specify a tracking URL for 3rd party tracking provider. You may specify
      # one at customer, campaign, ad group, ad, criterion or feed item levels.
      'trackingUrlTemplate': ('http://tracker.example.com/?cid={_season}'
                              '&promocode={_promocode}&u={lpurl}'),
      'urlCustomParameters': {
          'parameters': [
              # Since your tracking URL has two custom parameters, provide
              # their values too. This can be provided at campaign, ad group,
              # ad, criterion, or feed item levels.
              {
                  'key': 'season',
                  'value': 'christmas'
              },
              {
                  'key': 'promocode',
                  'value': 'NYC123'
              }
          ]
      },
      # Specify a list of final URLs. This field cannot be set if URL
      # field is set, or finalUrls is unset. This may be specified at ad,
      # criterion, and feed item levels.
      'finalUrls': [
          'http://www.example.com/cruise/space/',
          'http://www.example.com/locations/mars/'
      ],
      # Specify a list of final mobile URLs. This field cannot be set if URL
      # field is set, or finalUrls is unset. This may be specified at ad,
      # criterion, and feed item levels.
      'finalMobileUrls': [
          'http://mobile.example.com/cruise/space/',
          'http://mobile.example.com/locations/mars/'
      ]
  }
  # Wrap the ad in an AdGroupAd operand so it can be attached to the ad group.
  text_adgroup_ad = {
      'adGroupId': adgroup_id,
      'ad': text_ad,
      # Optional: Set the status.
      'status': 'PAUSED'
  }
  operations = [{
      'operator': 'ADD',
      'operand': text_adgroup_ad
  }]
  # Single API round trip; on success 'value' holds the created ads.
  response = adgroup_ad_service.mutate(operations)
  if 'value' in response:
    for adgroup_ad in response['value']:
      # Python 2 print statements, consistent with the rest of this example.
      print ('AdGroupAd with ID %s and display URL \'%s\'was added.'
             % (adgroup_ad['ad']['id'], adgroup_ad['ad']['displayUrl']))
      print 'Upgraded URL properties:'
      print 'Final Urls: %s' % adgroup_ad['ad']['finalUrls']
      print 'Final Mobile URLs: %s' % adgroup_ad['ad']['finalMobileUrls']
      print ('Tracking URL template: %s'
             % adgroup_ad['ad']['trackingUrlTemplate'])
      print 'Custom parameters: %s' % adgroup_ad['ad']['urlCustomParameters']
  else:
    raise errors.GoogleAdsError('Failed to create AdGroupAd.')
if __name__ == '__main__':
  # Initialize client object.
  # LoadFromStorage pulls credentials and properties from the default
  # googleads.yaml file in the home directory (see module docstring).
  adwords_client = adwords.AdWordsClient.LoadFromStorage()
  main(adwords_client, ADGROUP_ID)
|
dietrichc/streamline-ppc-reports
|
examples/adwords/v201406/advanced_operations/add_text_ad_with_upgraded_urls.py
|
Python
|
apache-2.0
| 4,095
|
[
"VisIt"
] |
750e6f1dbd842bb1ecf3b0670e63d685388d5bec0591de731ebea31de1340c88
|
# Made by Vice - cleanup by DrLecter
# this script is part of the Official L2J Datapack Project.
# Visit http://forum.l2jdp.com for more details.
import sys
from com.l2scoria import Config
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
qn = "618_IntoTheFlame"
#NPCs
KLEIN = 31540
HILDA = 31271
#QUEST ITEMS
VACUALITE_ORE,VACUALITE,FLOATING_STONE = range(7265,7268)
#CHANCE
CHANCE_FOR_QUEST_ITEMS = 50
class Quest (JQuest) :
    """Quest 618 'Into the Flame' handler (L2J Jython quest script).

    Progress is tracked in the QuestState variable "cond":
      0 -> not started; 1 -> accepted at Klein; 2 -> Hilda asked for ore;
      3 -> 50 Vacualite Ore collected; 4 -> ore exchanged for a Vacualite.
    """

    def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)

    def onEvent (self,event,st) :
        # Triggered by html bypass buttons; `event` doubles as the default
        # html page to display.
        htmltext = event
        cond = st.getInt("cond")
        if event == "31540-03.htm" and cond == 0 :
            # Accept the quest at Klein.
            st.setState(STARTED)
            st.set("cond","1")
            st.playSound("ItemSound.quest_accept")
        elif event == "31540-05.htm" :
            # Finish: trade the Vacualite for the Floating Stone reward.
            if st.getQuestItemsCount(VACUALITE) and cond == 4 :
                st.takeItems(VACUALITE,1)
                st.giveItems(FLOATING_STONE,1)
                st.playSound("ItemSound.quest_finish")
                st.exitQuest(1)
            else :
                htmltext = "31540-03.htm"
        elif event == "31271-02.htm" and cond == 1 :
            # Hilda sends the player to collect ore.
            st.set("cond","2")
            st.playSound("ItemSound.quest_middle")
        elif event == "31271-05.htm" :
            # Exchange exactly 50 Vacualite Ore for one Vacualite.
            if cond == 3 and st.getQuestItemsCount(VACUALITE_ORE) == 50 :
                st.takeItems(VACUALITE_ORE,-1)
                st.giveItems(VACUALITE,1)
                st.set("cond","4")
                st.playSound("ItemSound.quest_middle")
            else :
                htmltext = "31271-03.htm"
        return htmltext

    def onTalk (self,npc,player) :
        # Default "no quest" page shown when the player has no quest state.
        htmltext = "<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
        st = player.getQuestState(qn)
        if not st : return htmltext
        npcId = npc.getNpcId()
        cond = st.getInt("cond")
        id = st.getState()  # NOTE(review): unused in this method
        if npcId == KLEIN :
            if cond == 0 :
                # Level gate: the quest requires level 60 or higher.
                if player.getLevel() < 60 :
                    htmltext = "31540-01.htm"
                    st.exitQuest(1)
                else :
                    htmltext = "31540-02.htm"
            elif cond == 4 and st.getQuestItemsCount(VACUALITE) :
                htmltext = "31540-04.htm"
            else :
                htmltext = "31540-03.htm"
        elif npcId == HILDA :
            if cond == 1 :
                htmltext = "31271-01.htm"
            elif cond == 3 and st.getQuestItemsCount(VACUALITE_ORE) == 50 :
                htmltext = "31271-04.htm"
            elif cond == 4 :
                htmltext = "31271-06.htm"
            else :
                htmltext = "31271-03.htm"
        return htmltext

    def onKill(self,npc,player,isPet) :
        # Reward a random party member whose quest is at cond == 2.
        partyMember = self.getRandomPartyMember(player,"2")
        if not partyMember : return
        st = partyMember.getQuestState(qn)
        if not st : return
        count = st.getQuestItemsCount(VACUALITE_ORE)
        if st.getInt("cond") == 2 and count < 50 :
            # Scale base chance by the server quest-drop rate: whole hundreds
            # become guaranteed drops, the remainder a percentage chance.
            chance = CHANCE_FOR_QUEST_ITEMS * Config.RATE_DROP_QUEST
            numItems, chance = divmod(chance,100)
            if st.getRandom(100) < chance :
                numItems += 1
            if numItems :
                if count + numItems >= 50 :
                    # Cap the total at 50 and advance to cond 3.
                    numItems = 50 - count
                    st.playSound("ItemSound.quest_middle")
                    st.set("cond","3")
                else:
                    st.playSound("ItemSound.quest_itemget")
                st.giveItems(VACUALITE_ORE,int(numItems))
        return
# Register the quest instance, its states, NPCs and monster kill handlers.
QUEST = Quest(618,qn,"Into the Flame")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(KLEIN)
QUEST.addTalkId(KLEIN)
QUEST.addTalkId(HILDA)
# Three contiguous NPC id bands drop Vacualite Ore. (Jython/Python 2:
# range() returns lists, so `+` concatenates them.)
for mob in range(21274,21278)+range(21282,21286)+range(21290,21294) :
    QUEST.addKillId(mob)
# NOTE(review): range(7265,7267) registers only VACUALITE_ORE and VACUALITE
# as quest drops; FLOATING_STONE (7267) is excluded -- presumably intended
# since it is the final reward, but worth confirming.
for item in range(7265,7267) :
    STARTED.addQuestDrop(KLEIN,item,1)
|
zenn1989/scoria-interlude
|
L2Jscoria-Game/data/scripts/quests/618_IntoTheFlame/__init__.py
|
Python
|
gpl-3.0
| 3,899
|
[
"VisIt"
] |
315a306b995479bf8a0f80197997941e5902999043aaf873916cca387163336a
|
"""
Acceptance tests for the Import and Export pages
"""
from nose.plugins.attrib import attr
from datetime import datetime
from flaky import flaky
from abc import abstractmethod
from bok_choy.promise import EmptyPromise
from .base_studio_test import StudioLibraryTest, StudioCourseTest
from ...fixtures.course import XBlockFixtureDesc
from ...pages.studio.import_export import ExportLibraryPage, ExportCoursePage, ImportLibraryPage, ImportCoursePage
from ...pages.studio.library import LibraryEditPage
from ...pages.studio.container import ContainerPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.staff_view import StaffPage
class ExportTestMixin(object):
    """
    Tests to run both for course and library export pages.
    """
    def test_export(self):
        """
        Scenario: I am able to export a course or library
        Given that I have a course or library
        And I click the download button
        The download will succeed
        And the file will be of the right MIME type.
        """
        # download_tarball() reports (download succeeded, MIME type correct).
        download_ok, mimetype_ok = self.export_page.download_tarball()
        for outcome in (download_ok, mimetype_ok):
            self.assertTrue(outcome)
@attr('shard_7')
class TestCourseExport(ExportTestMixin, StudioCourseTest):
    """
    Export tests for courses.
    """
    def setUp(self):  # pylint: disable=arguments-differ
        # StudioCourseTest's setUp installs the course fixture and populates
        # self.course_info; then open the course export page.
        super(TestCourseExport, self).setUp()
        self.export_page = ExportCoursePage(
            self.browser,
            self.course_info['org'], self.course_info['number'], self.course_info['run'],
        )
        self.export_page.visit()

    def test_header(self):
        """
        Scenario: I should see the correct text when exporting a course.
        Given that I have a course to export from
        When I visit the export page
        The correct header should be shown
        """
        self.assertEqual(self.export_page.header_text, 'Course Export')
@attr('shard_7')
class TestLibraryExport(ExportTestMixin, StudioLibraryTest):
    """
    Export tests for libraries.
    """
    def setUp(self):
        """
        Ensure a library exists and navigate to the library edit page.
        """
        super(TestLibraryExport, self).setUp()
        # self.library_key is provided by StudioLibraryTest's fixture setup.
        self.export_page = ExportLibraryPage(self.browser, self.library_key)
        self.export_page.visit()

    def test_header(self):
        """
        Scenario: I should see the correct text when exporting a library.
        Given that I have a library to export from
        When I visit the export page
        The correct header should be shown
        """
        self.assertEqual(self.export_page.header_text, 'Library Export')
class BadExportMixin(object):
    """
    Test mixin for bad exports.

    Subclasses must set `export_page` (the export page under test) and
    `edit_page` (the page of the broken component) in their setUp.
    """
    def test_bad_export(self):
        """
        Scenario: I should receive an error when attempting to export a broken course or library.
        Given that I have a course or library
        No error modal should be showing
        When I click the export button
        An error modal should be shown
        When I click the modal's action button
        I should arrive at the edit page for the broken component
        """
        # No error should be there to start.
        self.assertFalse(self.export_page.is_error_modal_showing())
        self.export_page.click_export()
        self.export_page.wait_for_error_modal()
        self.export_page.click_modal_button()
        # Bug fix: EmptyPromise is lazy -- the original code constructed the
        # promise and discarded it without calling fulfill(), so the test
        # never actually waited for (or verified) arrival at the edit page.
        # Also call is_browser_on_page() rather than referencing the bound
        # method, which is always truthy.
        EmptyPromise(
            lambda: self.edit_page.is_browser_on_page(),
            'Arrived at component edit page',
            timeout=30
        ).fulfill()
@attr('shard_7')
class TestLibraryBadExport(BadExportMixin, StudioLibraryTest):
    """
    Verify exporting a bad library causes an error.
    """
    def setUp(self):
        """
        Set up the pages and start the tests.
        """
        super(TestLibraryBadExport, self).setUp()
        # BadExportMixin.test_bad_export expects both export_page and
        # edit_page to be populated here.
        self.export_page = ExportLibraryPage(self.browser, self.library_key)
        self.edit_page = LibraryEditPage(self.browser, self.library_key)
        self.export_page.visit()

    def populate_library_fixture(self, library_fixture):
        """
        Create a library with a bad component.
        """
        # data='<' is deliberately malformed XML so the export breaks.
        library_fixture.add_children(
            XBlockFixtureDesc("problem", "Bad Problem", data='<'),
        )
@attr('shard_7')
class TestCourseBadExport(BadExportMixin, StudioCourseTest):
    """
    Verify exporting a bad course causes an error.
    """
    # Method name used by the page framework when waiting for readiness.
    ready_method = 'wait_for_component_menu'

    def setUp(self):  # pylint: disable=arguments-differ
        super(TestCourseBadExport, self).setUp()
        self.export_page = ExportCoursePage(
            self.browser,
            self.course_info['org'], self.course_info['number'], self.course_info['run'],
        )
        # self.unit is created by populate_course_fixture (called during
        # the superclass setUp).
        self.edit_page = ContainerPage(self.browser, self.unit.locator)
        self.export_page.visit()

    def populate_course_fixture(self, course_fixture):
        """
        Populate the course with a unit that has a bad problem.
        """
        # data='<' is deliberately malformed XML so the export breaks.
        self.unit = XBlockFixtureDesc('vertical', 'Unit')
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Main Section').add_children(
                XBlockFixtureDesc('sequential', 'Subsection').add_children(
                    self.unit.add_children(
                        XBlockFixtureDesc("problem", "Bad Problem", data='<')
                    )
                )
            )
        )
@attr('shard_7')
class ImportTestMixin(object):
    """
    Tests to run for both course and library import pages.

    Concrete subclasses must provide the class attributes `tarball_name`,
    `bad_tarball_name`, `import_page_class`, `landing_page_class`, and
    implement `page_args`.
    """
    def setUp(self):
        super(ImportTestMixin, self).setUp()
        self.import_page = self.import_page_class(*self.page_args())
        self.landing_page = self.landing_page_class(*self.page_args())
        self.import_page.visit()

    @abstractmethod
    def page_args(self):
        """
        Generates the args for initializing a page object.
        """
        return []

    @flaky  # TODO, fix this: TNL-4191
    def test_upload(self):
        """
        Scenario: I want to upload a course or library for import.
        Given that I have a library or course to import into
        And I have a valid .tar.gz file containing data to replace it with
        I can select the file and upload it
        And the page will give me confirmation that it uploaded successfully
        """
        self.import_page.upload_tarball(self.tarball_name)
        self.import_page.wait_for_upload()

    def test_import_timestamp(self):
        """
        Scenario: I perform a course / library import
        On import success, the page displays a UTC timestamp previously not visible
        And if I refresh the page, the timestamp is still displayed
        """
        self.assertFalse(self.import_page.is_timestamp_visible())

        # Get the time when the import has started.
        # import_page timestamp is in (MM/DD/YYYY at HH:mm) so replacing (second, microsecond) to
        # keep the comparison consistent
        upload_start_time = datetime.utcnow().replace(microsecond=0, second=0)
        self.import_page.upload_tarball(self.tarball_name)
        self.import_page.wait_for_upload()

        # Get the time when the import has finished.
        # import_page timestamp is in (MM/DD/YYYY at HH:mm) so replacing (second, microsecond) to
        # keep the comparison consistent
        upload_finish_time = datetime.utcnow().replace(microsecond=0, second=0)

        # NOTE(review): parsed_timestamp is read before
        # wait_for_timestamp_visible(); presumably wait_for_upload() already
        # guarantees the timestamp is rendered -- confirm.
        import_timestamp = self.import_page.parsed_timestamp
        self.import_page.wait_for_timestamp_visible()

        # Verify that 'import_timestamp' is between start and finish upload time
        self.assertLessEqual(
            upload_start_time,
            import_timestamp,
            "Course import timestamp should be upload_start_time <= import_timestamp <= upload_end_time"
        )
        self.assertGreaterEqual(
            upload_finish_time,
            import_timestamp,
            "Course import timestamp should be upload_start_time <= import_timestamp <= upload_end_time"
        )
        # Reload and confirm the timestamp persists across page visits.
        self.import_page.visit()
        self.import_page.wait_for_tasks(completed=True)
        self.import_page.wait_for_timestamp_visible()

    def test_landing_url(self):
        """
        Scenario: When uploading a library or course, a link appears for me to view the changes.
        Given that I upload a library or course
        A button will appear that contains the URL to the library or course's main page
        """
        self.import_page.upload_tarball(self.tarball_name)
        self.assertEqual(self.import_page.finished_target_url(), self.landing_page.url)

    def test_bad_filename_error(self):
        """
        Scenario: I should be reprimanded for trying to upload something that isn't a .tar.gz file.
        Given that I select a file that is an .mp4 for upload
        An error message will appear
        """
        self.import_page.upload_tarball('funny_cat_video.mp4')
        self.import_page.wait_for_filename_error()

    def test_task_list(self):
        """
        Scenario: I should see feedback checkpoints when uploading a course or library
        Given that I am on an import page
        No task checkpoint list should be showing
        When I upload a valid tarball
        Each task in the checklist should be marked confirmed
        And the task list should be visible
        """
        # The task list shouldn't be visible to start.
        self.assertFalse(self.import_page.is_task_list_showing(), "Task list shown too early.")
        self.import_page.wait_for_tasks()
        self.import_page.upload_tarball(self.tarball_name)
        self.import_page.wait_for_tasks(completed=True)
        self.assertTrue(self.import_page.is_task_list_showing(), "Task list did not display.")

    def test_bad_import(self):
        """
        Scenario: I should see a failed checklist when uploading an invalid course or library
        Given that I am on an import page
        And I upload a tarball with a broken XML file
        The tasks should be confirmed up until the 'Updating' task
        And the 'Updating' task should be marked failed
        And the remaining tasks should not be marked as started
        """
        self.import_page.upload_tarball(self.bad_tarball_name)
        self.import_page.wait_for_tasks(fail_on='Updating')
@attr('shard_7')
class TestEntranceExamCourseImport(ImportTestMixin, StudioCourseTest):
    """
    Tests the Course import page
    """
    # Fixture tarballs used by ImportTestMixin's tests.
    tarball_name = 'entrance_exam_course.2015.tar.gz'
    bad_tarball_name = 'bad_course.tar.gz'
    import_page_class = ImportCoursePage
    landing_page_class = CourseOutlinePage

    def page_args(self):
        return [self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']]

    def test_course_updated_with_entrance_exam(self):
        """
        Given that I visit an empty course before import
        I should not see a section named 'Section' or 'Entrance Exam'
        When I visit the import page
        And I upload a course that has an entrance exam section named 'Entrance Exam'
        And I visit the course outline page again
        The section named 'Entrance Exam' should now be available.
        And when I switch the view mode to student view and Visit CourseWare
        Then I see one section in the sidebar that is 'Entrance Exam'
        """
        self.landing_page.visit()
        # Should not exist yet.
        self.assertRaises(IndexError, self.landing_page.section, "Section")
        self.assertRaises(IndexError, self.landing_page.section, "Entrance Exam")
        self.import_page.visit()
        self.import_page.upload_tarball(self.tarball_name)
        self.import_page.wait_for_upload()
        self.landing_page.visit()
        # There should be two sections. 'Entrance Exam' and 'Section' on the landing page.
        # (section() raises IndexError if the named section is missing.)
        self.landing_page.section("Entrance Exam")
        self.landing_page.section("Section")
        self.landing_page.view_live()
        courseware = CoursewarePage(self.browser, self.course_id)
        courseware.wait_for_page()
        # In student view only the entrance exam is accessible at first.
        StaffPage(self.browser, self.course_id).set_staff_view_mode('Student')
        self.assertEqual(courseware.num_sections, 1)
        self.assertIn(
            "To access course materials, you must score", courseware.entrance_exam_message_selector.text[0]
        )
@attr('shard_7')
class TestCourseImport(ImportTestMixin, StudioCourseTest):
    """
    Tests the Course import page
    """
    # Fixture tarballs used by ImportTestMixin's tests.
    tarball_name = '2015.lzdwNM.tar.gz'
    bad_tarball_name = 'bad_course.tar.gz'
    import_page_class = ImportCoursePage
    landing_page_class = CourseOutlinePage

    def page_args(self):
        return [self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']]

    def test_course_updated(self):
        """
        Given that I visit an empty course before import
        I should not see a section named 'Section'
        When I visit the import page
        And I upload a course that has a section named 'Section'
        And I visit the course outline page again
        The section named 'Section' should now be available
        """
        self.landing_page.visit()
        # Should not exist yet.
        self.assertRaises(IndexError, self.landing_page.section, "Section")
        self.import_page.visit()
        self.import_page.upload_tarball(self.tarball_name)
        self.import_page.wait_for_upload()
        self.landing_page.visit()
        # There's a section named 'Section' in the tarball.
        self.landing_page.section("Section")

    def test_header(self):
        """
        Scenario: I should see the correct text when importing a course.
        Given that I have a course to import to
        When I visit the import page
        The correct header should be shown
        """
        self.assertEqual(self.import_page.header_text, 'Course Import')

    def test_multiple_course_import_message(self):
        """
        Given that I visit an empty course before import
        When I visit the import page
        And I upload a course with file name 2015.lzdwNM.tar.gz
        Then timestamp is visible after course is updated successfully
        And then I create a new course
        When I visit the import page of this new course
        Then timestamp is not visible
        """
        self.import_page.visit()
        self.import_page.upload_tarball(self.tarball_name)
        self.import_page.wait_for_upload()
        self.assertTrue(self.import_page.is_timestamp_visible())

        # Create a new course and visit the import page
        self.course_info = {
            'org': 'orgX',
            'number': self.unique_id + '_2',
            'run': 'test_run_2',
            'display_name': 'Test Course 2' + self.unique_id
        }
        self.install_course_fixture()
        self.import_page = self.import_page_class(*self.page_args())
        self.import_page.visit()
        # This is a new course that has never been imported, so no import
        # timestamp should be present.
        self.assertFalse(self.import_page.is_timestamp_visible())
@attr('shard_7')
class TestLibraryImport(ImportTestMixin, StudioLibraryTest):
    """
    Tests the Library import page
    """
    # Fixture tarballs used by ImportTestMixin's tests.
    tarball_name = 'library.HhJfPD.tar.gz'
    bad_tarball_name = 'bad_library.tar.gz'
    import_page_class = ImportLibraryPage
    landing_page_class = LibraryEditPage

    def page_args(self):
        return [self.browser, self.library_key]

    @flaky  # TODO: SOL-430
    def test_library_updated(self):
        """
        Given that I visit an empty library
        No XBlocks should be shown
        When I visit the import page
        And I upload a library that contains three XBlocks
        And I visit the library page
        Three XBlocks should be shown
        """
        self.landing_page.visit()
        self.landing_page.wait_until_ready()
        # No items should be in the library to start.
        self.assertEqual(len(self.landing_page.xblocks), 0)
        self.import_page.visit()
        self.import_page.upload_tarball(self.tarball_name)
        self.import_page.wait_for_upload()
        self.landing_page.visit()
        self.landing_page.wait_until_ready()
        # There are three blocks in the tarball.
        self.assertEqual(len(self.landing_page.xblocks), 3)

    def test_header(self):
        """
        Scenario: I should see the correct text when importing a library.
        Given that I have a library to import to
        When I visit the import page
        The correct header should be shown
        """
        self.assertEqual(self.import_page.header_text, 'Library Import')
|
solashirai/edx-platform
|
common/test/acceptance/tests/studio/test_import_export.py
|
Python
|
agpl-3.0
| 16,957
|
[
"VisIt"
] |
b18909a1b6a0b7740eeca58640823e0a614117caa7bd743d7c57a088b342c752
|
#! /usr/bin/env python3
###############################################################################
# #
# GALORE: Gaussian and Lorentzian broadening for simulated spectra #
# #
# Developed by Adam J. Jackson and Alex Ganose #
# at University College London (2017) #
# #
###############################################################################
# #
# This file is part of Galore. Galore is free software: you can redistribute #
# it and/or modify it under the terms of the GNU General Public License as #
# published by the Free Software Foundation, either version 3 of the License, #
# or (at your option) any later version. This program is distributed in the #
# hope that it will be useful, but WITHOUT ANY WARRANTY; without even the #
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. #
# See the GNU General Public License for more details. You should have #
# received a copy of the GNU General Public License along with this program. #
# If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
import os
import argparse
from collections import OrderedDict
from json import load as json_load
import logging
import warnings
import numpy as np
import galore
import galore.formats
import galore.plot
from galore import auto_limits
try:
import matplotlib
has_matplotlib = True
except ImportError:
has_matplotlib = False
def main():
    """CLI entry point: configure logging, parse arguments and dispatch."""
    # Log to a file while mirroring every record to the console.
    logging.basicConfig(filename='galore.log', level=logging.INFO)
    logging.getLogger().addHandler(logging.StreamHandler())

    # Third-party libraries are noisy; keep the CLI output clean.
    for noisy in ("matplotlib", "pymatgen"):
        warnings.filterwarnings("ignore", module=noisy)

    namespace = get_parser().parse_args()
    run(**vars(namespace))
def run(**kwargs):
    """Fill in a unit-dependent default sampling width, then dispatch.

    Routes to the PDOS pipeline when --pdos was given, otherwise to the
    single-series (total DOS / spectrum) pipeline.
    """
    if not kwargs['sampling']:
        # Default x-axis resolution depends on the chosen units; anything
        # unrecognised (including no units) falls back to 1e-2.
        default_widths = {'cm': 0.1, 'cm-1': 0.1,
                          'THz': 1e-3, 'thz': 1e-3,
                          'ev': 1e-2, 'eV': 1e-2}
        kwargs['sampling'] = default_widths.get(kwargs['units'], 1e-2)

    if kwargs['pdos']:
        pdos_from_files(**kwargs)
    else:
        simple_dos_from_files(**kwargs)
def pdos_from_files(return_plt=False, **kwargs):
    """Read input data, process for PDOS before plotting and/or writing

    Args:
        return_plt (bool): If True, return the pyplot object instead of writing
            or displaying plot output.
        **kwargs: See command reference for full argument list
    """
    pdos_plotting_data = galore.process_pdos(**kwargs)

    # For plotting and writing, "None" means "write to screen"
    # while False means "do nothing"
    if kwargs['plot'] or kwargs['plot'] is None:
        if 'style' in kwargs and kwargs['style'] is not None:
            # Deferred import: only load pyplot when a style is applied.
            import matplotlib.pyplot
            matplotlib.pyplot.style.use(kwargs['style'])
        plt = galore.plot.plot_pdos(pdos_plotting_data,
                                    **kwargs)  # flipx is included in kwargs
        if kwargs['overlay'] is not None:
            # Overlay experimental/reference data on top of the PDOS plot.
            plt = galore.plot.add_overlay(
                plt, kwargs['overlay'],
                overlay_offset=kwargs['overlay_offset'],
                overlay_scale=kwargs['overlay_scale'],
                overlay_style=kwargs['overlay_style'],
                overlay_label=kwargs['overlay_label'])
            plt.legend(loc='best')
        xlabel = galore.plot.guess_xlabel(units=kwargs['units'],
                                          flipx=kwargs['flipx'],
                                          energy_label=None)
        plt.xlabel(xlabel)
        if kwargs['ylabel'] is not None:
            plt.ylabel(kwargs['ylabel'])
        if return_plt:
            # Hand the live figure back to the caller (used by the API/docs).
            return plt
        elif kwargs['plot']:
            plt.savefig(kwargs['plot'])
        elif kwargs['plot'] is None:
            plt.show()

    # Tabular output: None -> stdout, path -> file, False -> skip.
    if kwargs['csv'] or kwargs['csv'] is None:
        galore.formats.write_pdos(pdos_plotting_data,
                                  filename=kwargs['csv'],
                                  filetype='csv',
                                  flipx=kwargs['flipx'])
    if kwargs['txt'] or kwargs['txt'] is None:
        galore.formats.write_pdos(pdos_plotting_data,
                                  filename=kwargs['txt'],
                                  filetype='txt',
                                  flipx=kwargs['flipx'])
def simple_dos_from_files(return_plt=False, **kwargs):
    """Generate a spectrum or DOS over one data series

    kwargs['input'] can be a string or a list containing one string.
    In addition to main kwargs documented for CLI

    Args:
        return_plt (bool): If True, return the pyplot object instead of writing
            or displaying plot output.
        **kwargs: See command reference for full argument list
    """
    x_values, broadened_data = galore.process_1d_data(**kwargs)

    # Warn the user when every output channel is disabled (None means
    # "write to screen"; False means "do nothing").
    if not any(((kwargs['csv'] is None), (kwargs['txt'] is None),
                (kwargs['plot'] is None),
                kwargs['csv'], kwargs['txt'], kwargs['plot'])):
        print("No output selected. Please use at least one of the output "
              "options (CSV, txt, plotting). For usage information, run "
              "galore with -h argument.")

    if kwargs['plot'] or kwargs['plot'] is None:
        if not has_matplotlib:
            # Plotting requested but matplotlib failed to import at load time.
            print("Can't plot, no Matplotlib")
        else:
            if 'style' in kwargs and kwargs['style'] is not None:
                import matplotlib.pyplot
                matplotlib.pyplot.style.use(kwargs['style'])
            plt = galore.plot.plot_tdos(x_values, broadened_data, **kwargs)
            if kwargs['ylabel'] is not None:
                plt.ylabel(kwargs['ylabel'])
            if return_plt:
                return plt
            elif kwargs['plot']:
                plt.savefig(kwargs['plot'])
            else:
                plt.show()

    # CSV output: None -> stdout, path -> file, False -> skip.
    if kwargs['csv'] is None:
        galore.formats.write_csv(x_values, broadened_data, filename=None)
    elif kwargs['csv']:
        galore.formats.write_csv(
            x_values, broadened_data, filename=kwargs['csv'])

    # Space-delimited text output: same None/path/False convention.
    if kwargs['txt'] is None:
        galore.formats.write_txt(x_values, broadened_data, filename=None)
    elif kwargs['txt']:
        galore.formats.write_txt(
            x_values, broadened_data, filename=kwargs['txt'])
def get_parser():
    """Parse command-line arguments. Function is used to build the CLI docs."""
    cli = argparse.ArgumentParser()
    cli.add_argument(
        'input', type=str, default='vasprun.xml', nargs='+',
        help='Input data file. The supported formats are vasprun.xml (VASP '
             'output), *.gpw (GPAW output), *.csv (comma-delimited text) '
             'and *.txt (space-delimited text).')
    # Broadening flags: `nargs='?'` makes the width optional, defaulting to
    # const=2 when the flag is given bare and False when absent.
    cli.add_argument(
        '-l', '--lorentzian', nargs='?', default=False, const=2, type=float,
        help='Apply Lorentzian broadening with specified width.')
    cli.add_argument(
        '-g', '--gaussian', nargs='?', default=False, const=2, type=float,
        help='Apply Gaussian broadening with specified width.')
    cli.add_argument(
        '-w', '--weighting', type=str, default=None,
        help='Apply cross-section weighting to data. "alka", "he2" and '
             '"yeh_haxpes" select tabulated data for valence band at 1486.6 '
             'eV, 40.8 eV and 8047.8 eV respectively. '
             'Numerical values will be interpreted as an energy in keV; '
             'for energies from 1-1500 eV cross-sections will be determined '
             'using a parametrisation from tabulated data. '
             'Alternatively, provide '
             'path to a JSON file with cross-section data.')
    cli.add_argument(
        '--units', '--x_units', type=str, default='',
        choices=('cm', 'cm-1', 'thz', 'THz',
                 'ev', 'eV', 'ry', 'Ry', 'ha', 'Ha'),
        help='Units for x axis (usually frequency or energy)')
    cli.add_argument(
        '--ylabel', type=str, default=None,
        help='Label for plot y-axis')
    # Output flags share a convention: absent -> False (skip), bare flag ->
    # const=None (write to screen), flag with value -> that file path.
    cli.add_argument(
        '--txt', nargs='?', default=False, const=None,
        help='Write broadened output as space-delimited text; file if path '
             'provided, otherwise write to standard output.')
    cli.add_argument(
        '--csv', nargs='?', default=False, const=None,
        help='Write broadened output as comma-separated values; file if path '
             'provided, otherwise write to standard output.')
    cli.add_argument(
        '-p', '--plot', nargs='?', default=False, const=None,
        help='Plot broadened spectrum. Plot to filename if provided,'
             ' otherwise display to screen.')
    cli.add_argument(
        '-d', '--sampling', type=float, default=False,
        help='Width, in units of x, of x-axis resolution. If not specified, '
             'default value is based on units. If units are not specified, '
             'default value is 1e-2.')
    cli.add_argument(
        '-k', '--spikes', '--spike', action='store_true',
        help='Resample data as "spikes" on a zero baseline. The default is to '
             'interpolate linearly between y-values, which is reasonable for '
             'distributions such as DOS. If the input data set only contains '
             'active energies/frequencies (e.g. IR modes) then you should use '
             '--spike mode. See tutorials for examples.')
    cli.add_argument(
        '--pdos', action="store_true", help='Use orbital-projected data')
    cli.add_argument(
        '--flipx', '--xflip', action="store_true",
        help='Negate x-values in output; this may be helpful for comparison '
             'with binding energy measurments.')
    cli.add_argument(
        '--xmin', type=float, default=None, help='Minimum x axis value')
    cli.add_argument(
        '--xmax', type=float, default=None, help='Maximum x axis value')
    cli.add_argument(
        '--ymin', type=float, default=0, help='Minimum y axis value')
    cli.add_argument(
        '--ymax', type=float, default=None, help='Maximum y axis value')
    cli.add_argument(
        '--style', type=str, nargs='+', default=['seaborn-colorblind'],
        help='Plotting style: a sequence of matplotlib styles and paths to '
             'style files. The default palette is called "seaborn-colorblind".'
        )
    cli.add_argument(
        '--overlay', type=str, default=None, help='Data file for overlay')
    cli.add_argument(
        '--overlay_scale', type=float, default=None,
        help='Y-axis scale factor for data overlay')
    cli.add_argument(
        '--overlay_offset', type=float, default=0,
        help='X-axis offset for data overlay')
    cli.add_argument(
        '--overlay_style', type=str, default='o',
        help='Matplotlib line style for overlay data. Default "o" for '
             'circles, "x:" for crosses joined by dotted lines, etc.')
    cli.add_argument(
        '--overlay_label', type=str, default=None,
        help='Legend label for data overlay'
        )
    return cli
if __name__ == '__main__':
    # CLI entry point.
    main()
|
SMTG-UCL/galore
|
galore/cli/galore.py
|
Python
|
gpl-3.0
| 12,020
|
[
"GPAW",
"Gaussian",
"VASP",
"pymatgen"
] |
9f90ef0dc184d384b3beeddb07c23b863cb4c33dbfedb7f3d3b1a94450ddb860
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2006-2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import unittest
from zope.interface import implementedBy
from zope.interface.verify import verifyClass
from zope.interface.exceptions import Invalid
from stoqlib.lib.introspection import get_interfaces_for_package
class TestInterfaces(unittest.TestCase):
    """Check that every stoqlib class satisfies the interfaces it declares."""

    def test_interfaces(self):
        # Walk every class stoqlib registers and verify each declared
        # zope interface is actually implemented correctly.
        for candidate in get_interfaces_for_package('stoqlib'):
            for interface in implementedBy(candidate):
                try:
                    verifyClass(interface, candidate)
                except Invalid as err:
                    message = "%s(%s): %s" % (candidate.__name__,
                                              interface.__name__, err)
                    self.fail(message)
|
andrebellafronte/stoq
|
tests/test_interfaces.py
|
Python
|
gpl-2.0
| 1,529
|
[
"VisIt"
] |
574310fbd88d9a96d617b4fa629512e1636aff2318dce6bc3ecf9999b0d1ad1c
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
# -- Demo cell: render two cylinders and set up a plane cutter ---------------
# Fix: `vtk` was used throughout this cell but only imported in the next
# notebook cell; import it here so the cell runs standalone.
import vtk

ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)

# create a renderwindowinteractor
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

# create source
source = vtk.vtkCylinderSource()
source.SetCenter(0, 0, 0)
source.SetRadius(5.0)
source.SetHeight(7.0)
source.SetResolution(100.0)

# Fix: removed the stray, incomplete line `apd=vtk.vtk`, which raised
# AttributeError (the vtk module has no attribute `vtk`) and aborted the
# script before anything was rendered.

source2 = vtk.vtkCylinderSource()
source2.SetCenter(20, 0, 0)
source2.SetRadius(2.0)
source2.SetHeight(10.0)
source2.SetResolution(100.0)

# mapper (SetInput is the pre-VTK6 pipeline API used throughout this cell)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(source.GetOutput())
mapper2 = vtk.vtkPolyDataMapper()
mapper2.SetInput(source2.GetOutput())

# actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor2 = vtk.vtkActor()
actor2.SetMapper(mapper2)

# assign actor to the renderer
ren.AddActor(actor)
ren.AddActor(actor2)

# Cut the first cylinder with a horizontal plane at z=5.
plane = vtk.vtkPlane()
plane.SetOrigin(0, 0, 5)
plane.SetNormal(0, 0, 1)

# create cutter
cutter = vtk.vtkCutter()
cutter.SetCutFunction(plane)
cutter.SetInputConnection(source.GetOutputPort())
cutter.Update()
# NOTE(review): cutterMapper is built but never attached to an actor, so the
# cut contour is not rendered -- presumably left as an exercise; confirm.
cutterMapper = vtk.vtkPolyDataMapper()
cutterMapper.SetInputConnection(cutter.GetOutputPort())

# enable user interface interactor
iren.Initialize()
renWin.Render()
iren.Start()
# <codecell>
#!/usr/bin/env python

# In this example vtkClipPolyData is used to cut a polygonal model
# of a cow in half. In addition, the open clip is closed by triangulating
# the resulting complex polygons.
# NOTE(review): the prose comments still describe the VTK cow example, but
# the code below actually builds two cylinder sources.
import vtk
from vtk.util.misc import vtkGetDataRoot
from vtk.util.colors import peacock, tomato

VTK_DATA_ROOT = vtkGetDataRoot()

# First start by reading a cow model. We also generate surface normals for
# prettier rendering.
source = vtk.vtkCylinderSource()
source.SetCenter(0,0,0)
source.SetRadius(5.0)
source.SetHeight(7.0)
source.SetResolution(100.0)
source2 = vtk.vtkCylinderSource()
source2.SetCenter(20,0,0)
source2.SetRadius(2.0)
source2.SetHeight(10.0)
source2.SetResolution(100.0)

# surface normals for each cylinder
cowNormals = vtk.vtkPolyDataNormals()
cowNormals.SetInputConnection(source.GetOutputPort())
cowNormals2 = vtk.vtkPolyDataNormals()
cowNormals2.SetInputConnection(source2.GetOutputPort())

# We clip with an implicit function. Here we use a plane positioned near
# the center of the cow model and oriented at an arbitrary angle.
plane = vtk.vtkPlane()
plane.SetOrigin(0, 3, 0)
plane.SetNormal(0, 1, 0)

# Here we are cutting the cow. Cutting creates lines where the cut
# function intersects the model. (Clipping removes a portion of the
# model but the dimension of the data does not change.)
#
# The reason we are cutting is to generate a closed polygon at the
# boundary of the clipping process. The cutter generates line
# segments, the stripper then puts them together into polylines. We
# then pull a trick and define polygons using the closed line
# segements that the stripper created.
cutEdges = vtk.vtkCutter()
cutEdges.SetInputConnection(cowNormals.GetOutputPort())
cutEdges.SetCutFunction(plane)
cutEdges.GenerateCutScalarsOn()
cutEdges.SetValue(0, 0.5)
cutStrips = vtk.vtkStripper()
cutStrips.SetInputConnection(cutEdges.GetOutputPort())
cutStrips.Update()
# reuse the stripper's polylines as polygon definitions
cutPoly = vtk.vtkPolyData()
cutPoly.SetPoints(cutStrips.GetOutput().GetPoints())
cutPoly.SetPolys(cutStrips.GetOutput().GetLines())

# same cut pipeline for the second cylinder
cutEdges2 = vtk.vtkCutter()
cutEdges2.SetInputConnection(cowNormals2.GetOutputPort())
cutEdges2.SetCutFunction(plane)
cutEdges2.GenerateCutScalarsOn()
cutEdges2.SetValue(0, 0.5)
cutStrips2 = vtk.vtkStripper()
cutStrips2.SetInputConnection(cutEdges2.GetOutputPort())
cutStrips2.Update()
cutPoly2 = vtk.vtkPolyData()
cutPoly2.SetPoints(cutStrips2.GetOutput().GetPoints())
cutPoly2.SetPolys(cutStrips2.GetOutput().GetLines())

# Triangle filter is robust enough to ignore the duplicate point at
# the beginning and end of the polygons and triangulate them.
cutTriangles = vtk.vtkTriangleFilter()
cutTriangles.SetInput(cutPoly)
cutMapper = vtk.vtkPolyDataMapper()
cutMapper.SetInput(cutPoly)
# NOTE(review): this SetInputConnection overrides the SetInput(cutPoly) call
# just above, so the mapper actually renders the triangulated output.
cutMapper.SetInputConnection(cutTriangles.GetOutputPort())
cutActor = vtk.vtkActor()
cutActor.SetMapper(cutMapper)
cutActor.GetProperty().SetColor(peacock)

cutTriangles2 = vtk.vtkTriangleFilter()
cutTriangles2.SetInput(cutPoly2)
cutMapper2 = vtk.vtkPolyDataMapper()
cutMapper2.SetInput(cutPoly2)
# NOTE(review): same SetInput/SetInputConnection override as above.
cutMapper2.SetInputConnection(cutTriangles2.GetOutputPort())
cutActor2 = vtk.vtkActor()
cutActor2.SetMapper(cutMapper2)
cutActor2.GetProperty().SetColor(tomato)

# Create graphics stuff
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

# Add the actors to the renderer, set the background and size
ren.AddActor(cutActor)
ren.AddActor(cutActor2)
ren.SetBackground(1, 1, 1)
ren.ResetCamera()
ren.GetActiveCamera().Azimuth(30)
ren.GetActiveCamera().Elevation(30)
ren.GetActiveCamera().Dolly(1.5)
ren.ResetCameraClippingRange()
renWin.SetSize(300, 300)
iren.Initialize()

# Lets you move the cut plane back and forth by invoking the function
# Cut with the appropriate plane value (essentially a distance from
# the original plane). This is not used in this code but should give
# you an idea of how to define a function to do this.
def Cut(v):
    """Move the cut-plane contour value to *v* and refresh both cut
    polygons, then re-render the window."""
    pipelines = (
        (cutEdges, cutStrips, cutPoly, cutMapper),
        (cutEdges2, cutStrips2, cutPoly2, cutMapper2),
    )
    for edges, strips, poly, mapper in pipelines:
        edges.SetValue(0, v)
        strips.Update()
        poly.SetPoints(strips.GetOutput().GetPoints())
        poly.SetPolys(strips.GetOutput().GetLines())
        mapper.Update()
    renWin.Render()
renWin.Render()
# start the interactor event loop (blocks until the window is closed)
iren.Start()

# <codecell>

# <codecell>
|
FedericoV/FractalMammaryLobule
|
tests/cutting.py
|
Python
|
gpl-3.0
| 5,668
|
[
"VTK"
] |
84090628d3a93cbe1734fe29916448065ad50305901d53534f8617b4434c8188
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from __future__ import print_function
from __future__ import absolute_import
import re
import math
from collections import defaultdict
from .exceptions import *
from . import qcformat
#import molpro_basissets
from . import options
from .pdict import PreservingDict
def harvest_output(outtext):
    """Function to separate portions of a Psi4 output file *outtext*.

    Scans Q-Chem output text for recognizable result blocks (HF, DFT,
    double-hybrid DFT, MP2) and for the normal-termination banner,
    collecting energies into a PreservingDict keyed by psivar names.

    Returns a tuple ``(psivar, psivar_coord, psivar_grad)``; coordinate
    and gradient harvesting is not implemented here, so the last two
    elements are always None.
    """
    psivar = PreservingDict()
    psivar_coord = None
    psivar_grad = None

    # Regex fragment matching a floating-point number in any common
    # notation, with an optional D/E exponent marker.
    NUMBER = "((?:[-+]?\\d*\\.\\d+(?:[DdEe][-+]?\\d+)?)|(?:[-+]?\\d+\\.\\d*(?:[DdEe][-+]?\\d+)?))"

    # # Process NRE
    # mobj = re.search(r'^\s+' + r'(?:Nuclear Repulsion Energy =)' + r'\s+' + NUMBER + r'\s+hartrees\s*$',
    #                  outtext, re.MULTILINE)
    # if mobj:
    #     print('matched nre')
    #     psivar['NUCLEAR REPULSION ENERGY'] = mobj.group(1)

    # Process HF UNTESTED
    mobj = re.search(
        r'^\s+' + r'(?:Nuclear Repulsion Energy =)' + r'\s+' + NUMBER + r'\s+hartrees\s*' +
        r'(?:.*?)' +
        r'(?:Hartree-Fock SCF calculation)' +
        r'(?:.*?)' +
        r'^\s+\d+\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + 'Convergence criterion met' + r'\s*$',
        outtext, re.MULTILINE | re.DOTALL)
    if mobj:
        print('matched hf')
        psivar['NUCLEAR REPULSION ENERGY'] = mobj.group(1)
        psivar['HF TOTAL ENERGY'] = mobj.group(2)

    # Process DFT-D2 UNTESTED
    # mobj = re.search(
    #     r'^\s+' + r'(?:Nuclear Repulsion Energy =)' + r'\s+' + NUMBER + r'\s+hartrees\s*' +
    #     r'(?:.*?)' +
    #     r'(?:HF-DFT SCF calculation)' +
    #     r'(?:.*?)' +
    #     r'^\s+' + r'(?:Empirical dispersion =)' + r'\s+' + NUMBER + r'\s+hartree\s*' +
    #     r'(?:.*?)' +
    #     r'^\s+\d+\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + 'Convergence criterion met' + r'\s*$',
    #     outtext, re.MULTILINE | re.DOTALL)
    # if mobj:
    #     print('matched dft-d2')
    #     psivar['NUCLEAR REPULSION ENERGY'] = mobj.group(1)
    #     psivar['DISPERSION CORRECTION ENERGY'] = mobj.group(2)
    #     psivar['DFT TOTAL ENERGY'] = mobj.group(3)
    #     psivar['DFT FUNCTIONAL TOTAL ENERGY'] = mobj.group(3) - mboj.group(2)

    # Process DFT-D3 UNTESTED
    # mobj = re.search(
    #     r'(?:grimme3)' + r'\s*' +
    #     r'(?:.*?)' +
    #     r'^\s+' + r'(?:Nuclear Repulsion Energy =)' + r'\s+' + NUMBER + r'\s+hartrees\s*' +
    #     r'(?:.*?)' +
    #     r'(?:HF-DFT SCF calculation)' +
    #     r'(?:.*?)' +
    #     r'^\s+\d+\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + 'Convergence criterion met' + r'\s*$',
    #     outtext, re.MULTILINE | re.DOTALL)
    # if mobj:
    #     print('matched dft-d3')
    #     psivar['NUCLEAR REPULSION ENERGY'] = mobj.group(1)
    #     psivar['DISPERSION CORRECTION ENERGY'] = None
    #     psivar['DFT TOTAL ENERGY'] = mobj.group(2)
    #     psivar['DFT FUNCTIONAL TOTAL ENERGY'] = None

    # /^((?!PART).)*$/
    # Process DFT no-D or internal-D
    mobj = re.search(
        # r'((?!grimme3).)*' + r'\s*' +  # severe negative performance impact
        # r'(?:.*?)' +
        r'^\s+' + r'(?:Nuclear Repulsion Energy =)' + r'\s+' + NUMBER + r'\s+hartrees\s*' +
        r'(?:.*?)' +
        r'(?:HF-DFT SCF calculation)' +
        r'(?:.*?)' +
        r'^\s+\d+\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + 'Convergence criterion met' + r'\s*$',
        outtext, re.MULTILINE | re.DOTALL | re.IGNORECASE)
    if mobj:
        print('matched dft')
        psivar['NUCLEAR REPULSION ENERGY'] = mobj.group(1)
        #psivar['DFT TOTAL ENERGY'] = mobj.group(2)
        psivar['DFT FUNCTIONAL TOTAL ENERGY'] = mobj.group(2)
        # with negative lookahead
        #psivar['NUCLEAR REPULSION ENERGY'] = mobj.group(2)
        #psivar['DFT TOTAL ENERGY'] = mobj.group(3)
        #psivar['DFT FUNCTIONAL TOTAL ENERGY'] = mobj.group(3)

    # Process DHDFT no-D or internal-D
    # NOTE(review): only the MP2 correlation part (group 1) is harvested; the
    # MP2 total energy captured as group 2 is currently unused.
    mobj = re.search(
        # negative grimme3 lookahead goes here
        #r'^\s+' + r'(?:Nuclear Repulsion Energy =)' + r'\s+' + NUMBER + r'\s+hartrees\s*' +
        #r'(?:.*?)' +
        r'(?:HF-DFT SCF calculation)' +
        r'(?:.*?)' +
        #r'^\s+\d+\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + 'Convergence criterion met' + r'\s*' +
        #r'(?:.*?)' +
        # need a not "Hartree-Fock SCF calculation" here so DFT @@@ MP2 not caught?
        r'^\s*' + r'(?:Total (?:RI)?MP2 correlation energy =)' + r'\s+' + NUMBER + r'\s+' + r'au' + r'\s*' +
        r'^\s+' + r'(?:(?:RI)?MP2 total energy =)' + r'\s+' + NUMBER + r'\s+' + r'au' + r'\s*$',
        outtext, re.MULTILINE | re.DOTALL | re.IGNORECASE)
    if mobj:
        print('matched dhdft')
        #psivar['NUCLEAR REPULSION ENERGY'] = mobj.group(1)
        #psivar['DFT TOTAL ENERGY'] = mobj.group(2)
        #psivar['DFT FUNCTIONAL TOTAL ENERGY'] = mobj.group(2)
        psivar['DOUBLE-HYBRID CORRECTION ENERGY'] = mobj.group(1)

    # Process MP2
    mobj = re.search(
        r'(?:Hartree-Fock SCF calculation)' +
        r'(?:.*?)' +
        #r'^\s+\d+\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + 'Convergence criterion met' + r'\s*' +
        #r'(?:.*?)' +
        # need a not "Hartree-Fock SCF calculation" here so DFT @@@ MP2 not caught?
        r'^\s*' + r'(?:Total RIMP2 correlation energy =)' + r'\s+' + NUMBER + r'\s+' + r'au' + r'\s*' +
        r'^\s+' + r'(?:RIMP2 total energy =)' + r'\s+' + NUMBER + r'\s+' + r'au' + r'\s*$',
        outtext, re.MULTILINE | re.DOTALL | re.IGNORECASE)
    if mobj:
        print('matched mp2')
        #psivar['NUCLEAR REPULSION ENERGY'] = mobj.group(1)
        #psivar['DFT TOTAL ENERGY'] = mobj.group(2)
        #psivar['DFT FUNCTIONAL TOTAL ENERGY'] = mobj.group(2)
        psivar['MP2 CORRELATION ENERGY'] = mobj.group(1)
        #psivar['DOUBLE-HYBRID CORRECTION ENERGY'] = mobj.group(1)

    # NOTE(review): debug print of the harvested variables; consider removing
    # or routing through logging.
    print(psivar)

    # TODO: need to split on 'Q-Chem begins' or 'Quantum Leap' or something

    # # Process DFT no-D or internal-D  WORKS BUT LOOKAHEAD VERY SLOW
    # mobj = re.search(
    #     r'((?!grimme3).)*' + r'\s*' +  # severe negative performance impact
    #     TODO note neg lookahead insufficient since option could be negated
    #     r'(?:.*?)' +
    #     r'^\s+' + r'(?:Nuclear Repulsion Energy =)' + r'\s+' + NUMBER + r'\s+hartrees\s*' +
    #     r'(?:.*?)' +
    #     r'(?:HF-DFT SCF calculation)' +
    #     r'(?:.*?)' +
    #     r'^\s+\d+\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + 'Convergence criterion met' + r'\s*$',
    #     outtext, re.MULTILINE | re.DOTALL | re.IGNORECASE)
    # if mobj:
    #     print('matched dft')
    #     psivar['NUCLEAR REPULSION ENERGY'] = mobj.group(2)
    #     psivar['DFT TOTAL ENERGY'] = mobj.group(3)
    #     psivar['DFT FUNCTIONAL TOTAL ENERGY'] = mobj.group(3)

    # # Process PsiVariables
    # mobj = re.search(r'^(?:  Variable Map:)\s*' +
    #                  r'^\s*(?:-+)\s*' +
    #                  r'^(.*?)' +
    #                  r'^(?:\s*?)$',
    #                  outtext, re.MULTILINE | re.DOTALL)
    #
    # if mobj:
    #     for pv in mobj.group(1).split('\n'):
    #         submobj = re.search(r'^\s+' + r'"(.+?)"' + r'\s+=>\s+' + NUMBER + r'\s*$', pv)
    #         if submobj:
    #             psivar['%s' % (submobj.group(1))] = submobj.group(2)

    # Process Completion: Q-Chem's normal-termination banner
    mobj = re.search(r'Thank you very much for using Q-Chem.  Have a nice day.',
                     outtext, re.MULTILINE)
    if mobj:
        psivar['SUCCESS'] = True

    return psivar, psivar_coord, psivar_grad
def muster_memory(mem):
    """Return ``(text, options)`` setting the Q-Chem total-memory keyword.

    *mem* is the requested memory in MB; it is coerced to int and stored
    under QCHEM_MEM_TOTAL with 'clobber' set so it overrides user input.
    *text* is literal input-file text (always empty here).
    """
    options = defaultdict(lambda: defaultdict(dict))
    options['QCHEM']['QCHEM_MEM_TOTAL']['value'] = int(mem)
    #options['QCHEM']['QCHEM_CC_MEMORY']['value'] = int(mem)
    #options['QCHEM']['QCHEM_MEM_STATIC']['value'] = int(mem)

    # mark every keyword we set as authoritative
    for keyword in list(options['QCHEM']):
        options['QCHEM'][keyword]['clobber'] = True

    return '', options
def muster_basis(bas):
    """Return ``(text, options)`` setting the Q-Chem basis-set keyword.

    *bas* is the basis-set name, stored untranslated under QCHEM_BASIS
    with 'clobber' set so it overrides any user-supplied keyword.
    *text* is literal input-file text (always empty here).

    (Docstring fixed: it was copy-pasted from muster_memory and wrongly
    described a memory transformation.)
    """
    text = ''

    # prepare the basis keyword to be set as a c-side keyword
    options = defaultdict(lambda: defaultdict(dict))
    options['QCHEM']['QCHEM_BASIS']['value'] = bas

    for item in options['QCHEM']:
        options['QCHEM'][item]['clobber'] = True

    return text, options
class Infile(qcformat.InputFormat2):
    """Q-Chem input-file builder on top of the generic InputFormat2 scaffold."""

    def __init__(self, mem, mol, mtd, der, opt):
        # mem: memory in MB; mol: molecule object; mtd: method string;
        # der: derivative level; opt: user keyword options.
        qcformat.InputFormat2.__init__(self, mem, mol, mtd, der, opt)

        # #print self.method, self.molecule.nactive_fragments()
        # if 'sapt' in self.method and self.molecule.nactive_fragments() != 2:
        #     raise FragmentCountError("""Requested molecule has %d, not 2, fragments.""" % (self.molecule.nactive_fragments()))
        #
        ## # memory in MB --> MW
        ## self.memory = int(math.ceil(mem / 8.0))
        ## # auxiliary basis sets
        ## [self.unaugbasis, self.augbasis, self.auxbasis] = self.corresponding_aux_basis()

    def format_infile_string(self):
        """Assemble and return the complete Q-Chem input file as a string."""
        # Handle memory and comment
        cmtcmd = """$comment\n%s\n$end\n\n""" % (self.molecule.tagline)
        memcmd, memkw = muster_memory(self.memory)

        # Handle molecule and basis set
        molcmd, molkw = self.molecule.format_molecule_for_qchem(mixedbas=False)
        # TODO mixedbas=True once handling basis sets

        # not translating basis at present
        _bascmd, baskw = muster_basis(self.basis)

        # format global convergence directions
        _cdscmd, cdskw = muster_cdsgroup_options()

        # Handle calc type and quantum chemical method
        mdccmd, mdckw = procedures['energy'][self.method](self.method, self.dertype)

        ## make options from imdb only user options (currently non-existent). set basis and castup from here.

        # Handle driver vs input/default keyword reconciliation, letting each
        # later source fold into the accumulated user keywords
        userkw = self.options  # p4util.prepare_options_for_modules()
        userkw = options.reconcile_options2(userkw, memkw)
        userkw = options.reconcile_options2(userkw, molkw)
        userkw = options.reconcile_options2(userkw, baskw)
        #userkw = qcdb.options.reconcile_options(userkw, psikw)
        userkw = options.reconcile_options2(userkw, cdskw)
        userkw = options.reconcile_options2(userkw, mdckw)

        # Handle conversion of psi4 keyword structure into cfour format
        optcmd = options.prepare_options_for_qchem(userkw)

        # Handle text to be passed untouched to psi4
        litcmd = ''

        # Assemble infile pieces
        return cmtcmd + memcmd + molcmd + optcmd + mdccmd + litcmd
#'hf'
#'df-hf'
#'b3lyp'
#'blyp'
#'bp86'
#'fno-ccsd(t)'
#'df-ccsd(t)'
#'fno-df-ccsd(t)'
#'df-b97-d'
#'df-b97-d3'
#'pbe0-2'
#'dsd-pbep86'
#'wb97x-2'
#'DLdf+d'
#'DLdf+d09'
#'df-b3lyp'
#'df-b3lyp-d'
#'df-b3lyp-d3'
#'df-wb97x-d'
def muster_cdsgroup_options():
    """Return ``(text, options)`` carrying the fixed Q-Chem convergence,
    grid, and symmetry settings this module always applies."""
    settings = {
        'QCHEM_MEM_STATIC': 512,
        'QCHEM_XC_GRID': '000100000302',
        'QCHEM_THRESH': 12,
        'QCHEM_SCF_CONVERGENCE': 7,
        'QCHEM_MAX_SCF_CYCLES': 200,
        'QCHEM_SYM_IGNORE': True,
        'QCHEM_SYMMETRY': False,
        'QCHEM_INTEGRALS_BUFFER': 512,
    }
    options = defaultdict(lambda: defaultdict(dict))
    for keyword, value in settings.items():
        options['QCHEM'][keyword]['value'] = value
    return '', options
def muster_modelchem(name, dertype):
    """Transform calculation method *name* and derivative level *dertype*
    into Q-Chem keyword options.

    Parameters
    ----------
    name : str
        Method name, matched case-insensitively.  Only 'wb97x-v' is
        currently wired up.
    dertype : int
        Derivative level; only 0 (single-point energy) is supported.

    Returns
    -------
    (text, options)
        *text* is literal input-file text (currently always empty);
        *options* is a nested defaultdict of Q-Chem keywords.

    Raises
    ------
    ValidationError
        If *dertype* is not 0 or *name* is not a recognized method.
    """
    text = ''
    lowername = name.lower()
    options = defaultdict(lambda: defaultdict(dict))

    # Calculation type: only single-point energies are implemented.
    if dertype == 0:
        options['QCHEM']['QCHEM_JOBTYPE']['value'] = 'SP'
    else:
        raise ValidationError("""Requested Psi4 dertype %d is not available.""" % (dertype))

    # Quantum chemical method.
    if lowername == 'wb97x-v':
        options['QCHEM']['QCHEM_EXCHANGE']['value'] = 'omegaB97X-V'
    else:
        # BUG FIX: this message previously used %d with the string
        # *lowername*, so unknown methods raised TypeError at format time
        # instead of the intended ValidationError.
        raise ValidationError("""Requested Psi4 computational methods %s is not available.""" % (lowername))

    # NOTE: the large commented-out templates for additional methods
    # (df-mp2, sapt0/2+, df-b97-d3, mrccsdt(q), ...) and the cfour-style
    # clobber bookkeeping were removed as dead code; see version control
    # history if they are needed as references.

    return text, options
# Dispatch table: driver name -> {method name -> option-building function}.
# Entries are invoked as procedures['energy'][method](method, dertype).
procedures = {
    'energy': {
        'wb97x-v' : muster_modelchem,
    }
}

# Alias for the table of methods available to the 'energy' driver.
qcmtdIN = procedures['energy']
def psi4_list():
    """Return the sorted names of methods this module can run energies for."""
    return sorted(procedures['energy'])
|
kratman/psi4public
|
psi4/driver/qcdb/qchem.py
|
Python
|
gpl-2.0
| 18,510
|
[
"CFOUR",
"Psi4",
"Q-Chem"
] |
513e2da05f9cda6f9727c043eee91ad00d7b9e9e6b049e22c4eab3410fbadf18
|
# revlog.py - storage back-end for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""Storage back-end for Mercurial.
This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""
# import stuff from node for others to import from revlog
from node import bin, hex, nullid, nullrev
from i18n import _
import ancestor, mdiff, parsers, error, util, dagutil
import struct, zlib, errno
_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress
_sha = util.sha1
# revlog header flags
REVLOGV0 = 0
REVLOGNG = 1
# format-wide feature bits stored in the upper 16 bits of the version word
REVLOGNGINLINEDATA = (1 << 16)
REVLOGGENERALDELTA = (1 << 17)
REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
REVLOG_DEFAULT_FORMAT = REVLOGNG
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA

# revlog index flags (per-revision); none are defined here (value 0)
REVIDX_KNOWN_FLAGS = 0

# max size of revlog with inline data
_maxinline = 131072
_chunksize = 1048576

# re-export the error types under their historical local names
RevlogError = error.RevlogError
LookupError = error.LookupError
def getoffset(q):
    """Extract the data-file offset from a packed index entry field
    (the bits above the low 16 flag bits)."""
    return int(q) >> 16
def gettype(q):
    """Extract the 16-bit type/flags field from a packed index entry field."""
    return int(q) & 0xFFFF
def offset_type(offset, type):
    # Pack a data-file offset and a 16-bit type/flags field into the single
    # integer stored in slot 0 of an index entry (inverse of
    # getoffset/gettype).  NOTE: `long` makes this Python-2-only.
    return long(long(offset) << 16 | type)
# Pre-seeded hash state for the null node id; hash() below copies this
# instead of re-hashing nullid for every revision with a null parent.
nullhash = _sha(nullid)
def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    # As of now, if one of the parent node is null, p2 is null
    if p2 == nullid:
        # deep copy of a hash is faster than creating one
        s = nullhash.copy()
        s.update(p1)
    else:
        # none of the parent nodes are nullid
        # sort the parents so the result is independent of their order
        l = [p1, p2]
        l.sort()
        s = _sha(l[0])
        s.update(l[1])
    s.update(text)
    return s.digest()
def decompress(bin):
    """ decompress the given input

    The first byte of a stored chunk selects the encoding:
      empty or NUL first byte -> returned unchanged
      'x' -> zlib stream, inflated
      'u' -> stored uncompressed, leading marker byte stripped
      anything else -> RevlogError
    """
    if not bin:
        return bin
    t = bin[0]
    if t == '\0':
        return bin
    if t == 'x':
        try:
            return _decompress(bin)
        except zlib.error, e:
            raise RevlogError(_("revlog decompress error: %s") % str(e))
    if t == 'u':
        return bin[1:]
    raise RevlogError(_("unknown compression type %r") % t)
# index v0 record layout (matches the ">4l20s20s20s" struct below):
#  4 bytes: offset
#  4 bytes: compressed length
#  4 bytes: base rev
#  4 bytes: link rev
# 20 bytes: parent 1 nodeid
# 20 bytes: parent 2 nodeid
# 20 bytes: nodeid
# (the comments previously claimed 32-byte node fields; the format is 20s)
indexformatv0 = ">4l20s20s20s"
# byte offset of the nodeid inside a v0 record: 4*4 + 20 + 20
v0shaoffset = 56
class revlogoldio(object):
    """Index reader/writer for the original (v0) revlog format."""

    def __init__(self):
        # fixed byte size of one v0 index record
        self.size = struct.calcsize(indexformatv0)

    def parseindex(self, data, inline):
        """Parse raw v0 index *data* into (index, nodemap, chunkcache).

        The *inline* argument is accepted for interface parity with
        revlogio but is not used here.  Entries are converted to the
        revlogv1 in-memory tuple layout as they are read.
        """
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = _unpack(indexformatv0, cur)
            # transform to revlogv1 format: pack offset+flags, no
            # uncompressed length (-1), and map parent nodeids (e[4], e[5])
            # to revision numbers via the nodemap built so far
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        # add the magic null revision at -1
        index.append((0, 0, 0, -1, -1, -1, -1, nullid))

        return index, nodemap, None

    def packentry(self, entry, node, version, rev):
        """Serialize a v1-style in-memory *entry* back to v0 on-disk bytes.

        *node* is a rev->nodeid callable used to turn parent revision
        numbers back into nodeids.  v0 cannot store per-entry flags.
        """
        if gettype(entry[0]):
            raise RevlogError(_("index entry flags need RevlogNG"))
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return _pack(indexformatv0, *e2)
# index ng record layout (matches the ">Qiiiiii20s12x" struct below):
#  6 bytes: offset
#  2 bytes: flags          (offset+flags share the leading Q)
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid         (20-byte hash padded with 12 null bytes)
indexformatng = ">Qiiiiii20s12x"
# byte offset of the nodeid inside an ng record: 8 + 6*4
ngshaoffset = 32
# version/flags header word packed over the first entry's leading bytes
versionformat = ">I"
class revlogio(object):
    """Index reader/writer for the RevlogNG (v1) format."""

    def __init__(self):
        # fixed byte size of one ng index record
        self.size = struct.calcsize(indexformatng)

    def parseindex(self, data, inline):
        # call the C implementation to parse the index data
        index, cache = parsers.parse_index2(data, inline)
        return index, getattr(index, 'nodemap', None), cache

    def packentry(self, entry, node, version, rev):
        p = _pack(indexformatng, *entry)
        if rev == 0:
            # the first entry doubles as the file header: overwrite its
            # leading 4 bytes with the version/flags word
            p = _pack(versionformat, version) + p[4:]
        return p
class revlog(object):
"""
the underlying revision storage object
A revlog consists of two parts, an index and the revision data.
The index is a file with a fixed record size containing
information on each revision, including its nodeid (hash), the
nodeids of its parents, the position and offset of its data within
the data file, and the revision it's based on. Finally, each entry
contains a linkrev entry that can serve as a pointer to external
data.
The revision data itself is a linear collection of data chunks.
Each chunk represents a revision and is usually represented as a
delta against the previous chunk. To bound lookup time, runs of
deltas are limited to about 2 times the length of the original
version data. This makes retrieval of a version proportional to
its size, or O(1) relative to the number of revisions.
Both pieces of the revlog are written to in an append-only
fashion, which means we never need to rewrite a file to insert or
remove data, and can use some simple techniques to avoid the need
for locking while reading.
"""
def __init__(self, opener, indexfile):
    """
    create a revlog object

    opener is a function that abstracts the file opening operation
    and can be used to implement COW semantics or the like.
    """
    self.indexfile = indexfile
    # the data file lives next to the index: "xx.i" -> "xx.d"
    self.datafile = indexfile[:-2] + ".d"
    self.opener = opener
    self._cache = None
    self._basecache = (0, 0)
    self._chunkcache = (0, '')
    self.index = []
    self._pcache = {}
    self._nodecache = {nullid: nullrev}
    self._nodepos = None

    # pick the format version for a new revlog from the opener options;
    # an existing index file overrides this below
    v = REVLOG_DEFAULT_VERSION
    opts = getattr(opener, 'options', None)
    if opts is not None:
        if 'revlogv1' in opts:
            if 'generaldelta' in opts:
                v |= REVLOGGENERALDELTA
        else:
            v = 0

    i = ''
    self._initempty = True
    try:
        f = self.opener(self.indexfile)
        i = f.read()
        f.close()
        if len(i) > 0:
            # the first 4 bytes of an existing index carry version + flags
            v = struct.unpack(versionformat, i[:4])[0]
            self._initempty = False
    except IOError, inst:
        # a missing index file just means an empty revlog
        if inst.errno != errno.ENOENT:
            raise

    self.version = v
    self._inline = v & REVLOGNGINLINEDATA
    self._generaldelta = v & REVLOGGENERALDELTA
    # split the version word into feature flags (high bits) and format
    flags = v & ~0xFFFF
    fmt = v & 0xFFFF
    if fmt == REVLOGV0 and flags:
        raise RevlogError(_("index %s unknown flags %#04x for format v0")
                          % (self.indexfile, flags >> 16))
    elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
        raise RevlogError(_("index %s unknown flags %#04x for revlogng")
                          % (self.indexfile, flags >> 16))
    elif fmt > REVLOGNG:
        raise RevlogError(_("index %s unknown format %d")
                          % (self.indexfile, fmt))

    self._io = revlogio()
    if self.version == REVLOGV0:
        self._io = revlogoldio()
    try:
        d = self._io.parseindex(i, self._inline)
    except (ValueError, IndexError):
        raise RevlogError(_("index %s is corrupted") % (self.indexfile))
    self.index, nodemap, self._chunkcache = d
    if nodemap is not None:
        # the C index exposes its own node map; share it
        self.nodemap = self._nodecache = nodemap
    if not self._chunkcache:
        self._chunkclear()
def tip(self):
    """Return the node id of the most recent revision."""
    # the index carries a trailing null-sentinel entry, hence -2
    tiprev = len(self.index) - 2
    return self.node(tiprev)
def __len__(self):
    """Number of revisions (excluding the index's sentinel entry)."""
    entries = len(self.index)
    return entries - 1
def __iter__(self):
    # Iterate revision numbers 0 .. len(self)-1 (py2 xrange).
    return iter(xrange(len(self)))
def revs(self, start=0, stop=None):
    """iterate over all rev in this revlog (from start to stop)

    When *stop* is given it is included in the iteration; when omitted
    iteration runs to the last revision.  If start > stop, revisions
    are yielded in descending order.
    """
    step = 1
    if stop is not None:
        if start > stop:
            step = -1
        # make *stop* inclusive by moving the endpoint one step past it
        stop += step
    else:
        stop = len(self)
    return xrange(start, stop, step)
@util.propertycache
def nodemap(self):
    # Lazily-built node->rev mapping.  Calling rev() on the first node
    # populates self._nodecache as a side effect; propertycache then
    # stores the returned dict on the instance.
    self.rev(self.node(0))
    return self._nodecache
def hasnode(self, node):
    """Return True if *node* is known to this revlog, else False."""
    try:
        self.rev(node)
    except KeyError:
        return False
    return True
def clearcaches(self):
    # Drop cached node-lookup state.  The C-backed index object provides
    # a clearcaches() method; the pure-python fallback cache is a plain
    # dict, so reset it to just the null node instead.
    try:
        self._nodecache.clearcaches()
    except AttributeError:
        self._nodecache = {nullid: nullrev}
        self._nodepos = None
def rev(self, node):
    """Return the revision number for *node*; raise LookupError if absent."""
    try:
        return self._nodecache[node]
    except RevlogError:
        # parsers.c radix tree lookup failed
        raise LookupError(node, self.indexfile, _('no node'))
    except KeyError:
        # pure python cache lookup failed: scan the index backwards from
        # the last unscanned position, memoizing every node seen so
        # later lookups get cheaper
        n = self._nodecache
        i = self.index
        p = self._nodepos
        if p is None:
            # start past the sentinel entry at the end of the index
            p = len(i) - 2
        for r in xrange(p, -1, -1):
            v = i[r][7]
            n[v] = r
            if v == node:
                self._nodepos = r - 1
                return r
        raise LookupError(node, self.indexfile, _('no node'))
def node(self, rev):
    """Return the node id (binary hash) of revision *rev*."""
    entry = self.index[rev]
    return entry[7]
def linkrev(self, rev):
    """Return the link revision stored for *rev*."""
    entry = self.index[rev]
    return entry[4]
def parents(self, node):
    """Return the parent node ids of *node* as a (p1, p2) tuple."""
    index = self.index
    entry = index[self.rev(node)]
    # entries store parent *revisions*; map each back to its node id
    p1 = index[entry[5]][7]
    p2 = index[entry[6]][7]
    return p1, p2
def parentrevs(self, rev):
    """Return the parent revision numbers of *rev* as a pair."""
    entry = self.index[rev]
    return entry[5:7]
def start(self, rev):
    """Return the data-file offset where the chunk for *rev* begins."""
    packed = self.index[rev][0]
    # the high bits hold the offset; the low 16 bits hold the flags
    return int(packed >> 16)
def end(self, rev):
    """Return the data-file offset just past the chunk for *rev*."""
    offset = self.start(rev)
    return offset + self.length(rev)
def length(self, rev):
    """Return the stored (compressed) chunk length for *rev*."""
    entry = self.index[rev]
    return entry[1]
def chainbase(self, rev):
    """Return the base revision of the delta chain containing *rev*."""
    index = self.index
    # follow base pointers until an entry is its own base
    while True:
        base = index[rev][3]
        if base == rev:
            return base
        rev = base
def flags(self, rev):
    """Return the 16-bit per-revision flags for *rev*."""
    packed = self.index[rev][0]
    return packed & 0xFFFF
def rawsize(self, rev):
    """return the length of the uncompressed text for a given revision"""
    stored = self.index[rev][2]
    if stored >= 0:
        return stored
    # a negative stored size means "unknown": rebuild the full text
    # and measure it
    return len(self.revision(self.node(rev)))

size = rawsize
def ancestors(self, revs, stoprev=0, inclusive=False):
    """Generate the ancestors of 'revs' in reverse topological order.
    Does not generate revs lower than stoprev.

    See the documentation for ancestor.lazyancestors for more details."""
    # thin wrapper; all the work happens lazily in ancestor.lazyancestors
    return ancestor.lazyancestors(self, revs, stoprev=stoprev,
                                  inclusive=inclusive)
def descendants(self, revs):
    """Generate the descendants of 'revs' in revision order.

    Yield a sequence of revision numbers starting with a child of
    some rev in revs, i.e., each revision is *not* considered a
    descendant of itself.  Results are ordered by revision number (a
    topological sort)."""
    first = min(revs)
    if first == nullrev:
        # nullrev is an ancestor of everything: yield every revision
        for i in self:
            yield i
        return

    seen = set(revs)
    for i in self.revs(start=first + 1):
        for x in self.parentrevs(i):
            if x != nullrev and x in seen:
                # one of i's parents is already known to descend from
                # (or belong to) revs, so i is a descendant too
                seen.add(i)
                yield i
                break
def findcommonmissing(self, common=None, heads=None):
    """Return a tuple of the ancestors of common and the ancestors of heads
    that are not ancestors of common.  In revset terminology, we return the
    tuple:

      ::common, (::heads) - (::common)

    The list is sorted by revision number, meaning it is
    topologically sorted.

    'heads' and 'common' are both lists of node IDs.  If heads is
    not supplied, uses all of the revlog's heads.  If common is not
    supplied, uses nullid."""
    if common is None:
        common = [nullid]
    if heads is None:
        heads = self.heads()

    # work in revision numbers rather than node ids
    common = [self.rev(n) for n in common]
    heads = [self.rev(n) for n in heads]

    # we want the ancestors, but inclusive
    has = set(self.ancestors(common))
    has.add(nullrev)
    has.update(common)

    # take all ancestors from heads that aren't in has
    missing = set()
    visit = util.deque(r for r in heads if r not in has)
    while visit:
        r = visit.popleft()
        if r in missing:
            continue
        else:
            missing.add(r)
            for p in self.parentrevs(r):
                if p not in has:
                    visit.append(p)
    missing = list(missing)
    missing.sort()
    return has, [self.node(r) for r in missing]
def findmissingrevs(self, common=None, heads=None):
"""Return the revision numbers of the ancestors of heads that
are not ancestors of common.
More specifically, return a list of revision numbers corresponding to
nodes N such that every N satisfies the following constraints:
1. N is an ancestor of some node in 'heads'
2. N is not an ancestor of any node in 'common'
The list is sorted by revision number, meaning it is
topologically sorted.
'heads' and 'common' are both lists of revision numbers. If heads is
not supplied, uses all of the revlog's heads. If common is not
supplied, uses nullid."""
if common is None:
common = [nullrev]
if heads is None:
heads = self.headrevs()
return ancestor.missingancestors(heads, common, self.parentrevs)
def findmissing(self, common=None, heads=None):
"""Return the ancestors of heads that are not ancestors of common.
More specifically, return a list of nodes N such that every N
satisfies the following constraints:
1. N is an ancestor of some node in 'heads'
2. N is not an ancestor of any node in 'common'
The list is sorted by revision number, meaning it is
topologically sorted.
'heads' and 'common' are both lists of node IDs. If heads is
not supplied, uses all of the revlog's heads. If common is not
supplied, uses nullid."""
if common is None:
common = [nullid]
if heads is None:
heads = self.heads()
common = [self.rev(n) for n in common]
heads = [self.rev(n) for n in heads]
return [self.node(r) for r in
ancestor.missingancestors(heads, common, self.parentrevs)]
def nodesbetween(self, roots=None, heads=None):
"""Return a topological path from 'roots' to 'heads'.
Return a tuple (nodes, outroots, outheads) where 'nodes' is a
topologically sorted list of all nodes N that satisfy both of
these constraints:
1. N is a descendant of some node in 'roots'
2. N is an ancestor of some node in 'heads'
Every node is considered to be both a descendant and an ancestor
of itself, so every reachable node in 'roots' and 'heads' will be
included in 'nodes'.
'outroots' is the list of reachable nodes in 'roots', i.e., the
subset of 'roots' that is returned in 'nodes'. Likewise,
'outheads' is the subset of 'heads' that is also in 'nodes'.
'roots' and 'heads' are both lists of node IDs. If 'roots' is
unspecified, uses nullid as the only root. If 'heads' is
unspecified, uses list of all of the revlog's heads."""
nonodes = ([], [], [])
if roots is not None:
roots = list(roots)
if not roots:
return nonodes
lowestrev = min([self.rev(n) for n in roots])
else:
roots = [nullid] # Everybody's a descendant of nullid
lowestrev = nullrev
if (lowestrev == nullrev) and (heads is None):
# We want _all_ the nodes!
return ([self.node(r) for r in self], [nullid], list(self.heads()))
if heads is None:
# All nodes are ancestors, so the latest ancestor is the last
# node.
highestrev = len(self) - 1
# Set ancestors to None to signal that every node is an ancestor.
ancestors = None
# Set heads to an empty dictionary for later discovery of heads
heads = {}
else:
heads = list(heads)
if not heads:
return nonodes
ancestors = set()
# Turn heads into a dictionary so we can remove 'fake' heads.
# Also, later we will be using it to filter out the heads we can't
# find from roots.
heads = dict.fromkeys(heads, False)
# Start at the top and keep marking parents until we're done.
nodestotag = set(heads)
# Remember where the top was so we can use it as a limit later.
highestrev = max([self.rev(n) for n in nodestotag])
while nodestotag:
# grab a node to tag
n = nodestotag.pop()
# Never tag nullid
if n == nullid:
continue
# A node's revision number represents its place in a
# topologically sorted list of nodes.
r = self.rev(n)
if r >= lowestrev:
if n not in ancestors:
# If we are possibly a descendant of one of the roots
# and we haven't already been marked as an ancestor
ancestors.add(n) # Mark as ancestor
# Add non-nullid parents to list of nodes to tag.
nodestotag.update([p for p in self.parents(n) if
p != nullid])
elif n in heads: # We've seen it before, is it a fake head?
# So it is, real heads should not be the ancestors of
# any other heads.
heads.pop(n)
if not ancestors:
return nonodes
# Now that we have our set of ancestors, we want to remove any
# roots that are not ancestors.
# If one of the roots was nullid, everything is included anyway.
if lowestrev > nullrev:
# But, since we weren't, let's recompute the lowest rev to not
# include roots that aren't ancestors.
# Filter out roots that aren't ancestors of heads
roots = [n for n in roots if n in ancestors]
# Recompute the lowest revision
if roots:
lowestrev = min([self.rev(n) for n in roots])
else:
# No more roots? Return empty list
return nonodes
else:
# We are descending from nullid, and don't need to care about
# any other roots.
lowestrev = nullrev
roots = [nullid]
# Transform our roots list into a set.
descendants = set(roots)
# Also, keep the original roots so we can filter out roots that aren't
# 'real' roots (i.e. are descended from other roots).
roots = descendants.copy()
# Our topologically sorted list of output nodes.
orderedout = []
# Don't start at nullid since we don't want nullid in our output list,
# and if nullid shows up in descendants, empty parents will look like
# they're descendants.
for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
n = self.node(r)
isdescendant = False
if lowestrev == nullrev: # Everybody is a descendant of nullid
isdescendant = True
elif n in descendants:
# n is already a descendant
isdescendant = True
# This check only needs to be done here because all the roots
# will start being marked is descendants before the loop.
if n in roots:
# If n was a root, check if it's a 'real' root.
p = tuple(self.parents(n))
# If any of its parents are descendants, it's not a root.
if (p[0] in descendants) or (p[1] in descendants):
roots.remove(n)
else:
p = tuple(self.parents(n))
# A node is a descendant if either of its parents are
# descendants. (We seeded the dependents list with the roots
# up there, remember?)
if (p[0] in descendants) or (p[1] in descendants):
descendants.add(n)
isdescendant = True
if isdescendant and ((ancestors is None) or (n in ancestors)):
# Only include nodes that are both descendants and ancestors.
orderedout.append(n)
if (ancestors is not None) and (n in heads):
# We're trying to figure out which heads are reachable
# from roots.
# Mark this head as having been reached
heads[n] = True
elif ancestors is None:
# Otherwise, we're trying to discover the heads.
# Assume this is a head because if it isn't, the next step
# will eventually remove it.
heads[n] = True
# But, obviously its parents aren't.
for p in self.parents(n):
heads.pop(p, None)
heads = [n for n, flag in heads.iteritems() if flag]
roots = list(roots)
assert orderedout
assert roots
assert heads
return (orderedout, roots, heads)
def headrevs(self):
try:
return self.index.headrevs()
except AttributeError:
return self._headrevs()
def _headrevs(self):
count = len(self)
if not count:
return [nullrev]
# we won't iter over filtered rev so nobody is a head at start
ishead = [0] * (count + 1)
index = self.index
for r in self:
ishead[r] = 1 # I may be an head
e = index[r]
ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
return [r for r, val in enumerate(ishead) if val]
def heads(self, start=None, stop=None):
"""return the list of all nodes that have no children
if start is specified, only heads that are descendants of
start will be returned
if stop is specified, it will consider all the revs from stop
as if they had no children
"""
if start is None and stop is None:
if not len(self):
return [nullid]
return [self.node(r) for r in self.headrevs()]
if start is None:
start = nullid
if stop is None:
stop = []
stoprevs = set([self.rev(n) for n in stop])
startrev = self.rev(start)
reachable = set((startrev,))
heads = set((startrev,))
parentrevs = self.parentrevs
for r in self.revs(start=startrev + 1):
for p in parentrevs(r):
if p in reachable:
if r not in stoprevs:
reachable.add(r)
heads.add(r)
if p in heads and p not in stoprevs:
heads.remove(p)
return [self.node(r) for r in heads]
def children(self, node):
"""find the children of a given node"""
c = []
p = self.rev(node)
for r in self.revs(start=p + 1):
prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
if prevs:
for pr in prevs:
if pr == p:
c.append(self.node(r))
elif p == nullrev:
c.append(self.node(r))
return c
def descendant(self, start, end):
if start == nullrev:
return True
for i in self.descendants([start]):
if i == end:
return True
elif i > end:
break
return False
def ancestor(self, a, b):
"""calculate the least common ancestor of nodes a and b"""
a, b = self.rev(a), self.rev(b)
try:
ancs = self.index.ancestors(a, b)
except (AttributeError, OverflowError):
ancs = ancestor.ancestors(self.parentrevs, a, b)
if ancs:
# choose a consistent winner when there's a tie
return min(map(self.node, ancs))
return nullid
def _match(self, id):
if isinstance(id, int):
# rev
return self.node(id)
if len(id) == 20:
# possibly a binary node
# odds of a binary node being all hex in ASCII are 1 in 10**25
try:
node = id
self.rev(node) # quick search the index
return node
except LookupError:
pass # may be partial hex id
try:
# str(rev)
rev = int(id)
if str(rev) != id:
raise ValueError
if rev < 0:
rev = len(self) + rev
if rev < 0 or rev >= len(self):
raise ValueError
return self.node(rev)
except (ValueError, OverflowError):
pass
if len(id) == 40:
try:
# a full hex nodeid?
node = bin(id)
self.rev(node)
return node
except (TypeError, LookupError):
pass
def _partialmatch(self, id):
try:
return self.index.partialmatch(id)
except RevlogError:
# parsers.c radix tree lookup gave multiple matches
raise LookupError(id, self.indexfile, _("ambiguous identifier"))
except (AttributeError, ValueError):
# we are pure python, or key was too short to search radix tree
pass
if id in self._pcache:
return self._pcache[id]
if len(id) < 40:
try:
# hex(node)[:...]
l = len(id) // 2 # grab an even number of digits
prefix = bin(id[:l * 2])
nl = [e[7] for e in self.index if e[7].startswith(prefix)]
nl = [n for n in nl if hex(n).startswith(id)]
if len(nl) > 0:
if len(nl) == 1:
self._pcache[id] = nl[0]
return nl[0]
raise LookupError(id, self.indexfile,
_('ambiguous identifier'))
return None
except TypeError:
pass
def lookup(self, id):
"""locate a node based on:
- revision number or str(revision number)
- nodeid or subset of hex nodeid
"""
n = self._match(id)
if n is not None:
return n
n = self._partialmatch(id)
if n:
return n
raise LookupError(id, self.indexfile, _('no match found'))
def cmp(self, node, text):
"""compare text with a given file revision
returns True if text is different than what is stored.
"""
p1, p2 = self.parents(node)
return hash(text, p1, p2) != node
def _addchunk(self, offset, data):
o, d = self._chunkcache
# try to add to existing cache
if o + len(d) == offset and len(d) + len(data) < _chunksize:
self._chunkcache = o, d + data
else:
self._chunkcache = offset, data
def _loadchunk(self, offset, length):
if self._inline:
df = self.opener(self.indexfile)
else:
df = self.opener(self.datafile)
readahead = max(65536, length)
df.seek(offset)
d = df.read(readahead)
df.close()
self._addchunk(offset, d)
if readahead > length:
return util.buffer(d, 0, length)
return d
def _getchunk(self, offset, length):
o, d = self._chunkcache
l = len(d)
# is it in the cache?
cachestart = offset - o
cacheend = cachestart + length
if cachestart >= 0 and cacheend <= l:
if cachestart == 0 and cacheend == l:
return d # avoid a copy
return util.buffer(d, cachestart, cacheend - cachestart)
return self._loadchunk(offset, length)
def _chunkraw(self, startrev, endrev):
start = self.start(startrev)
length = self.end(endrev) - start
if self._inline:
start += (startrev + 1) * self._io.size
return self._getchunk(start, length)
def _chunk(self, rev):
return decompress(self._chunkraw(rev, rev))
def _chunkbase(self, rev):
return self._chunk(rev)
def _chunkclear(self):
self._chunkcache = (0, '')
def deltaparent(self, rev):
"""return deltaparent of the given revision"""
base = self.index[rev][3]
if base == rev:
return nullrev
elif self._generaldelta:
return base
else:
return rev - 1
def revdiff(self, rev1, rev2):
"""return or calculate a delta between two revisions"""
if rev1 != nullrev and self.deltaparent(rev2) == rev1:
return str(self._chunk(rev2))
return mdiff.textdiff(self.revision(rev1),
self.revision(rev2))
def revision(self, nodeorrev):
"""return an uncompressed revision of a given node or revision
number.
"""
if isinstance(nodeorrev, int):
rev = nodeorrev
node = self.node(rev)
else:
node = nodeorrev
rev = None
cachedrev = None
if node == nullid:
return ""
if self._cache:
if self._cache[0] == node:
return self._cache[2]
cachedrev = self._cache[1]
# look up what we need to read
text = None
if rev is None:
rev = self.rev(node)
# check rev flags
if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
raise RevlogError(_('incompatible revision flag %x') %
(self.flags(rev) & ~REVIDX_KNOWN_FLAGS))
# build delta chain
chain = []
index = self.index # for performance
generaldelta = self._generaldelta
iterrev = rev
e = index[iterrev]
while iterrev != e[3] and iterrev != cachedrev:
chain.append(iterrev)
if generaldelta:
iterrev = e[3]
else:
iterrev -= 1
e = index[iterrev]
chain.reverse()
base = iterrev
if iterrev == cachedrev:
# cache hit
text = self._cache[2]
# drop cache to save memory
self._cache = None
self._chunkraw(base, rev)
if text is None:
text = str(self._chunkbase(base))
bins = [self._chunk(r) for r in chain]
text = mdiff.patches(text, bins)
text = self._checkhash(text, node, rev)
self._cache = (node, rev, text)
return text
def _checkhash(self, text, node, rev):
p1, p2 = self.parents(node)
if node != hash(text, p1, p2):
raise RevlogError(_("integrity check failed on %s:%d")
% (self.indexfile, rev))
return text
def checkinlinesize(self, tr, fp=None):
if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
return
trinfo = tr.find(self.indexfile)
if trinfo is None:
raise RevlogError(_("%s not found in the transaction")
% self.indexfile)
trindex = trinfo[2]
dataoff = self.start(trindex)
tr.add(self.datafile, dataoff)
if fp:
fp.flush()
fp.close()
df = self.opener(self.datafile, 'w')
try:
for r in self:
df.write(self._chunkraw(r, r))
finally:
df.close()
fp = self.opener(self.indexfile, 'w', atomictemp=True)
self.version &= ~(REVLOGNGINLINEDATA)
self._inline = False
for i in self:
e = self._io.packentry(self.index[i], self.node, self.version, i)
fp.write(e)
# if we don't call close, the temp file will never replace the
# real index
fp.close()
tr.replace(self.indexfile, trindex * self._io.size)
self._chunkclear()
def addrevision(self, text, transaction, link, p1, p2, cachedelta=None):
"""add a revision to the log
text - the revision data to add
transaction - the transaction object used for rollback
link - the linkrev data to add
p1, p2 - the parent nodeids of the revision
cachedelta - an optional precomputed delta
"""
node = hash(text, p1, p2)
if node in self.nodemap:
return node
dfh = None
if not self._inline:
dfh = self.opener(self.datafile, "a")
ifh = self.opener(self.indexfile, "a+")
try:
return self._addrevision(node, text, transaction, link, p1, p2,
cachedelta, ifh, dfh)
finally:
if dfh:
dfh.close()
ifh.close()
def compress(self, text):
""" generate a possibly-compressed representation of text """
if not text:
return ("", text)
l = len(text)
bin = None
if l < 44:
pass
elif l > 1000000:
# zlib makes an internal copy, thus doubling memory usage for
# large files, so lets do this in pieces
z = zlib.compressobj()
p = []
pos = 0
while pos < l:
pos2 = pos + 2**20
p.append(z.compress(text[pos:pos2]))
pos = pos2
p.append(z.flush())
if sum(map(len, p)) < l:
bin = "".join(p)
else:
bin = _compress(text)
if bin is None or len(bin) > l:
if text[0] == '\0':
return ("", text)
return ('u', text)
return ("", bin)
def _addrevision(self, node, text, transaction, link, p1, p2,
cachedelta, ifh, dfh):
"""internal function to add revisions to the log
see addrevision for argument descriptions.
invariants:
- text is optional (can be None); if not set, cachedelta must be set.
if both are set, they must correspond to each other.
"""
btext = [text]
def buildtext():
if btext[0] is not None:
return btext[0]
# flush any pending writes here so we can read it in revision
if dfh:
dfh.flush()
ifh.flush()
basetext = self.revision(self.node(cachedelta[0]))
btext[0] = mdiff.patch(basetext, cachedelta[1])
chk = hash(btext[0], p1, p2)
if chk != node:
raise RevlogError(_("consistency error in delta"))
return btext[0]
def builddelta(rev):
# can we use the cached delta?
if cachedelta and cachedelta[0] == rev:
delta = cachedelta[1]
else:
t = buildtext()
ptext = self.revision(self.node(rev))
delta = mdiff.textdiff(ptext, t)
data = self.compress(delta)
l = len(data[1]) + len(data[0])
if basecache[0] == rev:
chainbase = basecache[1]
else:
chainbase = self.chainbase(rev)
dist = l + offset - self.start(chainbase)
if self._generaldelta:
base = rev
else:
base = chainbase
return dist, l, data, base, chainbase
curr = len(self)
prev = curr - 1
base = chainbase = curr
offset = self.end(prev)
flags = 0
d = None
basecache = self._basecache
p1r, p2r = self.rev(p1), self.rev(p2)
# should we try to build a delta?
if prev != nullrev:
if self._generaldelta:
if p1r >= basecache[1]:
d = builddelta(p1r)
elif p2r >= basecache[1]:
d = builddelta(p2r)
else:
d = builddelta(prev)
else:
d = builddelta(prev)
dist, l, data, base, chainbase = d
# full versions are inserted when the needed deltas
# become comparable to the uncompressed text
if text is None:
textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
cachedelta[1])
else:
textlen = len(text)
if d is None or dist > textlen * 2:
text = buildtext()
data = self.compress(text)
l = len(data[1]) + len(data[0])
base = chainbase = curr
e = (offset_type(offset, flags), l, textlen,
base, link, p1r, p2r, node)
self.index.insert(-1, e)
self.nodemap[node] = curr
entry = self._io.packentry(e, self.node, self.version, curr)
if not self._inline:
transaction.add(self.datafile, offset)
transaction.add(self.indexfile, curr * len(entry))
if data[0]:
dfh.write(data[0])
dfh.write(data[1])
dfh.flush()
ifh.write(entry)
else:
offset += curr * self._io.size
transaction.add(self.indexfile, offset, curr)
ifh.write(entry)
ifh.write(data[0])
ifh.write(data[1])
self.checkinlinesize(transaction, ifh)
if type(text) == str: # only accept immutable objects
self._cache = (node, curr, text)
self._basecache = (curr, chainbase)
return node
def group(self, nodelist, bundler, reorder=None):
"""Calculate a delta group, yielding a sequence of changegroup chunks
(strings).
Given a list of changeset revs, return a set of deltas and
metadata corresponding to nodes. The first delta is
first parent(nodelist[0]) -> nodelist[0], the receiver is
guaranteed to have this parent as it has all history before
these changesets. In the case firstparent is nullrev the
changegroup starts with a full revision.
"""
# if we don't have any revisions touched by these changesets, bail
if len(nodelist) == 0:
yield bundler.close()
return
# for generaldelta revlogs, we linearize the revs; this will both be
# much quicker and generate a much smaller bundle
if (self._generaldelta and reorder is not False) or reorder:
dag = dagutil.revlogdag(self)
revs = set(self.rev(n) for n in nodelist)
revs = dag.linearize(revs)
else:
revs = sorted([self.rev(n) for n in nodelist])
# add the parent of the first rev
p = self.parentrevs(revs[0])[0]
revs.insert(0, p)
# build deltas
for r in xrange(len(revs) - 1):
prev, curr = revs[r], revs[r + 1]
for c in bundler.revchunk(self, curr, prev):
yield c
yield bundler.close()
def addgroup(self, bundle, linkmapper, transaction):
"""
add a delta group
given a set of deltas, add them to the revision log. the
first delta is against its parent, which should be in our
log, the rest are against the previous delta.
"""
# track the base of the current delta log
content = []
node = None
r = len(self)
end = 0
if r:
end = self.end(r - 1)
ifh = self.opener(self.indexfile, "a+")
isize = r * self._io.size
if self._inline:
transaction.add(self.indexfile, end + isize, r)
dfh = None
else:
transaction.add(self.indexfile, isize, r)
transaction.add(self.datafile, end)
dfh = self.opener(self.datafile, "a")
try:
# loop through our set of deltas
chain = None
while True:
chunkdata = bundle.deltachunk(chain)
if not chunkdata:
break
node = chunkdata['node']
p1 = chunkdata['p1']
p2 = chunkdata['p2']
cs = chunkdata['cs']
deltabase = chunkdata['deltabase']
delta = chunkdata['delta']
content.append(node)
link = linkmapper(cs)
if node in self.nodemap:
# this can happen if two branches make the same change
chain = node
continue
for p in (p1, p2):
if p not in self.nodemap:
raise LookupError(p, self.indexfile,
_('unknown parent'))
if deltabase not in self.nodemap:
raise LookupError(deltabase, self.indexfile,
_('unknown delta base'))
baserev = self.rev(deltabase)
chain = self._addrevision(node, None, transaction, link,
p1, p2, (baserev, delta), ifh, dfh)
if not dfh and not self._inline:
# addrevision switched from inline to conventional
# reopen the index
ifh.close()
dfh = self.opener(self.datafile, "a")
ifh = self.opener(self.indexfile, "a")
finally:
if dfh:
dfh.close()
ifh.close()
return content
def strip(self, minlink, transaction):
"""truncate the revlog on the first revision with a linkrev >= minlink
This function is called when we're stripping revision minlink and
its descendants from the repository.
We have to remove all revisions with linkrev >= minlink, because
the equivalent changelog revisions will be renumbered after the
strip.
So we truncate the revlog on the first of these revisions, and
trust that the caller has saved the revisions that shouldn't be
removed and that it'll re-add them after this truncation.
"""
if len(self) == 0:
return
for rev in self:
if self.index[rev][4] >= minlink:
break
else:
return
# first truncate the files on disk
end = self.start(rev)
if not self._inline:
transaction.add(self.datafile, end)
end = rev * self._io.size
else:
end += rev * self._io.size
transaction.add(self.indexfile, end)
# then reset internal state in memory to forget those revisions
self._cache = None
self._chunkclear()
for x in xrange(rev, len(self)):
del self.nodemap[self.node(x)]
del self.index[rev:-1]
def checksize(self):
expected = 0
if len(self):
expected = max(0, self.end(len(self) - 1))
try:
f = self.opener(self.datafile)
f.seek(0, 2)
actual = f.tell()
f.close()
dd = actual - expected
except IOError, inst:
if inst.errno != errno.ENOENT:
raise
dd = 0
try:
f = self.opener(self.indexfile)
f.seek(0, 2)
actual = f.tell()
f.close()
s = self._io.size
i = max(0, actual // s)
di = actual - (i * s)
if self._inline:
databytes = 0
for r in self:
databytes += max(0, self.length(r))
dd = 0
di = actual - len(self) * s - databytes
except IOError, inst:
if inst.errno != errno.ENOENT:
raise
di = 0
return (dd, di)
def files(self):
res = [self.indexfile]
if not self._inline:
res.append(self.datafile)
return res
|
iaddict/mercurial.rb
|
vendor/mercurial/mercurial/revlog.py
|
Python
|
mit
| 45,735
|
[
"VisIt"
] |
6cc6785e649b4c436d2f607516ef97e501bbefc16ea815d7f5aef30975d2f809
|
# -*- coding: utf-8 -*-
import os
import datetime
from pysite.lib import load_site_config
NAME = "eventlist"
TITLE = "Loads a list of events."
DESCR = """
*************
The Eventlist
*************
Basic Format
============
An eventlist is a YAML mapping. The field ``EVENTS`` contains the list of
events.
Each event is a mapping which has at least fields ``date_from`` and ``date_to``.
Both dates must be strings, preferrably as ISO dates: YYYY-MM-DD.
An event may have arbitrary additional fields. Their meaning depends on the
used Jinja template.
The order of the fields or of events is not relevant. The list is
automatically sorted by ``date_from``.
Sample::
EVENTS:
- date_from: 2012-10-09
date_to: 2012-10-12
title: Breakfast by Tiffany
- date_from: 2012-11-01
date_to: 2012-11-30
date_info: From dusk til dawn
title: Call me November
info: "<p>Yes, I am a real month.</br>Visit me next year, too!</p>"
Default Format
==============
Event Fields
------------
``date_info``: (optional) A text line with additional information about the
date.
E.g.: "Do 16 Uhr - So 13 Uhr"
``title``: Title of event
``subtitle``: (optional) A subtitle
``location``: The location.
E.g. "Baden-Baden"
``address``: (optional) A string with address information. Use HTML to format
it.
E.g. "Seminarhaus Breema<br />Obere Windeckstr. 20"
``contacts``: (optional) A list of contacts (see below). Either an explicit
contact mapping or a reference to a contact of the CONTACTS
list.
Contacts
--------
Each contact is a mapping with these fields:
``email``: Email address
``name``: Name of contact person
``phone``: A phone number
To avoid redundancy, you may collect all contact mappings as a list of key
``CONTACTS`` and inside an event refer to it with a YAML reference.
E.g.::
CONTACTS:
- &idFOO
email: foo@example.com
name: Mr. Foo
phone: 555-7745
EVENTS:
- date_from: 2012-12-01
date_to: 2012-12-02
title: The Big Frobotz
contacts:
- *idFOO
"""
def factory(rc=None):
"""
Returns plugin instance for use as a thread local variable.
In other words, this instance is create once within a Pyramid
application. Store it e.g. in the application registry.
"""
pass
def request_factory(site, context, request, rc=None):
"""
Returns plugin instance for use within a request.
In other words, this instance is created freshly for each request.
Use :func:`factory` to create an instance suitable as a thread local.
:param site: Resource node of the current site.
:param context: Context of current request
:param request: Current request
:param rc: Optional config settings
:returns: A fully initialised instance of the plugin
"""
o = Eventlist(site, context, request, rc)
return o
class Eventlist(object):
def __init__(self, site, context, request, rc=None):
self._site = site
self._context = context
self._request = request
self._rc = rc
self._data = None
self._eventlist = None
def load(self, fn):
fn = os.path.join('plugins', NAME, os.path.normpath(fn))
self._eventlist = load_site_config(self._site.dir_, fn)
# TODO Convert dates into datetime objects
self._eventlist['EVENTS'].sort(key=lambda it: it['date_from'])
def all_events(self, year=None):
"""
Returns list of all events.
:param year: Optionally filter by year
"""
if year:
r = []
for evt in self._eventlist['EVENTS']:
if evt['date_from'].year == year:
r.append(evt)
return r
else:
return self._eventlist['EVENTS']
def next_events(self, n=3):
"""
Returns list of n coming events.
:param n: Number of events to return, 0=all coming events
"""
now = datetime.date.today()
i = 0
r = []
for evt in self._eventlist['EVENTS']:
if evt['date_from'] < now:
continue
r.append(evt)
i += 1
if n > 0 and i >= n:
break
return r
|
dmdm/PySite
|
pysite/plugins/eventlist/__init__.py
|
Python
|
agpl-3.0
| 4,369
|
[
"VisIt"
] |
48231466f2def6331a423e89f728321cfcda536d7cf1cd43b66f1db1ffa2dda8
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils import warn_if_not_float
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis)
from ..utils.validation import check_is_fitted
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'Normalizer',
'OneHotEncoder',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
if isinstance(std_, np.ndarray):
std_[std_ == 0.0] = 1.0
elif std_ == 0.:
std_ = 1.
else:
std_ = None
return mean_, std_
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False)
warn_if_not_float(X, estimator='The scale function')
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var[var == 0.0] = 1.0
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
if with_std:
Xr /= std_
return X
class MinMaxScaler(BaseEstimator, TransformerMixin):
    """Scale each feature to a given range (default ``[0, 1]``).

    The transformation is computed per feature on the training data::

        X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
        X_scaled = X_std * (max - min) + min

    where ``min, max = feature_range``.  Often used as an alternative to
    zero-mean / unit-variance scaling.

    Parameters
    ----------
    feature_range : tuple (min, max), default=(0, 1)
        Desired range of the transformed data.

    copy : boolean, optional, default True
        Set to False to attempt in-place scaling and avoid a copy
        (only possible if the input is already a numpy array).

    Attributes
    ----------
    min_ : ndarray, shape (n_features,)
        Per-feature additive adjustment.

    scale_ : ndarray, shape (n_features,)
        Per-feature multiplicative scaling.
    """

    def __init__(self, feature_range=(0, 1), copy=True):
        self.feature_range = feature_range
        self.copy = copy

    def fit(self, X, y=None):
        """Compute the per-feature minimum and range used for scaling.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to compute the per-feature minimum and maximum
            used for later scaling along the features axis.
        """
        X = check_array(X, copy=self.copy, ensure_2d=False)
        warn_if_not_float(X, estimator=self)
        lo, hi = self.feature_range
        if lo >= hi:
            raise ValueError("Minimum of desired feature range must be smaller"
                             " than maximum. Got %s." % str(self.feature_range))
        lowest = np.min(X, axis=0)
        span = np.max(X, axis=0) - lowest
        # A constant feature would give a zero range; map it to 1 so the
        # division below is a no-op instead of producing inf/nan.
        if isinstance(span, np.ndarray):
            span[span == 0.0] = 1.0
        elif span == 0.:
            span = 1.
        self.scale_ = (hi - lo) / span
        self.min_ = lo - lowest * self.scale_
        self.data_range = span
        self.data_min = lowest
        return self

    def transform(self, X):
        """Scale features of X according to feature_range.

        Parameters
        ----------
        X : array-like with shape [n_samples, n_features]
            Input data that will be transformed.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, copy=self.copy, ensure_2d=False)
        # In-place affine map: X * scale_ + min_.
        X *= self.scale_
        X += self.min_
        return X

    def inverse_transform(self, X):
        """Undo the scaling of X according to feature_range.

        Parameters
        ----------
        X : array-like with shape [n_samples, n_features]
            Previously transformed data.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, copy=self.copy, ensure_2d=False)
        # Invert the affine map applied by ``transform``.
        X -= self.min_
        X /= self.scale_
        return X
class StandardScaler(BaseEstimator, TransformerMixin):
    """Standardize features by removing the mean and scaling to unit variance

    Centering and scaling happen independently on each feature by computing
    the relevant statistics on the samples in the training set. Mean and
    standard deviation are then stored to be used on later data using the
    `transform` method.

    Standardization of a dataset is a common requirement for many
    machine learning estimators: they might behave badly if the
    individual feature do not more or less look like standard normally
    distributed data (e.g. Gaussian with 0 mean and unit variance).

    For instance many elements used in the objective function of
    a learning algorithm (such as the RBF kernel of Support Vector
    Machines or the L1 and L2 regularizers of linear models) assume that
    all features are centered around 0 and have variance in the same
    order. If a feature has a variance that is orders of magnitude larger
    that others, it might dominate the objective function and make the
    estimator unable to learn from other features correctly as expected.

    Parameters
    ----------
    with_mean : boolean, True by default
        If True, center the data before scaling.
        This does not work (and will raise an exception) when attempted on
        sparse matrices, because centering them entails building a dense
        matrix which in common use cases is likely to be too large to fit in
        memory.

    with_std : boolean, True by default
        If True, scale the data to unit variance (or equivalently,
        unit standard deviation).

    copy : boolean, optional, default True
        If False, try to avoid a copy and do inplace scaling instead.
        This is not guaranteed to always work inplace; e.g. if the data is
        not a NumPy array or scipy.sparse CSR matrix, a copy may still be
        returned.

    Attributes
    ----------
    mean_ : array of floats with shape [n_features]
        The mean value for each feature in the training set.

    std_ : array of floats with shape [n_features]
        The standard deviation for each feature in the training set.

    See also
    --------
    :func:`sklearn.preprocessing.scale` to perform centering and
    scaling without using the ``Transformer`` object oriented API

    :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
    to further remove the linear correlation across features.
    """

    def __init__(self, copy=True, with_mean=True, with_std=True):
        self.with_mean = with_mean
        self.with_std = with_std
        self.copy = copy

    def fit(self, X, y=None):
        """Compute the mean and std to be used for later scaling.

        Parameters
        ----------
        X : array-like or CSR matrix with shape [n_samples, n_features]
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.
        """
        X = check_array(X, accept_sparse='csr', copy=self.copy,
                        ensure_2d=False)
        # Standardization only makes sense on floating point data; upcast
        # (and warn) if the input is integral.
        if warn_if_not_float(X, estimator=self):
            X = X.astype(np.float)
        if sparse.issparse(X):
            # Centering a sparse matrix would densify it, so it is refused
            # explicitly; only scaling is supported for sparse input.
            if self.with_mean:
                raise ValueError(
                    "Cannot center sparse matrices: pass `with_mean=False` "
                    "instead. See docstring for motivation and alternatives.")
            self.mean_ = None

            if self.with_std:
                # Per-feature variance computed without densifying X.
                var = mean_variance_axis(X, axis=0)[1]
                self.std_ = np.sqrt(var)
                # Constant features get std 1.0 so that dividing by std_ in
                # transform() is a no-op instead of a division by zero.
                self.std_[var == 0.0] = 1.0
            else:
                self.std_ = None
            return self
        else:
            self.mean_, self.std_ = _mean_and_std(
                X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
        return self

    def transform(self, X, y=None, copy=None):
        """Perform standardization by centering and scaling

        Parameters
        ----------
        X : array-like with shape [n_samples, n_features]
            The data used to scale along the features axis.

        copy : boolean, optional, default None
            If not None, overrides the ``copy`` setting given at
            construction time for this call only.
        """
        check_is_fitted(self, 'std_')

        copy = copy if copy is not None else self.copy
        X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False)
        if warn_if_not_float(X, estimator=self):
            X = X.astype(np.float)
        if sparse.issparse(X):
            if self.with_mean:
                raise ValueError(
                    "Cannot center sparse matrices: pass `with_mean=False` "
                    "instead. See docstring for motivation and alternatives.")
            if self.std_ is not None:
                # Sparse path: scale columns in place, never center.
                inplace_column_scale(X, 1 / self.std_)
        else:
            if self.with_mean:
                X -= self.mean_
            if self.with_std:
                X /= self.std_
        return X

    def inverse_transform(self, X, copy=None):
        """Scale back the data to the original representation

        Parameters
        ----------
        X : array-like with shape [n_samples, n_features]
            The data used to scale along the features axis.

        copy : boolean, optional, default None
            If not None, overrides the ``copy`` setting given at
            construction time for this call only.
        """
        check_is_fitted(self, 'std_')

        copy = copy if copy is not None else self.copy
        if sparse.issparse(X):
            if self.with_mean:
                raise ValueError(
                    "Cannot uncenter sparse matrices: pass `with_mean=False` "
                    "instead See docstring for motivation and alternatives.")
            if not sparse.isspmatrix_csr(X):
                # Conversion already produces a new object, so a further
                # copy would be redundant.
                X = X.tocsr()
                copy = False
            if copy:
                X = X.copy()
            if self.std_ is not None:
                inplace_column_scale(X, self.std_)
        else:
            X = np.asarray(X)
            if copy:
                X = X.copy()
            # Undo transform(): multiply back by std first, then re-add
            # the mean (reverse order of the forward pass).
            if self.with_std:
                X *= self.std_
            if self.with_mean:
                X += self.mean_
        return X
class PolynomialFeatures(BaseEstimator, TransformerMixin):
    """Generate polynomial and interaction features.

    Generate a new feature matrix consisting of all polynomial combinations
    of the features with degree less than or equal to the specified degree.
    For example, if an input sample is two dimensional and of the form
    [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].

    Parameters
    ----------
    degree : integer
        The degree of the polynomial features. Default = 2.

    interaction_only : boolean, default = False
        If true, only interaction features are produced: features that are
        products of at most ``degree`` *distinct* input features (so not
        ``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).

    include_bias : boolean
        If True (default), then include a bias column, the feature in which
        all polynomial powers are zero (i.e. a column of ones - acts as an
        intercept term in a linear model).

    Examples
    --------
    >>> X = np.arange(6).reshape(3, 2)
    >>> X
    array([[0, 1],
           [2, 3],
           [4, 5]])
    >>> poly = PolynomialFeatures(2)
    >>> poly.fit_transform(X)
    array([[ 1,  0,  1,  0,  0,  1],
           [ 1,  2,  3,  4,  6,  9],
           [ 1,  4,  5, 16, 20, 25]])
    >>> poly = PolynomialFeatures(interaction_only=True)
    >>> poly.fit_transform(X)
    array([[ 1,  0,  1,  0],
           [ 1,  2,  3,  6],
           [ 1,  4,  5, 20]])

    Attributes
    ----------
    powers_ : array, shape (n_output_features, n_input_features)
        powers_[i, j] is the exponent of the jth input in the ith output.

    Notes
    -----
    Be aware that the number of features in the output array scales
    polynomially in the number of features of the input array, and
    exponentially in the degree. High degrees can cause overfitting.

    See :ref:`examples/linear_model/plot_polynomial_interpolation.py
    <example_linear_model_plot_polynomial_interpolation.py>`
    """

    def __init__(self, degree=2, interaction_only=False, include_bias=True):
        self.degree = degree
        self.interaction_only = interaction_only
        self.include_bias = include_bias

    @staticmethod
    def _power_matrix(n_features, degree, interaction_only, include_bias):
        """Compute the matrix of polynomial powers (one row per output)."""
        comb = (combinations if interaction_only else combinations_w_r)
        start = int(not include_bias)
        combn = chain.from_iterable(comb(range(n_features), i)
                                    for i in range(start, degree + 1))
        # Materialize the rows into a list: passing a bare generator to
        # np.vstack is deprecated and rejected by modern NumPy, which
        # requires a sequence of arrays.
        powers = np.vstack([np.bincount(c, minlength=n_features)
                            for c in combn])
        return powers

    def fit(self, X, y=None):
        """
        Compute the polynomial feature combinations for X's feature count.
        """
        n_samples, n_features = check_array(X).shape
        self.powers_ = self._power_matrix(n_features, self.degree,
                                          self.interaction_only,
                                          self.include_bias)
        return self

    def transform(self, X, y=None):
        """Transform data to polynomial features

        Parameters
        ----------
        X : array with shape [n_samples, n_features]
            The data to transform, row by row.

        Returns
        -------
        XP : np.ndarray shape [n_samples, NP]
            The matrix of features, where NP is the number of polynomial
            features generated from the combination of inputs.

        Raises
        ------
        ValueError
            If X has a different number of features than seen during fit.
        """
        check_is_fitted(self, 'powers_')

        X = check_array(X)
        n_samples, n_features = X.shape

        if n_features != self.powers_.shape[1]:
            raise ValueError("X shape does not match training shape")

        # Broadcast each sample against every powers row and multiply the
        # per-feature powers together along the last axis.
        return (X[:, None, :] ** self.powers_).prod(-1)
def normalize(X, norm='l2', axis=1, copy=True):
    """Scale input vectors individually to unit norm (vector length).

    Parameters
    ----------
    X : array or scipy.sparse matrix with shape [n_samples, n_features]
        The data to normalize, element by element.
        scipy.sparse matrices should be in CSR format to avoid an
        un-necessary copy.

    norm : 'l1' or 'l2', optional ('l2' by default)
        The norm to use to normalize each non zero sample (or each non-zero
        feature if axis is 0).

    axis : 0 or 1, optional (1 by default)
        axis used to normalize the data along. If 1, independently normalize
        each sample, otherwise (if 0) normalize each feature.

    copy : boolean, optional, default True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix and if axis is 1).

    See also
    --------
    :class:`sklearn.preprocessing.Normalizer` to perform normalization
    using the ``Transformer`` API (e.g. as part of a preprocessing
    :class:`sklearn.pipeline.Pipeline`)
    """
    if norm not in ('l1', 'l2'):
        raise ValueError("'%s' is not a supported norm" % norm)

    if axis == 1:
        sparse_format = 'csr'
    elif axis == 0:
        sparse_format = 'csc'
    else:
        raise ValueError("'%d' is not a supported axis" % axis)

    X = check_array(X, sparse_format, copy=copy)
    warn_if_not_float(X, 'The normalize function')

    # Feature-wise normalization is implemented as sample-wise
    # normalization of the transpose.
    if axis == 0:
        X = X.T

    if sparse.issparse(X):
        row_normalize = (inplace_csr_row_normalize_l1 if norm == 'l1'
                         else inplace_csr_row_normalize_l2)
        row_normalize(X)
    else:
        norms = np.abs(X).sum(axis=1) if norm == 'l1' else row_norms(X)
        # Rows with zero norm are left untouched (divide by 1, not 0).
        norms[norms == 0.0] = 1.0
        X /= norms[:, np.newaxis]

    if axis == 0:
        X = X.T

    return X
class Normalizer(BaseEstimator, TransformerMixin):
    """Normalize samples individually to unit norm.

    Each sample (i.e. each row of the data matrix) with at least one
    non zero component is rescaled independently of other samples so
    that its norm (l1 or l2) equals one.

    This transformer works with both dense numpy arrays and scipy.sparse
    matrices (use CSR format to avoid the cost of a copy / conversion).

    Scaling inputs to unit norm is a common operation for text
    classification or clustering.  For instance, the dot product of two
    l2-normalized TF-IDF vectors is their cosine similarity, the base
    similarity metric of the Vector Space Model used throughout
    Information Retrieval.

    Parameters
    ----------
    norm : 'l1' or 'l2', optional ('l2' by default)
        The norm to use to normalize each non zero sample.

    copy : boolean, optional, default True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix).

    Notes
    -----
    This estimator is stateless (besides constructor parameters); ``fit``
    does nothing but is useful when used in a pipeline.

    See also
    --------
    :func:`sklearn.preprocessing.normalize` equivalent function
    without the object oriented API
    """

    def __init__(self, norm='l2', copy=True):
        self.norm = norm
        self.copy = copy

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged

        Present only so the class implements the usual estimator API and
        can therefore be used in pipelines.
        """
        X = check_array(X, accept_sparse='csr')
        return self

    def transform(self, X, y=None, copy=None):
        """Scale each non zero row of X to unit norm

        Parameters
        ----------
        X : array or scipy.sparse matrix with shape [n_samples, n_features]
            The data to normalize, row by row. scipy.sparse matrices should be
            in CSR format to avoid an un-necessary copy.
        """
        if copy is None:
            copy = self.copy
        X = check_array(X, accept_sparse='csr')
        return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
    """Boolean thresholding of array-like or scipy.sparse matrix

    Parameters
    ----------
    X : array or scipy.sparse matrix with shape [n_samples, n_features]
        The data to binarize, element by element.
        scipy.sparse matrices should be in CSR or CSC format to avoid an
        un-necessary copy.

    threshold : float, optional (0.0 by default)
        Feature values below or equal to this are replaced by 0, above it by 1.
        Threshold may not be less than 0 for operations on sparse matrices.

    copy : boolean, optional, default True
        set to False to perform inplace binarization and avoid a copy
        (if the input is already a numpy array or a scipy.sparse CSR / CSC
        matrix and if axis is 1).

    See also
    --------
    :class:`sklearn.preprocessing.Binarizer` to perform binarization
    using the ``Transformer`` API (e.g. as part of a preprocessing
    :class:`sklearn.pipeline.Pipeline`)
    """
    X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
    if sparse.issparse(X):
        # A negative threshold would require turning implicit zeros into
        # ones, i.e. densifying the matrix, so it is rejected.
        if threshold < 0:
            raise ValueError('Cannot binarize a sparse matrix with threshold '
                             '< 0')
        above = X.data > threshold
        X.data[above] = 1
        X.data[np.logical_not(above)] = 0
        # Values mapped to 0 become explicit zeros; drop them.
        X.eliminate_zeros()
    else:
        above = X > threshold
        X[above] = 1
        X[np.logical_not(above)] = 0
    return X
class Binarizer(BaseEstimator, TransformerMixin):
    """Binarize data (set feature values to 0 or 1) according to a threshold

    Values strictly greater than the threshold map to 1; values less than
    or equal to it map to 0.  With the default threshold of 0, only
    positive values map to 1.

    Binarization is a common operation on text count data where the
    analyst can decide to only consider the presence or absence of a
    feature rather than a quantified number of occurrences for instance.

    It can also be used as a pre-processing step for estimators that
    consider boolean random variables (e.g. modelled using the Bernoulli
    distribution in a Bayesian setting).

    Parameters
    ----------
    threshold : float, optional (0.0 by default)
        Feature values below or equal to this are replaced by 0, above it by 1.
        Threshold may not be less than 0 for operations on sparse matrices.

    copy : boolean, optional, default True
        set to False to perform inplace binarization and avoid a copy (if
        the input is already a numpy array or a scipy.sparse CSR matrix).

    Notes
    -----
    If the input is a sparse matrix, only the non-zero values are subject
    to update by the Binarizer class.

    This estimator is stateless (besides constructor parameters); ``fit``
    does nothing but is useful when used in a pipeline.
    """

    def __init__(self, threshold=0.0, copy=True):
        self.threshold = threshold
        self.copy = copy

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged

        Present only so the class implements the usual estimator API and
        can therefore be used in pipelines.
        """
        check_array(X, accept_sparse='csr')
        return self

    def transform(self, X, y=None, copy=None):
        """Binarize each element of X

        Parameters
        ----------
        X : array or scipy.sparse matrix with shape [n_samples, n_features]
            The data to binarize, element by element.
            scipy.sparse matrices should be in CSR format to avoid an
            un-necessary copy.
        """
        if copy is None:
            copy = self.copy
        return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
    """Center a kernel matrix

    Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
    function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalize to have zero mean) the data without explicitly computing phi(x).
    It is equivalent to centering phi(x) with
    sklearn.preprocessing.StandardScaler(with_std=False).
    """

    def fit(self, K, y=None):
        """Fit KernelCenterer

        Parameters
        ----------
        K : numpy array of shape [n_samples, n_samples]
            Kernel matrix.

        Returns
        -------
        self : returns an instance of self.
        """
        K = check_array(K)
        n = K.shape[0]
        # Column means and grand mean of the training kernel.
        self.K_fit_rows_ = K.sum(axis=0) / n
        self.K_fit_all_ = self.K_fit_rows_.sum() / n
        return self

    def transform(self, K, y=None, copy=True):
        """Center kernel matrix.

        Parameters
        ----------
        K : numpy array of shape [n_samples1, n_samples2]
            Kernel matrix.

        copy : boolean, optional, default True
            Set to False to perform inplace computation.

        Returns
        -------
        K_new : numpy array of shape [n_samples1, n_samples2]
        """
        check_is_fitted(self, 'K_fit_all_')

        K = check_array(K)
        if copy:
            K = K.copy()

        # Double-centering: subtract training column means and the row
        # means of K, then add back the training grand mean.
        pred_cols = (K.sum(axis=1) /
                     self.K_fit_rows_.shape[0])[:, np.newaxis]
        K -= self.K_fit_rows_
        K -= pred_cols
        K += self.K_fit_all_
        return K
def add_dummy_feature(X, value=1.0):
    """Augment dataset with an additional dummy feature.

    This is useful for fitting an intercept term with implementations which
    cannot otherwise fit it directly.

    Parameters
    ----------
    X : array or scipy.sparse matrix with shape [n_samples, n_features]
        Data.

    value : float
        Value to use for the dummy feature.

    Returns
    -------
    X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
        Same data with dummy feature added as first column.

    Examples
    --------
    >>> from sklearn.preprocessing import add_dummy_feature
    >>> add_dummy_feature([[0, 1], [1, 0]])
    array([[ 1.,  0.,  1.],
           [ 1.,  1.,  0.]])
    """
    X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
    n_samples, n_features = X.shape
    shape = (n_samples, n_features + 1)
    if sparse.issparse(X):
        # Each sparse format needs its own index surgery to prepend a
        # fully dense first column cheaply.
        if sparse.isspmatrix_coo(X):
            # Shift columns to the right.
            col = X.col + 1
            # Column indices of dummy feature are 0 everywhere.
            col = np.concatenate((np.zeros(n_samples), col))
            # Row indices of dummy feature are 0, ..., n_samples-1.
            row = np.concatenate((np.arange(n_samples), X.row))
            # Prepend the dummy feature n_samples times.
            data = np.concatenate((np.ones(n_samples) * value, X.data))
            return sparse.coo_matrix((data, (row, col)), shape)
        elif sparse.isspmatrix_csc(X):
            # Shift index pointers since we need to add n_samples elements.
            indptr = X.indptr + n_samples
            # indptr[0] must be 0.
            indptr = np.concatenate((np.array([0]), indptr))
            # Row indices of dummy feature are 0, ..., n_samples-1.
            indices = np.concatenate((np.arange(n_samples), X.indices))
            # Prepend the dummy feature n_samples times.
            data = np.concatenate((np.ones(n_samples) * value, X.data))
            return sparse.csc_matrix((data, indices, indptr), shape)
        else:
            # Any other sparse format (e.g. CSR): go through COO and
            # convert back to the caller's class.
            klass = X.__class__
            return klass(add_dummy_feature(X.tocoo(), value))
    else:
        return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
    """Encode categorical integer features using a one-hot aka one-of-K scheme.

    The input to this transformer should be a matrix of integers, denoting
    the values taken on by categorical (discrete) features. The output will be
    a sparse matrix where each column corresponds to one possible value of one
    feature. It is assumed that input features take on values in the range
    [0, n_values).

    This encoding is needed for feeding categorical data to many scikit-learn
    estimators, notably linear models and SVMs with the standard kernels.

    Parameters
    ----------
    n_values : 'auto', int or array of ints
        Number of values per feature.

        - 'auto' : determine value range from training data.
        - int : maximum value for all features.
        - array : maximum value per feature.

    categorical_features: "all" or array of indices or mask
        Specify what features are treated as categorical.

        - 'all' (default): All features are treated as categorical.
        - array of indices: Array of categorical feature indices.
        - mask: Array of length n_features and with dtype=bool.

        Non-categorical features are always stacked to the right of the matrix.

    dtype : number type, default=np.float
        Desired dtype of output.

    sparse : boolean, default=True
        Will return sparse matrix if set True else will return an array.

    handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if a unknown categorical feature is
        present during transform.

    Attributes
    ----------
    active_features_ : array
        Indices for active features, meaning values that actually occur
        in the training set. Only available when n_values is ``'auto'``.

    feature_indices_ : array of shape (n_features,)
        Indices to feature ranges.
        Feature ``i`` in the original data is mapped to features
        from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
        (and then potentially masked by `active_features_` afterwards)

    n_values_ : array of shape (n_features,)
        Maximum number of values per feature.

    Examples
    --------
    Given a dataset with three features and two samples, we let the encoder
    find the maximum value per feature and transform the data to a binary
    one-hot encoding.

    >>> from sklearn.preprocessing import OneHotEncoder
    >>> enc = OneHotEncoder()
    >>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]])  # doctest: +ELLIPSIS
    OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
           handle_unknown='error', n_values='auto', sparse=True)
    >>> enc.n_values_
    array([2, 3, 4])
    >>> enc.feature_indices_
    array([0, 2, 5, 9])
    >>> enc.transform([[0, 1, 1]]).toarray()
    array([[ 1.,  0.,  0.,  1.,  0.,  0.,  1.,  0.,  0.]])

    See also
    --------
    sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
        dictionary items (also handles string-valued features).
    sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
        encoding of dictionary items or strings.
    """

    def __init__(self, n_values="auto", categorical_features="all",
                 dtype=np.float, sparse=True, handle_unknown='error'):
        self.n_values = n_values
        self.categorical_features = categorical_features
        self.dtype = dtype
        self.sparse = sparse
        self.handle_unknown = handle_unknown

    def fit(self, X, y=None):
        """Fit OneHotEncoder to X.

        Parameters
        ----------
        X : array-like, shape=(n_samples, n_feature)
            Input array of type int.

        Returns
        -------
        self
        """
        self.fit_transform(X)
        return self

    def _fit_transform(self, X):
        """Assumes X contains only categorical features."""
        X = check_array(X, dtype=np.int)
        if np.any(X < 0):
            raise ValueError("X needs to contain only non-negative integers.")
        n_samples, n_features = X.shape
        if self.n_values == 'auto':
            n_values = np.max(X, axis=0) + 1
        elif isinstance(self.n_values, numbers.Integral):
            if (np.max(X, axis=0) >= self.n_values).any():
                raise ValueError("Feature out of bounds for n_values=%d"
                                 % self.n_values)
            n_values = np.empty(n_features, dtype=np.int)
            n_values.fill(self.n_values)
        else:
            try:
                n_values = np.asarray(self.n_values, dtype=int)
            except (ValueError, TypeError):
                # Bug fix: report the offending parameter's type; the
                # original message printed type(X) instead of the type of
                # the bad ``n_values`` argument.
                raise TypeError("Wrong type for parameter `n_values`. Expected"
                                " 'auto', int or array of ints, got %r"
                                % type(self.n_values))
            if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
                raise ValueError("Shape mismatch: if n_values is an array,"
                                 " it has to be of shape (n_features,).")

        self.n_values_ = n_values
        # feature_indices_[i]..feature_indices_[i+1] is the output column
        # range owned by input feature i.
        n_values = np.hstack([[0], n_values])
        indices = np.cumsum(n_values)
        self.feature_indices_ = indices

        # Scatter a single 1 per (sample, feature) into the wide matrix.
        column_indices = (X + indices[:-1]).ravel()
        row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
                                n_features)
        data = np.ones(n_samples * n_features)
        out = sparse.coo_matrix((data, (row_indices, column_indices)),
                                shape=(n_samples, indices[-1]),
                                dtype=self.dtype).tocsr()

        if self.n_values == 'auto':
            # Keep only the columns for values actually seen in training.
            mask = np.array(out.sum(axis=0)).ravel() != 0
            active_features = np.where(mask)[0]
            out = out[:, active_features]
            self.active_features_ = active_features

        return out if self.sparse else out.toarray()

    def fit_transform(self, X, y=None):
        """Fit OneHotEncoder to X, then transform X.

        Equivalent to self.fit(X).transform(X), but more convenient and more
        efficient. See fit for the parameters, transform for the return value.
        """
        return _transform_selected(X, self._fit_transform,
                                   self.categorical_features, copy=True)

    def _transform(self, X):
        """Assumes X contains only categorical features."""
        X = check_array(X, dtype=np.int)
        if np.any(X < 0):
            raise ValueError("X needs to contain only non-negative integers.")
        n_samples, n_features = X.shape

        indices = self.feature_indices_
        if n_features != indices.shape[0] - 1:
            raise ValueError("X has different shape than during fitting."
                             " Expected %d, got %d."
                             % (indices.shape[0] - 1, n_features))

        # We use only those categorical features of X that are known using fit.
        # i.e lesser than n_values_ using mask.
        # This means, if self.handle_unknown is "ignore", the row_indices and
        # col_indices corresponding to the unknown categorical feature are
        # ignored.
        mask = (X < self.n_values_).ravel()
        if np.any(~mask):
            if self.handle_unknown not in ['error', 'ignore']:
                # Bug fix: the valid options are 'error' and 'ignore'; the
                # original message incorrectly named 'unknown'.
                raise ValueError("handle_unknown should be either error or "
                                 "ignore got %s" % self.handle_unknown)
            if self.handle_unknown == 'error':
                raise ValueError("unknown categorical feature present %s "
                                 "during transform." % X[~mask])

        column_indices = (X + indices[:-1]).ravel()[mask]
        row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
                                n_features)[mask]
        data = np.ones(np.sum(mask))
        out = sparse.coo_matrix((data, (row_indices, column_indices)),
                                shape=(n_samples, indices[-1]),
                                dtype=self.dtype).tocsr()
        if self.n_values == 'auto':
            out = out[:, self.active_features_]

        return out if self.sparse else out.toarray()

    def transform(self, X):
        """Transform X using one-hot encoding.

        Parameters
        ----------
        X : array-like, shape=(n_samples, n_features)
            Input array of type int.

        Returns
        -------
        X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
            Transformed input.
        """
        return _transform_selected(X, self._transform,
                                   self.categorical_features, copy=True)
|
ycaihua/scikit-learn
|
sklearn/preprocessing/data.py
|
Python
|
bsd-3-clause
| 39,819
|
[
"Gaussian"
] |
b2ced4967ee6f2b90524b880e586d45b961e48da4ab0f9489d89831eea9c7e39
|
# This file is part of cclib (http://cclib.sf.net), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2007, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
__revision__ = "$Revision$"
import numpy
import bettertest
class GenericTDunTest(bettertest.TestCase):
    """Time-dependent HF/DFT unittest for unrestricted case."""

    # Each test verifies that one excited-state attribute parsed into
    # self.data has the expected number of entries; self.number is set by
    # the program-specific subclass.

    def testenergiesnumber(self):
        """Is the length of etenergies correct?"""
        self.assertEqual(len(self.data.etenergies), self.number)

    def testoscsnumber(self):
        """Is the length of etoscs correct?"""
        self.assertEqual(len(self.data.etoscs), self.number)

    def testrotatsnumber(self):
        """Is the length of etrotats correct?"""
        self.assertEqual(len(self.data.etrotats), self.number)

    def testsecsnumber(self):
        """Is the length of etsecs correct?"""
        self.assertEqual(len(self.data.etsecs), self.number)

    def testsymsnumber(self):
        """Is the length of etsyms correct?"""
        self.assertEqual(len(self.data.etsyms), self.number)
class GaussianTDDFTunTest(GenericTDunTest):
    """Gaussian time-dependent HF/DFT unittest."""

    # Number of excited states expected in the parsed logfile.
    number = 24

    def testsyms(self):
        """Is etsyms populated by singlets and triplets 50/50?"""
        n_singlet = len([label for label in self.data.etsyms
                         if "Singlet" in label])
        n_triplet = len([label for label in self.data.etsyms
                         if "Triplet" in label])
        self.assertEqual(n_singlet, self.number/2)
        self.assertEqual(n_triplet, self.number/2)
if __name__=="__main__":
    # Run only the time-dependent (unrestricted) tests when this module
    # is executed directly.
    from testall import testall
    testall(modules=["TDun"])
|
Clyde-fare/cclib_bak
|
test/testTDun.py
|
Python
|
lgpl-2.1
| 1,911
|
[
"Gaussian",
"cclib"
] |
2e26e9a04d571c9bdb934c3b179202dfe1f58ff2d8953b811f32d21c6e22cf6f
|
import subprocess
import os
import datetime
import wand.image
import glob
def pnmtopdf(pnmfile, pdffile, resolution=None):
    """Convert a PNM image to PDF, then delete the source PNM file."""
    with wand.image.Image(filename=pnmfile, resolution=resolution) as source:
        with source.convert('pdf') as converted:
            converted.save(filename=pdffile)
    os.remove(pnmfile)
# Mapping from user-facing scan option names to the corresponding
# scanimage/scanadf command-line switches.
scan_options = {
    'device': '--device-name',
    'resolution': '--resolution',
    'mode': '--mode',
    'source': '--source',
    'brightness': '--brightness',
    'contrast': '--contrast',
    'width': '-x',
    'height': '-y',
    'left': '-l',
    'top': '-t',
}


def add_scan_options(cmd, options):
    """Append recognised scanner options from *options* to *cmd* in place.

    Parameters
    ----------
    cmd : list
        Command-line argument list being built; mutated in place.
    options : dict
        Option name -> value pairs; only keys present in ``scan_options``
        are used, everything else is ignored.
    """
    for name, arg in scan_options.items():
        if name in options:
            cmd += [arg, str(options[name])]
    # Bug fix: the original did `cmd = [str(c) for c in cmd]`, which rebinds
    # the local name and silently discards the stringified list -- the
    # caller never saw it. Assign through a slice so the caller's list is
    # updated and every element really is a string.
    cmd[:] = [str(c) for c in cmd]
def scanto(func, options):
    """Dispatch a scan request.

    Parameters:
        func: destination type; only 'FILE' is handled here -- other values
            are logged and ignored.
        options: dict of scan settings ('dir', 'adf', 'resolution', plus any
            key from scan_options). A copy is taken so the caller's dict is
            not mutated.

    NOTE(review): options['resolution'] is read unconditionally when
    converting to PDF below -- this raises KeyError if the caller did not
    supply a resolution; confirm callers always set it.
    """
    print('scanto %s %s'%(func, options))
    options = options.copy()
    if func == 'FILE':
        if not 'dir' in options:
            options['dir'] = '/tmp'
        dst = options['dir']
        os.makedirs(dst, exist_ok=True)
        # Timestamp used to group all files from this scan run.
        now = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
        adf = options.pop('adf', False)
        if adf:
            # Automatic document feeder: scanadf writes one numbered PNM
            # page per sheet ('%d' is expanded by scanadf, hence '%%d').
            cmd = ['scanadf',
                   '--output-file', os.path.join(dst, 'scan_%s_%%d.pnm'%(now))]
            add_scan_options(cmd, options)
            print('# ' + ' '.join(cmd))
            subprocess.call(cmd)
            pnmfiles = []
            pdffiles = []
            # Convert each scanned page to a single-page PDF.
            for pnmfile in glob.glob(os.path.join(dst, 'scan_%s_*.pnm'%(now))):
                pdffile = '%s.pdf'%(pnmfile[:-4])
                pnmtopdf(pnmfile, pdffile, options['resolution'])
                pnmfiles.append(pnmfile)
                pdffiles.append(pdffile)
            # Merge the page PDFs into one document, then remove the
            # intermediate per-page PDFs.
            cmd = ['pdfunite'] + pdffiles + [os.path.join(dst, 'scan_%s.pdf'%(now))]
            print('# ' + ' '.join(cmd))
            subprocess.call(cmd)
            for f in pdffiles:
                os.remove(f)
        else:
            # Flatbed: single page via scanimage, PNM written from the
            # child process's stdout.
            cmd = ['scanimage']
            add_scan_options(cmd, options)
            pnmfile = os.path.join(dst, 'scan_%s.pnm'%(now))
            with open(pnmfile, 'w') as pnm:
                print('# ' + ' '.join(cmd))
                process = subprocess.Popen(cmd, stdout=pnm)
                process.wait()
            pdffile = '%s.pdf'%(pnmfile[:-4])
            pnmtopdf(pnmfile, pdffile, options['resolution'])
            print('Wrote', pdffile)
|
esben/brother-scan
|
brscan/scanto.py
|
Python
|
gpl-3.0
| 2,260
|
[
"ADF"
] |
bb9d9b8ce9ab33163d346f81043a9877f18eb22734f320139954c89acd4096fe
|
# Copyright (C) 2001 greg Landrum and Rational Discovery LLC
"""basic unit testing code for query mols
"""
from __future__ import print_function
from rdkit import RDConfig
import unittest,os,sys
class TestCase(unittest.TestCase):
    def setUp(self):
        """Locate the compiled QueryMolTest executable for this platform."""
        print('\n%s: '%self.shortDescription(),end='')
        # decipher the name of the executable
        if(sys.platform == 'win32'):
            exe = 'QueryMolTest___Win32_Debug/QueryMolTest.exe'
        else:
            exe = 'querytest.exe'
        # update to use the full path
        self.exe = '%s/Code/GraphMol/%s'%(RDConfig.RDBaseDir,exe)

    def test1(self):
        """ the basic test """
        res = os.system(self.exe)
        # Bug fix: use a unittest assertion instead of a bare `assert`,
        # which is silently stripped when Python runs with optimizations
        # (-O), turning the test into a no-op.
        self.assertEqual(res, 0, 'test failed')
# Allow running this file directly as a standalone test script.
if __name__ == '__main__':
  unittest.main()
|
strets123/rdkit
|
Code/GraphMol/UnitTestQueryMol.py
|
Python
|
bsd-3-clause
| 734
|
[
"RDKit"
] |
df946def9cd1147dd9b287a4e6166c91dbe5b1d2d61763ddd342fbc989fa6d3a
|
names = [
{
"GivenName":"Joe",
"Surname":"Schmoe",
"EmailAddress":"test@example.com",
"TelephoneNumber":"402-427-6916",
"StreetAddress":"785 Oak Way",
"City":"Kennard",
"State":"NE",
"ZipCode":68034
},
{
"GivenName":"Ethel",
"Surname":"Wagner",
"EmailAddress":"EthelGWagner@einrot.com",
"TelephoneNumber":"508-929-3251",
"StreetAddress":"2653 Lyon Avenue",
"City":"Worcester",
"State":"MA",
"ZipCode":41609
},
{
"GivenName":"Gina",
"Surname":"Fincham",
"EmailAddress":"GinaSFincham@gustr.com",
"TelephoneNumber":"954-608-0439",
"StreetAddress":"3678 Kenwood Place",
"City":"Miramar",
"State":"FL",
"ZipCode":33025
},
{
"GivenName":"Teresa",
"Surname":"Harris",
"EmailAddress":"TeresaMHarris@gustr.com",
"TelephoneNumber":"919-382-2344",
"StreetAddress":"3693 Dola Mine Road",
"City":"Durham",
"State":"NC",
"ZipCode":27705
},
{
"GivenName":"Mike",
"Surname":"Zhang",
"EmailAddress":"MikeJZhang@armyspy.com",
"TelephoneNumber":"508-535-5877",
"StreetAddress":"2836 Kovar Road",
"City":"Boston",
"State":"MA",
"ZipCode":42110
},
{
"GivenName":"Hector",
"Surname":"Clement",
"EmailAddress":"HectorSClement@teleworm.us",
"TelephoneNumber":"609-333-1936",
"StreetAddress":"587 Lake Road",
"City":"Hopewell Mercer",
"State":"NJ",
"ZipCode":48525
},
{
"GivenName":"Brad",
"Surname":"Coffman",
"EmailAddress":"BradHCoffman@einrot.com",
"TelephoneNumber":"989-584-0128",
"StreetAddress":"1646 Mount Street",
"City":"Carson City",
"State":"MI",
"ZipCode":48811
},
{
"GivenName":"Dorothy",
"Surname":"Weller",
"EmailAddress":"DorothyAWeller@teleworm.us",
"TelephoneNumber":"513-894-0215",
"StreetAddress":"4851 Round Table Drive",
"City":"Hamilton",
"State":"OH",
"ZipCode":45011
},
{
"GivenName":"Pat",
"Surname":"Anthony",
"EmailAddress":"PatFAnthony@cuvox.de",
"TelephoneNumber":"985-655-7850",
"StreetAddress":"3994 Woodland Avenue",
"City":"New Orleans",
"State":"LA",
"ZipCode":70171
},
{
"GivenName":"Adah",
"Surname":"Meyer",
"EmailAddress":"AdahEMeyer@teleworm.us",
"TelephoneNumber":"773-374-9243",
"StreetAddress":"2117 Oakmound Drive",
"City":"Chicago",
"State":"IL",
"ZipCode":60617
},
{
"GivenName":"Angela",
"Surname":"Castro",
"EmailAddress":"AngelaHCastro@teleworm.us",
"TelephoneNumber":"914-406-8978",
"StreetAddress":"4211 Pallet Street",
"City":"West Nyack",
"State":"NY",
"ZipCode":10994
},
{
"GivenName":"Lynn",
"Surname":"Delgado",
"EmailAddress":"LynnYDelgado@rhyta.com",
"TelephoneNumber":"936-266-1092",
"StreetAddress":"4346 Woodrow Way",
"City":"Conroe",
"State":"TX",
"ZipCode":77301
},
{
"GivenName":"Ilene",
"Surname":"Hayne",
"EmailAddress":"IleneBHayne@einrot.com",
"TelephoneNumber":"928-680-7398",
"StreetAddress":"396 Martha Street",
"City":"Lake Havasu City",
"State":"AZ",
"ZipCode":86403
},
{
"GivenName":"Josh",
"Surname":"Brownlee",
"EmailAddress":"JoshSBrownlee@armyspy.com",
"TelephoneNumber":"405-732-9878",
"StreetAddress":"1593 Ruckman Road",
"City":"Oklahoma City",
"State":"OK",
"ZipCode":73110
},
{
"GivenName":"Sidney",
"Surname":"Rivera",
"EmailAddress":"SidneyJRivera@fleckens.hu",
"TelephoneNumber":"612-990-4625",
"StreetAddress":"1655 Rocket Drive",
"City":"Minneapolis",
"State":"MN",
"ZipCode":55415
},
{
"GivenName":"Kathleen",
"Surname":"Marshall",
"EmailAddress":"KathleenWMarshall@teleworm.us",
"TelephoneNumber":"301-280-5810",
"StreetAddress":"855 Roane Avenue",
"City":"Chevy Chase",
"State":"MD",
"ZipCode":20815
},
{
"GivenName":"Tom",
"Surname":"Escoto",
"EmailAddress":"TomKEscoto@armyspy.com",
"TelephoneNumber":"717-781-3544",
"StreetAddress":"3265 Aaron Smith Drive",
"City":"York",
"State":"PA",
"ZipCode":17404
},
{
"GivenName":"Lucille",
"Surname":"Ball",
"EmailAddress":"LucilleWBall@armyspy.com",
"TelephoneNumber":"909-798-2558",
"StreetAddress":"3450 Roosevelt Wilson Lane",
"City":"Redlands",
"State":"CA",
"ZipCode":92373
},
{
"GivenName":"Patricia",
"Surname":"Campbell",
"EmailAddress":"PatriciaBCampbell@fleckens.hu",
"TelephoneNumber":"720-217-1883",
"StreetAddress":"3829 Snider Street",
"City":"Denver",
"State":"CO",
"ZipCode":80216
},
{
"GivenName":"Carol",
"Surname":"Brickey",
"EmailAddress":"CarolWBrickey@rhyta.com",
"TelephoneNumber":"757-819-2361",
"StreetAddress":"4494 Jefferson Street",
"City":"Chesapeake",
"State":"VA",
"ZipCode":23320
},
{
"GivenName":"Jamel",
"Surname":"Bird",
"EmailAddress":"JamelRBird@fleckens.hu",
"TelephoneNumber":"510-575-0650",
"StreetAddress":"4778 Lindale Avenue",
"City":"San Francisco",
"State":"CA",
"ZipCode":94105
},
{
"GivenName":"Joshua",
"Surname":"Critchfield",
"EmailAddress":"JoshuaTCritchfield@teleworm.us",
"TelephoneNumber":"870-730-8230",
"StreetAddress":"1714 Sunset Drive",
"City":"Pine Bluff",
"State":"AR",
"ZipCode":71601
},
{
"GivenName":"Tracey",
"Surname":"Koenig",
"EmailAddress":"TraceyJKoenig@cuvox.de",
"TelephoneNumber":"646-244-7334",
"StreetAddress":"3843 Morningview Lane",
"City":"New York",
"State":"NY",
"ZipCode":10013
},
{
"GivenName":"Salvatore",
"Surname":"Cavanaugh",
"EmailAddress":"SalvatoreMCavanaugh@jourrapide.com",
"TelephoneNumber":"214-743-3714",
"StreetAddress":"4158 Carolyns Circle",
"City":"Dallas",
"State":"TX",
"ZipCode":75202
},
{
"GivenName":"Rachael",
"Surname":"Carter",
"EmailAddress":"RachaelNCarter@einrot.com",
"TelephoneNumber":"323-928-3107",
"StreetAddress":"419 Hillhaven Drive",
"City":"Anaheim",
"State":"CA",
"ZipCode":92801
},
{
"GivenName":"Wilson",
"Surname":"Young",
"EmailAddress":"WilsonCYoung@gustr.com",
"TelephoneNumber":"516-848-5599",
"StreetAddress":"823 Westwood Avenue",
"City":"Huntington",
"State":"NY",
"ZipCode":11743
},
{
"GivenName":"Neomi",
"Surname":"Perez",
"EmailAddress":"NeomiSPerez@jourrapide.com",
"TelephoneNumber":"616-742-7697",
"StreetAddress":"4932 West Street",
"City":"Grand Rapids",
"State":"MI",
"ZipCode":49503
},
{
"GivenName":"Timothy",
"Surname":"Mason",
"EmailAddress":"TimothyMMason@teleworm.us",
"TelephoneNumber":"724-647-0405",
"StreetAddress":"101 Michigan Avenue",
"City":"Portland",
"State":"PA",
"ZipCode":97205
},
{
"GivenName":"Lori",
"Surname":"Inge",
"EmailAddress":"LoriGInge@superrito.com",
"TelephoneNumber":"425-395-0046",
"StreetAddress":"3011 Conifer Drive",
"City":"Seattle",
"State":"WA",
"ZipCode":98101
},
{
"GivenName":"Scott",
"Surname":"Dalzell",
"EmailAddress":"ScottLDalzell@gustr.com",
"TelephoneNumber":"678-789-0549",
"StreetAddress":"4411 Edington Drive",
"City":"Norcross",
"State":"GA",
"ZipCode":30071
},
{
"GivenName":"Kevin",
"Surname":"Garcia",
"EmailAddress":"KevinAGarcia@fleckens.hu",
"TelephoneNumber":"843-435-9756",
"StreetAddress":"4350 Jerry Dove Drive",
"City":"Johnsonville",
"State":"SC",
"ZipCode":29555
},
{
"GivenName":"David",
"Surname":"McIntyre",
"EmailAddress":"DavidCMcIntyre@cuvox.de",
"TelephoneNumber":"740-455-8346",
"StreetAddress":"3937 Irving Road",
"City":"Zanesville",
"State":"OH",
"ZipCode":43701
},
{
"GivenName":"Steven",
"Surname":"Milton",
"EmailAddress":"StevenBMilton@rhyta.com",
"TelephoneNumber":"443-678-1666",
"StreetAddress":"3678 Hewes Avenue",
"City":"White Marsh",
"State":"MD",
"ZipCode":21162
},
{
"GivenName":"Rita",
"Surname":"Brandon",
"EmailAddress":"RitaABrandon@einrot.com",
"TelephoneNumber":"425-438-9230",
"StreetAddress":"4951 Conifer Drive",
"City":"Everett",
"State":"WA",
"ZipCode":98204
},
{
"GivenName":"Irma",
"Surname":"Nguyen",
"EmailAddress":"IrmaTNguyen@armyspy.com",
"TelephoneNumber":"502-649-3224",
"StreetAddress":"4188 Earnhardt Drive",
"City":"Louisville",
"State":"KY",
"ZipCode":40299
},
{
"GivenName":"Robert",
"Surname":"Claude",
"EmailAddress":"RobertSClaude@jourrapide.com",
"TelephoneNumber":"707-435-1495",
"StreetAddress":"2258 Fairway Drive",
"City":"Fairfield",
"State":"CA",
"ZipCode":94533
},
{
"GivenName":"Jonathan",
"Surname":"Hansen",
"EmailAddress":"JonathanMHansen@dayrep.com",
"TelephoneNumber":"573-491-5131",
"StreetAddress":"2952 Oak Ridge Drive",
"City":"New Bloomfield",
"State":"MO",
"ZipCode":65063
},
{
"GivenName":"Alan",
"Surname":"Banks",
"EmailAddress":"AlanABanks@jourrapide.com",
"TelephoneNumber":"740-480-3038",
"StreetAddress":"1901 Irving Road",
"City":"Mansfield",
"State":"OH",
"ZipCode":44907
},
{
"GivenName":"George",
"Surname":"Buchholtz",
"EmailAddress":"GeorgeBBuchholtz@gustr.com",
"TelephoneNumber":"317-215-5857",
"StreetAddress":"998 Henery Street",
"City":"Indianapolis",
"State":"IN",
"ZipCode":46225
},
{
"GivenName":"Kevin",
"Surname":"Barnett",
"EmailAddress":"KevinSBarnett@cuvox.de",
"TelephoneNumber":"765-427-9068",
"StreetAddress":"3617 Sugarfoot Lane",
"City":"Lafayette",
"State":"IN",
"ZipCode":47906
},
{
"GivenName":"Robert",
"Surname":"Whitfield",
"EmailAddress":"RobertVWhitfield@fleckens.hu",
"TelephoneNumber":"425-629-1226",
"StreetAddress":"4131 Main Street",
"City":"Seattle",
"State":"WA",
"ZipCode":98109
},
{
"GivenName":"Rachelle",
"Surname":"Rice",
"EmailAddress":"RachelleJRice@superrito.com",
"TelephoneNumber":"203-887-9758",
"StreetAddress":"1299 Raoul Wallenberg Place",
"City":"Wallingford",
"State":"CT",
"ZipCode":46492
},
{
"GivenName":"Traci",
"Surname":"Phillips",
"EmailAddress":"TraciNPhillips@jourrapide.com",
"TelephoneNumber":"308-879-7168",
"StreetAddress":"2735 Rollins Road",
"City":"Potter",
"State":"NE",
"ZipCode":69156
},
{
"GivenName":"Ellen",
"Surname":"Payne",
"EmailAddress":"EllenCPayne@armyspy.com",
"TelephoneNumber":"530-374-4132",
"StreetAddress":"4354 Riverwood Drive",
"City":"Sacramento",
"State":"CA",
"ZipCode":95814
},
{
"GivenName":"Tammy",
"Surname":"Register",
"EmailAddress":"TammyCRegister@rhyta.com",
"TelephoneNumber":"901-553-1385",
"StreetAddress":"256 Lightning Point Drive",
"City":"Memphis",
"State":"TN",
"ZipCode":38110
},
{
"GivenName":"Andrea",
"Surname":"Foster",
"EmailAddress":"AndreaLFoster@rhyta.com",
"TelephoneNumber":"323-577-8527",
"StreetAddress":"1947 Evergreen Lane",
"City":"Irvine",
"State":"CA",
"ZipCode":92618
},
{
"GivenName":"Mary",
"Surname":"Odea",
"EmailAddress":"MaryROdea@cuvox.de",
"TelephoneNumber":"847-415-9371",
"StreetAddress":"2801 Johnstown Road",
"City":"Chicago",
"State":"IL",
"ZipCode":60605
},
{
"GivenName":"Gary",
"Surname":"Pierce",
"EmailAddress":"GaryEPierce@einrot.com",
"TelephoneNumber":"603-654-8070",
"StreetAddress":"1064 Elliott Street",
"City":"Wilton",
"State":"NH",
"ZipCode":43086
},
{
"GivenName":"Douglas",
"Surname":"Wimbush",
"EmailAddress":"DouglasCWimbush@rhyta.com",
"TelephoneNumber":"313-238-1094",
"StreetAddress":"849 Nash Street",
"City":"Southfield",
"State":"MI",
"ZipCode":48075
},
{
"GivenName":"Thomas",
"Surname":"Shanklin",
"EmailAddress":"ThomasSShanklin@gustr.com",
"TelephoneNumber":"334-753-8877",
"StreetAddress":"4708 Quarry Drive",
"City":"Dothan",
"State":"AL",
"ZipCode":36303
},
{
"GivenName":"Louise",
"Surname":"Green",
"EmailAddress":"LouiseJGreen@jourrapide.com",
"TelephoneNumber":"314-542-4654",
"StreetAddress":"3618 Blane Street",
"City":"Saint Louis",
"State":"MO",
"ZipCode":63141
},
{
"GivenName":"Adrian",
"Surname":"Meighan",
"EmailAddress":"AdrianRMeighan@teleworm.us",
"TelephoneNumber":"206-522-5418",
"StreetAddress":"3863 Elliot Avenue",
"City":"Seattle",
"State":"WA",
"ZipCode":98115
},
{
"GivenName":"Marcus",
"Surname":"Kim",
"EmailAddress":"MarcusRKim@cuvox.de",
"TelephoneNumber":"661-319-8772",
"StreetAddress":"4129 Williams Avenue",
"City":"Bakersfield",
"State":"CA",
"ZipCode":93304
},
{
"GivenName":"William",
"Surname":"Depriest",
"EmailAddress":"WilliamEDepriest@superrito.com",
"TelephoneNumber":"337-266-7523",
"StreetAddress":"3127 Sherwood Circle",
"City":"Lafayette",
"State":"LA",
"ZipCode":70501
},
{
"GivenName":"Jorge",
"Surname":"Talavera",
"EmailAddress":"JorgeRTalavera@jourrapide.com",
"TelephoneNumber":"520-244-6988",
"StreetAddress":"3760 Elk Rd Little",
"City":"Tucson",
"State":"AZ",
"ZipCode":85701
},
{
"GivenName":"Joanne",
"Surname":"Vest",
"EmailAddress":"JoanneDVest@superrito.com",
"TelephoneNumber":"816-779-1490",
"StreetAddress":"151 Big Elm",
"City":"Peculiar",
"State":"MO",
"ZipCode":64078
},
{
"GivenName":"Josie",
"Surname":"Becker",
"EmailAddress":"JosieRBecker@teleworm.us",
"TelephoneNumber":"703-416-0310",
"StreetAddress":"3732 Daffodil Lane",
"City":"Arlington",
"State":"VA",
"ZipCode":22202
},
{
"GivenName":"Frank",
"Surname":"Kile",
"EmailAddress":"FrankMKile@superrito.com",
"TelephoneNumber":"978-338-5679",
"StreetAddress":"2279 Hampton Meadows",
"City":"Acton",
"State":"MA",
"ZipCode":41720
},
{
"GivenName":"Patricia",
"Surname":"Burnett",
"EmailAddress":"PatriciaRBurnett@dayrep.com",
"TelephoneNumber":"443-632-0938",
"StreetAddress":"1066 Hewes Avenue",
"City":"Columbia",
"State":"MD",
"ZipCode":21046
},
{
"GivenName":"Lisa",
"Surname":"Willis",
"EmailAddress":"LisaTWillis@jourrapide.com",
"TelephoneNumber":"904-464-9952",
"StreetAddress":"4948 Arrowood Drive",
"City":"Jacksonville",
"State":"FL",
"ZipCode":32256
},
{
"GivenName":"Rusty",
"Surname":"Carey",
"EmailAddress":"RustyCCarey@superrito.com",
"TelephoneNumber":"952-446-9054",
"StreetAddress":"1858 Oral Lake Road",
"City":"St Bonifacius",
"State":"MN",
"ZipCode":55364
},
{
"GivenName":"George",
"Surname":"Morales",
"EmailAddress":"GeorgeIMorales@einrot.com",
"TelephoneNumber":"660-725-9538",
"StreetAddress":"3453 Oak Lane",
"City":"Burlington Junction",
"State":"MO",
"ZipCode":64428
},
{
"GivenName":"Maria",
"Surname":"Harris",
"EmailAddress":"MariaJHarris@rhyta.com",
"TelephoneNumber":"617-814-7807",
"StreetAddress":"4144 Gerald L. Bates Drive",
"City":"Cambridge",
"State":"MA",
"ZipCode":42141
},
{
"GivenName":"Donna",
"Surname":"Moody",
"EmailAddress":"DonnaRMoody@fleckens.hu",
"TelephoneNumber":"845-932-1838",
"StreetAddress":"1547 Bingamon Branch Road",
"City":"Lake Huntington",
"State":"NY",
"ZipCode":12752
},
{
"GivenName":"Mark",
"Surname":"Renda",
"EmailAddress":"MarkBRenda@jourrapide.com",
"TelephoneNumber":"670-483-9080",
"StreetAddress":"2408 Flint Street",
"City":"Saipan",
"State":"MP",
"ZipCode":96950
},
{
"GivenName":"William",
"Surname":"Breen",
"EmailAddress":"WilliamSBreen@superrito.com",
"TelephoneNumber":"815-629-8489",
"StreetAddress":"2316 Matthews Street",
"City":"Shirland",
"State":"IL",
"ZipCode":61079
},
{
"GivenName":"Mary",
"Surname":"Flores",
"EmailAddress":"MaryBFlores@armyspy.com",
"TelephoneNumber":"240-237-5388",
"StreetAddress":"4362 Village View Drive",
"City":"Washington",
"State":"MD",
"ZipCode":20005
},
{
"GivenName":"Raymond",
"Surname":"Williams",
"EmailAddress":"RaymondAWilliams@cuvox.de",
"TelephoneNumber":"562-570-7551",
"StreetAddress":"3813 Thompson Street",
"City":"Long Beach",
"State":"CA",
"ZipCode":90802
},
{
"GivenName":"Deirdre",
"Surname":"Ragland",
"EmailAddress":"DeirdreJRagland@cuvox.de",
"TelephoneNumber":"304-388-0942",
"StreetAddress":"399 Fulton Street",
"City":"Charleston",
"State":"WV",
"ZipCode":25301
},
{
"GivenName":"Spencer",
"Surname":"Craft",
"EmailAddress":"SpencerGCraft@teleworm.us",
"TelephoneNumber":"321-728-0012",
"StreetAddress":"2323 Rosemont Avenue",
"City":"Melbourne",
"State":"FL",
"ZipCode":32901
},
{
"GivenName":"Nellie",
"Surname":"Smith",
"EmailAddress":"NellieHSmith@superrito.com",
"TelephoneNumber":"530-870-9496",
"StreetAddress":"2586 Byers Lane",
"City":"Rancho Cordova",
"State":"CA",
"ZipCode":95742
},
{
"GivenName":"Trenton",
"Surname":"Gunn",
"EmailAddress":"TrentonSGunn@armyspy.com",
"TelephoneNumber":"734-440-8901",
"StreetAddress":"1304 Charles Street",
"City":"Farmington Hills",
"State":"MI",
"ZipCode":48335
},
{
"GivenName":"Ted",
"Surname":"Ethridge",
"EmailAddress":"TedJEthridge@armyspy.com",
"TelephoneNumber":"917-771-6678",
"StreetAddress":"4898 Settlers Lane",
"City":"New York",
"State":"NY",
"ZipCode":10011
},
{
"GivenName":"Mary",
"Surname":"Berger",
"EmailAddress":"MaryGBerger@cuvox.de",
"TelephoneNumber":"401-625-4726",
"StreetAddress":"475 Winding Way",
"City":"Tiverton",
"State":"RI",
"ZipCode":42878
},
{
"GivenName":"Shirley",
"Surname":"Roberson",
"EmailAddress":"ShirleyARoberson@teleworm.us",
"TelephoneNumber":"504-598-7400",
"StreetAddress":"4906 Big Indian",
"City":"New Orleans",
"State":"LA",
"ZipCode":70112
},
{
"GivenName":"Erna",
"Surname":"Dias",
"EmailAddress":"ErnaMDias@rhyta.com",
"TelephoneNumber":"856-315-5133",
"StreetAddress":"2788 Lee Avenue",
"City":"Camden",
"State":"NJ",
"ZipCode":48102
},
{
"GivenName":"Gale",
"Surname":"Linker",
"EmailAddress":"GaleBLinker@cuvox.de",
"TelephoneNumber":"205-763-9630",
"StreetAddress":"1287 Petunia Way",
"City":"Lincoln",
"State":"AL",
"ZipCode":35096
},
{
"GivenName":"Margaret",
"Surname":"Tillotson",
"EmailAddress":"MargaretTTillotson@einrot.com",
"TelephoneNumber":"330-410-7423",
"StreetAddress":"3891 Derek Drive",
"City":"Akron",
"State":"OH",
"ZipCode":44308
},
{
"GivenName":"Fletcher",
"Surname":"Lafleur",
"EmailAddress":"FletcherCLafleur@cuvox.de",
"TelephoneNumber":"303-462-6422",
"StreetAddress":"3365 Scheuvront Drive",
"City":"Lakewood",
"State":"CO",
"ZipCode":80214
},
{
"GivenName":"Iris",
"Surname":"Jones",
"EmailAddress":"IrisJJones@superrito.com",
"TelephoneNumber":"270-719-0613",
"StreetAddress":"3660 Coffman Alley",
"City":"Hopkinsville",
"State":"KY",
"ZipCode":42240
},
{
"GivenName":"Roger",
"Surname":"Chalker",
"EmailAddress":"RogerJChalker@cuvox.de",
"TelephoneNumber":"608-435-9380",
"StreetAddress":"1827 Lauren Drive",
"City":"Wilton",
"State":"WI",
"ZipCode":54670
},
{
"GivenName":"Betty",
"Surname":"Baker",
"EmailAddress":"BettyJBaker@fleckens.hu",
"TelephoneNumber":"212-359-2620",
"StreetAddress":"453 Farnum Road",
"City":"New York",
"State":"NY",
"ZipCode":10004
},
{
"GivenName":"Janet",
"Surname":"Doe",
"EmailAddress":"JanetJDoe@einrot.com",
"TelephoneNumber":"601-765-2874",
"StreetAddress":"4069 School House Road",
"City":"Collins",
"State":"MS",
"ZipCode":39428
},
{
"GivenName":"Mitchell",
"Surname":"Richardson",
"EmailAddress":"MitchellARichardson@cuvox.de",
"TelephoneNumber":"781-537-2422",
"StreetAddress":"4422 Stanton Hollow Road",
"City":"Cambridge",
"State":"MA",
"ZipCode":42141
},
{
"GivenName":"Gwen",
"Surname":"Garza",
"EmailAddress":"GwenSGarza@superrito.com",
"TelephoneNumber":"419-842-0842",
"StreetAddress":"2010 Upland Avenue",
"City":"Sylvania",
"State":"OH",
"ZipCode":43617
},
{
"GivenName":"Carl",
"Surname":"Turner",
"EmailAddress":"CarlSTurner@superrito.com",
"TelephoneNumber":"318-442-9203",
"StreetAddress":"2850 August Lane",
"City":"Alexandria",
"State":"LA",
"ZipCode":71301
},
{
"GivenName":"Brian",
"Surname":"Jerome",
"EmailAddress":"BrianCJerome@jourrapide.com",
"TelephoneNumber":"662-721-6607",
"StreetAddress":"1133 Rafe Lane",
"City":"Cleveland",
"State":"MS",
"ZipCode":38732
},
{
"GivenName":"Linda",
"Surname":"Vida",
"EmailAddress":"LindaHVida@armyspy.com",
"TelephoneNumber":"708-455-6186",
"StreetAddress":"4398 Hog Camp Road",
"City":"Wheeling",
"State":"IL",
"ZipCode":60090
},
{
"GivenName":"Ronald",
"Surname":"Harris",
"EmailAddress":"RonaldTHarris@fleckens.hu",
"TelephoneNumber":"717-866-6646",
"StreetAddress":"1729 Simpson Avenue",
"City":"Myerstown",
"State":"PA",
"ZipCode":17067
},
{
"GivenName":"James",
"Surname":"Barrett",
"EmailAddress":"JamesMBarrett@fleckens.hu",
"TelephoneNumber":"781-252-5502",
"StreetAddress":"4734 Burke Street",
"City":"Walpole",
"State":"MA",
"ZipCode":42081
},
{
"GivenName":"Inez",
"Surname":"Hoadley",
"EmailAddress":"InezRHoadley@superrito.com",
"TelephoneNumber":"313-441-3693",
"StreetAddress":"2919 Woodbridge Lane",
"City":"Dearborn",
"State":"MI",
"ZipCode":48126
},
{
"GivenName":"Robert",
"Surname":"Grimes",
"EmailAddress":"RobertCGrimes@dayrep.com",
"TelephoneNumber":"607-563-8836",
"StreetAddress":"2876 Frosty Lane",
"City":"Sidney",
"State":"NY",
"ZipCode":13838
},
{
"GivenName":"Forrest",
"Surname":"Mayhew",
"EmailAddress":"ForrestHMayhew@rhyta.com",
"TelephoneNumber":"312-379-5603",
"StreetAddress":"4314 Pringle Drive",
"City":"Chicago",
"State":"IL",
"ZipCode":60605
},
{
"GivenName":"Larry",
"Surname":"Aldridge",
"EmailAddress":"LarryMAldridge@dayrep.com",
"TelephoneNumber":"609-517-8300",
"StreetAddress":"4159 Lincoln Street",
"City":"Pleasantville",
"State":"NJ",
"ZipCode":48232
},
{
"GivenName":"Garnet",
"Surname":"Berger",
"EmailAddress":"GarnetTBerger@teleworm.us",
"TelephoneNumber":"810-318-8190",
"StreetAddress":"844 Don Jackson Lane",
"City":"Southfield",
"State":"MI",
"ZipCode":48075
},
{
"GivenName":"Teresa",
"Surname":"Keeton",
"EmailAddress":"TeresaAKeeton@jourrapide.com",
"TelephoneNumber":"843-852-0659",
"StreetAddress":"3977 Camden Place",
"City":"Charleston",
"State":"SC",
"ZipCode":29407
},
{
"GivenName":"Cindy",
"Surname":"Barth",
"EmailAddress":"CindyKBarth@cuvox.de",
"TelephoneNumber":"601-806-8456",
"StreetAddress":"3837 Devils Hill Road",
"City":"Jackson",
"State":"MS",
"ZipCode":39211
},
{
"GivenName":"Benjamin",
"Surname":"Hong",
"EmailAddress":"BenjaminCHong@superrito.com",
"TelephoneNumber":"612-802-1393",
"StreetAddress":"4543 Rocket Drive",
"City":"Minneapolis",
"State":"MN",
"ZipCode":55406
},
{
"GivenName":"Nakia",
"Surname":"Sells",
"EmailAddress":"NakiaDSells@einrot.com",
"TelephoneNumber":"202-291-7784",
"StreetAddress":"2456 Passaic Street",
"City":"Washington",
"State":"DC",
"ZipCode":20011
},
{
"GivenName":"Clifton",
"Surname":"Rasmussen",
"EmailAddress":"CliftonARasmussen@einrot.com",
"TelephoneNumber":"216-335-3416",
"StreetAddress":"3639 Glenwood Avenue",
"City":"Cleveland",
"State":"OH",
"ZipCode":44109
},
{
"GivenName":"Virginia",
"Surname":"Deese",
"EmailAddress":"VirginiaJDeese@teleworm.us",
"TelephoneNumber":"706-209-6847",
"StreetAddress":"4766 Holly Street",
"City":"Athens",
"State":"GA",
"ZipCode":30601
},
{
"GivenName":"Charles",
"Surname":"Jason",
"EmailAddress":"CharlesPJason@dayrep.com",
"TelephoneNumber":"513-919-5448",
"StreetAddress":"4275 Jenna Lane",
"City":"Cincinnati",
"State":"OH",
"ZipCode":45249
},
{
"GivenName":"Christina",
"Surname":"Johnstone",
"EmailAddress":"ChristinaMJohnstone@rhyta.com",
"TelephoneNumber":"815-262-0378",
"StreetAddress":"2511 Emeral Dreams Drive",
"City":"Rockford",
"State":"IL",
"ZipCode":61101
},
{
"GivenName":"Cheryl",
"Surname":"Fields",
"EmailAddress":"CherylSFields@gustr.com",
"TelephoneNumber":"781-368-8660",
"StreetAddress":"4518 Romano Street",
"City":"Winchester",
"State":"MA",
"ZipCode":41890
},
{
"GivenName":"Inez",
"Surname":"Simpson",
"EmailAddress":"InezJSimpson@einrot.com",
"TelephoneNumber":"617-380-8712",
"StreetAddress":"3094 Aspen Court",
"City":"Boston",
"State":"MA",
"ZipCode":42110
},
{
"GivenName":"Inez",
"Surname":"Hershberger",
"EmailAddress":"InezPHershberger@cuvox.de",
"TelephoneNumber":"630-292-6200",
"StreetAddress":"1157 Walkers Ridge Way",
"City":"Hickory Hills",
"State":"IL",
"ZipCode":60457
},
{
"GivenName":"Idalia",
"Surname":"Stone",
"EmailAddress":"IdaliaCStone@einrot.com",
"TelephoneNumber":"305-985-2978",
"StreetAddress":"3536 Warner Street",
"City":"Big Pine Key",
"State":"FL",
"ZipCode":33043
},
{
"GivenName":"Teri",
"Surname":"Trantham",
"EmailAddress":"TeriETrantham@armyspy.com",
"TelephoneNumber":"845-306-4967",
"StreetAddress":"2145 Benedum Drive",
"City":"New York",
"State":"NY",
"ZipCode":10005
},
{
"GivenName":"Jacqueline",
"Surname":"Mohr",
"EmailAddress":"JacquelineJMohr@jourrapide.com",
"TelephoneNumber":"917-378-9572",
"StreetAddress":"3040 Bicetown Road",
"City":"New York",
"State":"NY",
"ZipCode":10013
},
{
"GivenName":"Carol",
"Surname":"Washington",
"EmailAddress":"CarolRWashington@teleworm.us",
"TelephoneNumber":"212-288-8365",
"StreetAddress":"410 Farnum Road",
"City":"New York",
"State":"NY",
"ZipCode":10021
},
{
"GivenName":"Noel",
"Surname":"Owens",
"EmailAddress":"NoelDOwens@teleworm.us",
"TelephoneNumber":"316-973-3142",
"StreetAddress":"3471 Henery Street",
"City":"Wichita",
"State":"KS",
"ZipCode":67202
},
{
"GivenName":"Jeanne",
"Surname":"Johnson",
"EmailAddress":"JeanneFJohnson@dayrep.com",
"TelephoneNumber":"808-438-6562",
"StreetAddress":"1147 Arron Smith Drive",
"City":"Honolulu",
"State":"HI",
"ZipCode":96818
},
{
"GivenName":"Candace",
"Surname":"Patrick",
"EmailAddress":"CandaceLPatrick@gustr.com",
"TelephoneNumber":"402-918-7889",
"StreetAddress":"2298 Bungalow Road",
"City":"Omaha",
"State":"NE",
"ZipCode":68114
},
{
"GivenName":"Stephen",
"Surname":"Farrell",
"EmailAddress":"StephenCFarrell@dayrep.com",
"TelephoneNumber":"908-768-9924",
"StreetAddress":"3535 Beechwood Avenue",
"City":"Rochelle Park",
"State":"NJ",
"ZipCode":47662
},
{
"GivenName":"Robert",
"Surname":"Piche",
"EmailAddress":"RobertAPiche@jourrapide.com",
"TelephoneNumber":"201-227-6219",
"StreetAddress":"1809 West Side Avenue",
"City":"Englewood",
"State":"NJ",
"ZipCode":47631
},
{
"GivenName":"Nora",
"Surname":"Cortez",
"EmailAddress":"NoraJCortez@gustr.com",
"TelephoneNumber":"773-847-2819",
"StreetAddress":"4192 Point Street",
"City":"Chicago",
"State":"IL",
"ZipCode":60609
},
{
"GivenName":"David",
"Surname":"Shin",
"EmailAddress":"DavidFShin@einrot.com",
"TelephoneNumber":"816-216-0040",
"StreetAddress":"2410 Nutter Street",
"City":"Kansas City",
"State":"MO",
"ZipCode":64106
},
{
"GivenName":"Michael",
"Surname":"Wilson",
"EmailAddress":"MichaelDWilson@gustr.com",
"TelephoneNumber":"724-909-0979",
"StreetAddress":"261 Spruce Drive",
"City":"Pittsburgh",
"State":"PA",
"ZipCode":15201
},
{
"GivenName":"Boris",
"Surname":"Cantrell",
"EmailAddress":"BorisMCantrell@einrot.com",
"TelephoneNumber":"515-545-0930",
"StreetAddress":"4882 Nutters Barn Lane",
"City":"Badger",
"State":"IA",
"ZipCode":50516
},
{
"GivenName":"Miguel",
"Surname":"Nason",
"EmailAddress":"MiguelHNason@dayrep.com",
"TelephoneNumber":"212-594-2308",
"StreetAddress":"4443 Oakwood Avenue",
"City":"New York",
"State":"NY",
"ZipCode":10018
},
{
"GivenName":"Jill",
"Surname":"Marshall",
"EmailAddress":"JillSMarshall@jourrapide.com",
"TelephoneNumber":"937-546-6684",
"StreetAddress":"4146 Ingram Street",
"City":"Dayton",
"State":"OH",
"ZipCode":45406
},
{
"GivenName":"Jerold",
"Surname":"Miller",
"EmailAddress":"JeroldJMiller@jourrapide.com",
"TelephoneNumber":"630-613-7557",
"StreetAddress":"38 Kembery Drive",
"City":"Chicago",
"State":"IL",
"ZipCode":60607
},
{
"GivenName":"Robert",
"Surname":"Boden",
"EmailAddress":"RobertMBoden@armyspy.com",
"TelephoneNumber":"781-975-7388",
"StreetAddress":"4750 Hummingbird Way",
"City":"Brockton",
"State":"MA",
"ZipCode":42401
},
{
"GivenName":"Lisa",
"Surname":"Pettaway",
"EmailAddress":"LisaLPettaway@fleckens.hu",
"TelephoneNumber":"530-202-8312",
"StreetAddress":"3144 Polk Street",
"City":"Sacramento",
"State":"CA",
"ZipCode":95814
},
{
"GivenName":"Andrew",
"Surname":"Moses",
"EmailAddress":"AndrewCMoses@dayrep.com",
"TelephoneNumber":"336-395-5822",
"StreetAddress":"3306 Havanna Street",
"City":"Greensboro",
"State":"NC",
"ZipCode":27401
},
{
"GivenName":"Maggie",
"Surname":"Williams",
"EmailAddress":"MaggieEWilliams@cuvox.de",
"TelephoneNumber":"540-653-1839",
"StreetAddress":"1503 White Pine Lane",
"City":"Dahlgren",
"State":"VA",
"ZipCode":22448
},
{
"GivenName":"Larry",
"Surname":"Wojtowicz",
"EmailAddress":"LarryJWojtowicz@dayrep.com",
"TelephoneNumber":"850-519-3855",
"StreetAddress":"1086 Morgan Street",
"City":"Tallahassee",
"State":"FL",
"ZipCode":32301
},
{
"GivenName":"Paul",
"Surname":"Whitten",
"EmailAddress":"PaulMWhitten@jourrapide.com",
"TelephoneNumber":"518-469-7518",
"StreetAddress":"816 Hardesty Street",
"City":"North Greenbush",
"State":"NY",
"ZipCode":12144
},
{
"GivenName":"Ryan",
"Surname":"Cureton",
"EmailAddress":"RyanMCureton@cuvox.de",
"TelephoneNumber":"541-438-7044",
"StreetAddress":"1451 Sycamore Road",
"City":"Blodgett",
"State":"OR",
"ZipCode":97326
},
{
"GivenName":"Michael",
"Surname":"Gonzalez",
"EmailAddress":"MichaelSGonzalez@fleckens.hu",
"TelephoneNumber":"606-208-5663",
"StreetAddress":"3791 Leroy Lane",
"City":"Lexington",
"State":"KY",
"ZipCode":40505
},
{
"GivenName":"Mike",
"Surname":"Pellham",
"EmailAddress":"MikeDPellham@dayrep.com",
"TelephoneNumber":"901-829-7173",
"StreetAddress":"2966 Burton Avenue",
"City":"Rosemark",
"State":"TN",
"ZipCode":38053
},
{
"GivenName":"Alan",
"Surname":"Huntley",
"EmailAddress":"AlanBHuntley@rhyta.com",
"TelephoneNumber":"973-580-7950",
"StreetAddress":"1404 Deer Ridge Drive",
"City":"Rochelle Park",
"State":"NJ",
"ZipCode":47662
},
{
"GivenName":"Rogelio",
"Surname":"Aguilar",
"EmailAddress":"RogelioAAguilar@superrito.com",
"TelephoneNumber":"586-834-9520",
"StreetAddress":"4185 Reppert Coal Road",
"City":"Southfield",
"State":"MI",
"ZipCode":48075
},
{
"GivenName":"Minnie",
"Surname":"Creel",
"EmailAddress":"MinnieFCreel@jourrapide.com",
"TelephoneNumber":"559-212-0876",
"StreetAddress":"944 Center Street",
"City":"Fresno",
"State":"CA",
"ZipCode":93721
},
{
"GivenName":"Terrell",
"Surname":"Nash",
"EmailAddress":"TerrellJNash@gustr.com",
"TelephoneNumber":"865-354-7141",
"StreetAddress":"1026 Wiseman Street",
"City":"Rockwood",
"State":"TN",
"ZipCode":37854
},
{
"GivenName":"Jeffery",
"Surname":"Branson",
"EmailAddress":"JefferyKBranson@rhyta.com",
"TelephoneNumber":"517-630-1851",
"StreetAddress":"4685 Haven Lane",
"City":"Albion",
"State":"MI",
"ZipCode":49224
},
{
"GivenName":"Carlos",
"Surname":"Carver",
"EmailAddress":"CarlosLCarver@einrot.com",
"TelephoneNumber":"724-223-4087",
"StreetAddress":"1320 Leo Street",
"City":"Washington",
"State":"PA",
"ZipCode":15301
},
{
"GivenName":"Clara",
"Surname":"Hayes",
"EmailAddress":"ClaraAHayes@cuvox.de",
"TelephoneNumber":"502-696-4226",
"StreetAddress":"3704 Earnhardt Drive",
"City":"Frankfort",
"State":"KY",
"ZipCode":40601
},
{
"GivenName":"Stanley",
"Surname":"Farley",
"EmailAddress":"StanleyLFarley@teleworm.us",
"TelephoneNumber":"909-298-0744",
"StreetAddress":"1722 Bel Meadow Drive",
"City":"Los Angeles",
"State":"CA",
"ZipCode":90017
},
{
"GivenName":"Lillie",
"Surname":"Rosa",
"EmailAddress":"LillieARosa@dayrep.com",
"TelephoneNumber":"605-964-5789",
"StreetAddress":"3940 Leroy Lane",
"City":"Eagle Butte",
"State":"SD",
"ZipCode":57625
},
{
"GivenName":"Keith",
"Surname":"Lewis",
"EmailAddress":"KeithJLewis@superrito.com",
"TelephoneNumber":"970-871-8547",
"StreetAddress":"3190 Stark Hollow Road",
"City":"Steamboat Springs",
"State":"CO",
"ZipCode":80487
},
{
"GivenName":"Cameron",
"Surname":"Hall",
"EmailAddress":"CameronKHall@fleckens.hu",
"TelephoneNumber":"781-572-6743",
"StreetAddress":"2517 Single Street",
"City":"South Boston",
"State":"MA",
"ZipCode":42127
},
{
"GivenName":"Keith",
"Surname":"Cobb",
"EmailAddress":"KeithDCobb@rhyta.com",
"TelephoneNumber":"508-350-1773",
"StreetAddress":"3652 C Street",
"City":"East Bridgewater",
"State":"MA",
"ZipCode":42333
},
{
"GivenName":"Francis",
"Surname":"Bryant",
"EmailAddress":"FrancisDBryant@teleworm.us",
"TelephoneNumber":"503-366-7608",
"StreetAddress":"692 Mattson Street",
"City":"St Helens",
"State":"OR",
"ZipCode":97951
},
{
"GivenName":"Elizabeth",
"Surname":"Jones",
"EmailAddress":"ElizabethCJones@rhyta.com",
"TelephoneNumber":"502-570-7426",
"StreetAddress":"1915 Gregory Lane",
"City":"Georgetown",
"State":"KY",
"ZipCode":40324
},
{
"GivenName":"Wayne",
"Surname":"Asaro",
"EmailAddress":"WayneLAsaro@armyspy.com",
"TelephoneNumber":"713-467-0705",
"StreetAddress":"4121 Monroe Street",
"City":"Houston",
"State":"TX",
"ZipCode":77055
},
{
"GivenName":"Cynthia",
"Surname":"Jenkins",
"EmailAddress":"CynthiaRJenkins@einrot.com",
"TelephoneNumber":"989-257-4135",
"StreetAddress":"1495 Hart Ridge Road",
"City":"Long Lake",
"State":"MI",
"ZipCode":48743
},
{
"GivenName":"Tommy",
"Surname":"Chacon",
"EmailAddress":"TommyMChacon@cuvox.de",
"TelephoneNumber":"318-564-1310",
"StreetAddress":"3613 Norma Lane",
"City":"Shreveport",
"State":"LA",
"ZipCode":71101
},
{
"GivenName":"Peter",
"Surname":"Reich",
"EmailAddress":"PeterLReich@cuvox.de",
"TelephoneNumber":"415-820-8521",
"StreetAddress":"4180 Jim Rosa Lane",
"City":"San Jose",
"State":"CA",
"ZipCode":95131
},
{
"GivenName":"Laura",
"Surname":"Chevalier",
"EmailAddress":"LauraWChevalier@einrot.com",
"TelephoneNumber":"423-644-5754",
"StreetAddress":"2690 Raver Croft Drive",
"City":"Chattanooga",
"State":"TN",
"ZipCode":37403
},
{
"GivenName":"Terrance",
"Surname":"Jackson",
"EmailAddress":"TerranceMJackson@superrito.com",
"TelephoneNumber":"661-846-0684",
"StreetAddress":"1339 Gateway Avenue",
"City":"Bakersfield",
"State":"CA",
"ZipCode":93301
},
{
"GivenName":"Maria",
"Surname":"Burke",
"EmailAddress":"MariaDBurke@jourrapide.com",
"TelephoneNumber":"845-367-6769",
"StreetAddress":"2514 Benedum Drive",
"City":"New York",
"State":"NY",
"ZipCode":10011
},
{
"GivenName":"Lance",
"Surname":"Babcock",
"EmailAddress":"LanceSBabcock@armyspy.com",
"TelephoneNumber":"320-857-0769",
"StreetAddress":"2967 Red Hawk Road",
"City":"Grove City",
"State":"MN",
"ZipCode":56243
},
{
"GivenName":"Thomas",
"Surname":"Sellers",
"EmailAddress":"ThomasSSellers@dayrep.com",
"TelephoneNumber":"715-385-3145",
"StreetAddress":"4354 Abner Road",
"City":"Boulder Junction",
"State":"WI",
"ZipCode":54512
},
{
"GivenName":"Bella",
"Surname":"Baker",
"EmailAddress":"BellaMBaker@einrot.com",
"TelephoneNumber":"515-213-8086",
"StreetAddress":"3021 Jenna Lane",
"City":"West Des Moines",
"State":"IA",
"ZipCode":50266
},
{
"GivenName":"Matthew",
"Surname":"Powell",
"EmailAddress":"MatthewCPowell@jourrapide.com",
"TelephoneNumber":"315-493-7564",
"StreetAddress":"744 Buckhannan Avenue",
"City":"Carthage",
"State":"NY",
"ZipCode":13619
},
{
"GivenName":"Andrea",
"Surname":"Draves",
"EmailAddress":"AndreaBDraves@jourrapide.com",
"TelephoneNumber":"256-525-7510",
"StreetAddress":"3041 Maple Lane",
"City":"Anniston",
"State":"AL",
"ZipCode":36201
},
{
"GivenName":"Donna",
"Surname":"Scanlon",
"EmailAddress":"DonnaJScanlon@rhyta.com",
"TelephoneNumber":"314-977-0341",
"StreetAddress":"1857 Gandy Street",
"City":"Saint Louis",
"State":"MO",
"ZipCode":63130
},
{
"GivenName":"Neil",
"Surname":"Jones",
"EmailAddress":"NeilMJones@fleckens.hu",
"TelephoneNumber":"931-645-1054",
"StreetAddress":"1271 Chenoweth Drive",
"City":"Clarksville",
"State":"TN",
"ZipCode":37040
},
{
"GivenName":"Anthony",
"Surname":"Rood",
"EmailAddress":"AnthonyBRood@cuvox.de",
"TelephoneNumber":"307-621-4479",
"StreetAddress":"3690 Thorn Street",
"City":"Sheridan",
"State":"WY",
"ZipCode":82801
},
{
"GivenName":"Betty",
"Surname":"Benson",
"EmailAddress":"BettyFBenson@teleworm.us",
"TelephoneNumber":"509-727-9230",
"StreetAddress":"3933 Sun Valley Road",
"City":"Kennewick",
"State":"WA",
"ZipCode":99336
},
{
"GivenName":"Melissa",
"Surname":"Kelly",
"EmailAddress":"MelissaRKelly@gustr.com",
"TelephoneNumber":"650-861-9195",
"StreetAddress":"2191 Duck Creek Road",
"City":"Concord",
"State":"CA",
"ZipCode":94520
},
{
"GivenName":"Timothy",
"Surname":"Mucci",
"EmailAddress":"TimothyRMucci@superrito.com",
"TelephoneNumber":"717-227-5561",
"StreetAddress":"1222 Hidden Valley Road",
"City":"Glen Rock",
"State":"PA",
"ZipCode":17327
},
{
"GivenName":"Cathy",
"Surname":"Ortiz",
"EmailAddress":"CathyJOrtiz@gustr.com",
"TelephoneNumber":"219-944-8795",
"StreetAddress":"3946 Jadewood Drive",
"City":"Gary",
"State":"IN",
"ZipCode":46404
},
{
"GivenName":"Margaret",
"Surname":"Smith",
"EmailAddress":"MargaretJSmith@superrito.com",
"TelephoneNumber":"706-685-8987",
"StreetAddress":"685 Radio Park Drive",
"City":"Columbus",
"State":"GA",
"ZipCode":31903
},
{
"GivenName":"Gregory",
"Surname":"Ellison",
"EmailAddress":"GregoryREllison@armyspy.com",
"TelephoneNumber":"641-788-9118",
"StreetAddress":"3673 Morningview Lane",
"City":"Blockton",
"State":"IA",
"ZipCode":50836
},
{
"GivenName":"Leah",
"Surname":"Dwight",
"EmailAddress":"LeahEDwight@rhyta.com",
"TelephoneNumber":"305-241-0017",
"StreetAddress":"2316 Agriculture Lane",
"City":"Boca Raton",
"State":"FL",
"ZipCode":33487
},
{
"GivenName":"Eva",
"Surname":"Fain",
"EmailAddress":"EvaJFain@superrito.com",
"TelephoneNumber":"951-277-4792",
"StreetAddress":"3635 Hillcrest Lane",
"City":"Corona",
"State":"CA",
"ZipCode":91720
},
{
"GivenName":"Alexis",
"Surname":"Dinsmore",
"EmailAddress":"AlexisJDinsmore@gustr.com",
"TelephoneNumber":"847-625-3640",
"StreetAddress":"2530 Vine Street",
"City":"Waukegan",
"State":"IL",
"ZipCode":60085
},
{
"GivenName":"Eva",
"Surname":"Lukens",
"EmailAddress":"EvaRLukens@superrito.com",
"TelephoneNumber":"253-779-1751",
"StreetAddress":"4134 Dale Avenue",
"City":"Tacoma",
"State":"WA",
"ZipCode":98402
},
{
"GivenName":"Ellen",
"Surname":"Cox",
"EmailAddress":"EllenCCox@einrot.com",
"TelephoneNumber":"580-756-5949",
"StreetAddress":"72 Late Avenue",
"City":"Lawton",
"State":"OK",
"ZipCode":73501
},
{
"GivenName":"Amanda",
"Surname":"McCullough",
"EmailAddress":"AmandaJMcCullough@fleckens.hu",
"TelephoneNumber":"606-329-0173",
"StreetAddress":"3854 May Street",
"City":"Ashland",
"State":"KY",
"ZipCode":41101
},
{
"GivenName":"Mildred",
"Surname":"Quinlan",
"EmailAddress":"MildredVQuinlan@cuvox.de",
"TelephoneNumber":"518-723-9320",
"StreetAddress":"4595 Joes Road",
"City":"Albany",
"State":"NY",
"ZipCode":12207
},
{
"GivenName":"Vera",
"Surname":"Ortiz",
"EmailAddress":"VeraFOrtiz@fleckens.hu",
"TelephoneNumber":"928-776-6787",
"StreetAddress":"2671 Martha Street",
"City":"Prescott",
"State":"AZ",
"ZipCode":86301
},
{
"GivenName":"Roberto",
"Surname":"Piland",
"EmailAddress":"RobertoPPiland@dayrep.com",
"TelephoneNumber":"201-627-8203",
"StreetAddress":"1029 Goldleaf Lane",
"City":"Red Bank",
"State":"NJ",
"ZipCode":47701
},
{
"GivenName":"Judith",
"Surname":"Ponder",
"EmailAddress":"JudithSPonder@fleckens.hu",
"TelephoneNumber":"214-714-8601",
"StreetAddress":"4741 Carolyns Circle",
"City":"Carrollton",
"State":"TX",
"ZipCode":75006
},
{
"GivenName":"Leonard",
"Surname":"McBroom",
"EmailAddress":"LeonardLMcBroom@dayrep.com",
"TelephoneNumber":"708-907-6742",
"StreetAddress":"2813 John Calvin Drive",
"City":"Chicago",
"State":"IL",
"ZipCode":60606
},
{
"GivenName":"Rebecca",
"Surname":"Foster",
"EmailAddress":"RebeccaPFoster@cuvox.de",
"TelephoneNumber":"408-657-3352",
"StreetAddress":"1578 Friendship Lane",
"City":"San Jose",
"State":"CA",
"ZipCode":95113
},
{
"GivenName":"George",
"Surname":"Little",
"EmailAddress":"GeorgeDLittle@cuvox.de",
"TelephoneNumber":"406-933-8185",
"StreetAddress":"613 Coolidge Street",
"City":"Clancy",
"State":"MT",
"ZipCode":59634
},
{
"GivenName":"Robert",
"Surname":"Arciniega",
"EmailAddress":"RobertGArciniega@cuvox.de",
"TelephoneNumber":"281-691-7682",
"StreetAddress":"4893 Chapel Street",
"City":"Houston",
"State":"TX",
"ZipCode":77002
},
{
"GivenName":"Mark",
"Surname":"Johnson",
"EmailAddress":"MarkMJohnson@cuvox.de",
"TelephoneNumber":"719-636-1287",
"StreetAddress":"2716 Berry Street",
"City":"Colorado Springs",
"State":"CO",
"ZipCode":80903
},
{
"GivenName":"Jose",
"Surname":"Deitch",
"EmailAddress":"JoseVDeitch@dayrep.com",
"TelephoneNumber":"360-546-9109",
"StreetAddress":"4979 Honeysuckle Lane",
"City":"Vancouver",
"State":"WA",
"ZipCode":98686
},
{
"GivenName":"Charles",
"Surname":"Mathew",
"EmailAddress":"CharlesCMathew@armyspy.com",
"TelephoneNumber":"718-634-3627",
"StreetAddress":"4732 Briercliff Road",
"City":"Queens",
"State":"NY",
"ZipCode":11693
},
{
"GivenName":"Grant",
"Surname":"Sampson",
"EmailAddress":"GrantASampson@jourrapide.com",
"TelephoneNumber":"404-810-7845",
"StreetAddress":"2101 Stroop Hill Road",
"City":"Atlanta",
"State":"GA",
"ZipCode":30303
},
{
"GivenName":"Arthur",
"Surname":"Marciano",
"EmailAddress":"ArthurDMarciano@fleckens.hu",
"TelephoneNumber":"803-588-0857",
"StreetAddress":"2572 Java Lane",
"City":"Bishopville",
"State":"SC",
"ZipCode":29010
},
{
"GivenName":"Alma",
"Surname":"Michel",
"EmailAddress":"AlmaJMichel@rhyta.com",
"TelephoneNumber":"410-423-6165",
"StreetAddress":"4107 Columbia Boulevard",
"City":"Baltimore",
"State":"MD",
"ZipCode":21201
},
{
"GivenName":"Lucille",
"Surname":"Lange",
"EmailAddress":"LucilleMLange@superrito.com",
"TelephoneNumber":"619-287-2041",
"StreetAddress":"4389 Holden Street",
"City":"San Diego",
"State":"CA",
"ZipCode":92115
},
{
"GivenName":"Velva",
"Surname":"Warren",
"EmailAddress":"VelvaWWarren@dayrep.com",
"TelephoneNumber":"952-942-1700",
"StreetAddress":"628 Sycamore Fork Road",
"City":"Eden Prairie",
"State":"MN",
"ZipCode":55344
},
{
"GivenName":"Cathy",
"Surname":"Jones",
"EmailAddress":"CathyBJones@jourrapide.com",
"TelephoneNumber":"206-245-5230",
"StreetAddress":"2707 Owagner Lane",
"City":"Seattle",
"State":"WA",
"ZipCode":98109
},
{
"GivenName":"Brian",
"Surname":"Reed",
"EmailAddress":"BrianSReed@einrot.com",
"TelephoneNumber":"515-710-5372",
"StreetAddress":"1254 Nutters Barn Lane",
"City":"Urbandale",
"State":"IA",
"ZipCode":50322
},
{
"GivenName":"Dustin",
"Surname":"West",
"EmailAddress":"DustinSWest@cuvox.de",
"TelephoneNumber":"270-379-5661",
"StreetAddress":"921 Glen Street",
"City":"Fairplay",
"State":"KY",
"ZipCode":42735
},
{
"GivenName":"Demetrius",
"Surname":"Underwood",
"EmailAddress":"DemetriusJUnderwood@armyspy.com",
"TelephoneNumber":"252-876-4126",
"StreetAddress":"344 Fort Street",
"City":"New Bern",
"State":"NC",
"ZipCode":28562
},
{
"GivenName":"David",
"Surname":"Payne",
"EmailAddress":"DavidCPayne@teleworm.us",
"TelephoneNumber":"412-376-1313",
"StreetAddress":"1866 Jacobs Street",
"City":"Monroeville",
"State":"PA",
"ZipCode":15146
},
{
"GivenName":"Carl",
"Surname":"Newson",
"EmailAddress":"CarlBNewson@cuvox.de",
"TelephoneNumber":"609-840-4704",
"StreetAddress":"2730 Whiteman Street",
"City":"Pleasantville",
"State":"NJ",
"ZipCode":48232
},
{
"GivenName":"Sarah",
"Surname":"Corbin",
"EmailAddress":"SarahBCorbin@armyspy.com",
"TelephoneNumber":"916-289-9404",
"StreetAddress":"1340 Pearl Street",
"City":"Mcclellan Afb",
"State":"CA",
"ZipCode":95652
},
{
"GivenName":"Deborah",
"Surname":"Bailey",
"EmailAddress":"DeborahJBailey@jourrapide.com",
"TelephoneNumber":"605-749-1514",
"StreetAddress":"1308 Hartway Street",
"City":"Newcastle",
"State":"SD",
"ZipCode":82701
},
{
"GivenName":"Mark",
"Surname":"Neufeld",
"EmailAddress":"MarkCNeufeld@jourrapide.com",
"TelephoneNumber":"580-688-3957",
"StreetAddress":"4260 Simpson Square",
"City":"Hollis",
"State":"OK",
"ZipCode":73550
},
{
"GivenName":"Alvina",
"Surname":"Thornton",
"EmailAddress":"AlvinaJThornton@einrot.com",
"TelephoneNumber":"303-985-3428",
"StreetAddress":"1210 Tavern Place",
"City":"Lakewood",
"State":"CO",
"ZipCode":80227
},
{
"GivenName":"James",
"Surname":"Arrington",
"EmailAddress":"JamesPArrington@dayrep.com",
"TelephoneNumber":"585-563-8416",
"StreetAddress":"3198 James Street",
"City":"Fairport",
"State":"NY",
"ZipCode":14450
},
{
"GivenName":"Jolene",
"Surname":"Yuen",
"EmailAddress":"JoleneDYuen@fleckens.hu",
"TelephoneNumber":"716-463-0771",
"StreetAddress":"4285 Cameron Road",
"City":"Buffalo",
"State":"NY",
"ZipCode":14202
},
{
"GivenName":"Patricia",
"Surname":"Johnson",
"EmailAddress":"PatriciaGJohnson@teleworm.us",
"TelephoneNumber":"760-689-1353",
"StreetAddress":"1881 Vernon Street",
"City":"California",
"State":"CA",
"ZipCode":42110
},
{
"GivenName":"David",
"Surname":"McVay",
"EmailAddress":"DavidJMcVay@fleckens.hu",
"TelephoneNumber":"443-277-9296",
"StreetAddress":"3515 Five Points",
"City":"Baltimore",
"State":"MD",
"ZipCode":21201
},
{
"GivenName":"Francis",
"Surname":"Willingham",
"EmailAddress":"FrancisJWillingham@gustr.com",
"TelephoneNumber":"828-462-8597",
"StreetAddress":"1900 Diamond Street",
"City":"Asheville",
"State":"NC",
"ZipCode":28801
},
{
"GivenName":"Austin",
"Surname":"Henry",
"EmailAddress":"AustinGHenry@gustr.com",
"TelephoneNumber":"732-468-8023",
"StreetAddress":"361 Finwood Road",
"City":"Freehold",
"State":"NJ",
"ZipCode":47728
},
{
"GivenName":"Leslie",
"Surname":"Robison",
"EmailAddress":"LeslieJRobison@jourrapide.com",
"TelephoneNumber":"623-889-4948",
"StreetAddress":"68 Rainbow Road",
"City":"Phoenix",
"State":"AZ",
"ZipCode":85003
},
{
"GivenName":"John",
"Surname":"Deck",
"EmailAddress":"JohnJDeck@dayrep.com",
"TelephoneNumber":"321-383-8324",
"StreetAddress":"476 Terry Lane",
"City":"Titusville",
"State":"FL",
"ZipCode":32796
},
{
"GivenName":"Laura",
"Surname":"Whitaker",
"EmailAddress":"LauraBWhitaker@rhyta.com",
"TelephoneNumber":"956-686-7230",
"StreetAddress":"4713 Adamsville Road",
"City":"Mcallen",
"State":"TX",
"ZipCode":78501
},
{
"GivenName":"Courtney",
"Surname":"McDaniel",
"EmailAddress":"CourtneyLMcDaniel@cuvox.de",
"TelephoneNumber":"269-612-7253",
"StreetAddress":"2910 Goff Avenue",
"City":"Grand Rapids",
"State":"MI",
"ZipCode":49503
},
{
"GivenName":"Elia",
"Surname":"Cristobal",
"EmailAddress":"EliaBCristobal@teleworm.us",
"TelephoneNumber":"801-283-7721",
"StreetAddress":"418 Philadelphia Avenue",
"City":"Salt Lake City",
"State":"UT",
"ZipCode":84116
},
{
"GivenName":"Andrea",
"Surname":"Stephens",
"EmailAddress":"AndreaBStephens@fleckens.hu",
"TelephoneNumber":"612-279-4369",
"StreetAddress":"2313 Lodgeville Road",
"City":"Golden Valley",
"State":"MN",
"ZipCode":55427
},
{
"GivenName":"June",
"Surname":"McCoy",
"EmailAddress":"JuneDMcCoy@teleworm.us",
"TelephoneNumber":"617-248-6977",
"StreetAddress":"465 Lynn Street",
"City":"Boston",
"State":"MA",
"ZipCode":42114
},
{
"GivenName":"Diane",
"Surname":"Johnson",
"EmailAddress":"DianeJJohnson@jourrapide.com",
"TelephoneNumber":"317-385-0415",
"StreetAddress":"3942 Clay Street",
"City":"Indianapolis",
"State":"IN",
"ZipCode":46254
},
{
"GivenName":"Barry",
"Surname":"Torres",
"EmailAddress":"BarryBTorres@armyspy.com",
"TelephoneNumber":"856-348-4569",
"StreetAddress":"859 Lee Avenue",
"City":"Camden",
"State":"NJ",
"ZipCode":48102
},
{
"GivenName":"Lindsey",
"Surname":"Griffin",
"EmailAddress":"LindseyMGriffin@jourrapide.com",
"TelephoneNumber":"817-551-3887",
"StreetAddress":"4026 Jones Street",
"City":"Fort Worth",
"State":"TX",
"ZipCode":76134
},
{
"GivenName":"Victor",
"Surname":"Sneller",
"EmailAddress":"VictorSSneller@rhyta.com",
"TelephoneNumber":"817-664-4484",
"StreetAddress":"3080 Sardis Sta",
"City":"Dallas",
"State":"TX",
"ZipCode":75207
},
{
"GivenName":"Diane",
"Surname":"Custer",
"EmailAddress":"DianeFCuster@jourrapide.com",
"TelephoneNumber":"307-732-8766",
"StreetAddress":"1557 Archwood Avenue",
"City":"Jackson",
"State":"WY",
"ZipCode":83001
},
{
"GivenName":"Elsie",
"Surname":"Yost",
"EmailAddress":"ElsieJYost@fleckens.hu",
"TelephoneNumber":"212-682-4623",
"StreetAddress":"4780 Oakwood Avenue",
"City":"New York",
"State":"NY",
"ZipCode":10016
},
{
"GivenName":"Shawn",
"Surname":"Smith",
"EmailAddress":"ShawnKSmith@cuvox.de",
"TelephoneNumber":"317-814-9491",
"StreetAddress":"2610 Birch Street",
"City":"Indianapolis",
"State":"IN",
"ZipCode":46225
},
{
"GivenName":"Sarah",
"Surname":"Cutler",
"EmailAddress":"SarahECutler@einrot.com",
"TelephoneNumber":"910-375-3935",
"StreetAddress":"2224 Twin Willow Lane",
"City":"Fayetteville",
"State":"NC",
"ZipCode":28301
},
{
"GivenName":"Randy",
"Surname":"Ezzell",
"EmailAddress":"RandySEzzell@gustr.com",
"TelephoneNumber":"478-589-0183",
"StreetAddress":"57 Graystone Lakes",
"City":"Midville",
"State":"GA",
"ZipCode":30441
},
{
"GivenName":"Jessica",
"Surname":"Shirley",
"EmailAddress":"JessicaHShirley@gustr.com",
"TelephoneNumber":"859-263-9322",
"StreetAddress":"2510 Carson Street",
"City":"Lexington",
"State":"KY",
"ZipCode":40509
},
{
"GivenName":"Jack",
"Surname":"Barter",
"EmailAddress":"JackMBarter@dayrep.com",
"TelephoneNumber":"910-590-5234",
"StreetAddress":"591 Happy Hollow Road",
"City":"Clinton",
"State":"NC",
"ZipCode":28328
},
{
"GivenName":"Page",
"Surname":"Garcia",
"EmailAddress":"PageOGarcia@superrito.com",
"TelephoneNumber":"507-233-0220",
"StreetAddress":"2930 Rosewood Court",
"City":"New Ulm",
"State":"MN",
"ZipCode":56073
},
{
"GivenName":"Kirk",
"Surname":"Sanches",
"EmailAddress":"KirkSSanches@cuvox.de",
"TelephoneNumber":"409-277-0390",
"StreetAddress":"1835 Brookview Drive",
"City":"Beaumont",
"State":"TX",
"ZipCode":77701
},
{
"GivenName":"Jon",
"Surname":"Douglas",
"EmailAddress":"JonTDouglas@teleworm.us",
"TelephoneNumber":"602-997-7198",
"StreetAddress":"1527 Crowfield Road",
"City":"Phoenix",
"State":"AZ",
"ZipCode":85021
},
{
"GivenName":"Freddie",
"Surname":"Hendershot",
"EmailAddress":"FreddieMHendershot@cuvox.de",
"TelephoneNumber":"480-615-9427",
"StreetAddress":"3007 Hillside Street",
"City":"Mesa",
"State":"AZ",
"ZipCode":85201
},
{
"GivenName":"Yolanda",
"Surname":"Barbour",
"EmailAddress":"YolandaRBarbour@dayrep.com",
"TelephoneNumber":"219-977-8518",
"StreetAddress":"1293 Jadewood Drive",
"City":"Gary",
"State":"IN",
"ZipCode":46404
},
{
"GivenName":"Nora",
"Surname":"Macklin",
"EmailAddress":"NoraEMacklin@armyspy.com",
"TelephoneNumber":"516-815-4482",
"StreetAddress":"2213 Westwood Avenue",
"City":"Huntington",
"State":"NY",
"ZipCode":11743
},
{
"GivenName":"Ryan",
"Surname":"Rickman",
"EmailAddress":"RyanARickman@jourrapide.com",
"TelephoneNumber":"503-619-1643",
"StreetAddress":"2552 Godfrey Street",
"City":"Portland",
"State":"OR",
"ZipCode":97205
},
{
"GivenName":"Martha",
"Surname":"Brazeal",
"EmailAddress":"MarthaMBrazeal@einrot.com",
"TelephoneNumber":"256-689-6956",
"StreetAddress":"131 Ferry Street",
"City":"Birmingham",
"State":"AL",
"ZipCode":35209
},
{
"GivenName":"Martin",
"Surname":"Cabral",
"EmailAddress":"MartinMCabral@superrito.com",
"TelephoneNumber":"956-579-3244",
"StreetAddress":"1586 Adamsville Road",
"City":"Brownsville",
"State":"TX",
"ZipCode":78520
},
{
"GivenName":"Melissa",
"Surname":"Barnes",
"EmailAddress":"MelissaRBarnes@jourrapide.com",
"TelephoneNumber":"949-200-5953",
"StreetAddress":"355 Heavens Way",
"City":"Santa Ana",
"State":"CA",
"ZipCode":92705
},
{
"GivenName":"Barbara",
"Surname":"Cintron",
"EmailAddress":"BarbaraTCintron@rhyta.com",
"TelephoneNumber":"608-219-3969",
"StreetAddress":"1027 Browning Lane",
"City":"Madison",
"State":"WI",
"ZipCode":53718
},
{
"GivenName":"Phyllis",
"Surname":"Turner",
"EmailAddress":"PhyllisCTurner@einrot.com",
"TelephoneNumber":"207-628-9415",
"StreetAddress":"261 Bloomfield Way",
"City":"North New Portland",
"State":"ME",
"ZipCode":44961
},
{
"GivenName":"Frances",
"Surname":"Derr",
"EmailAddress":"FrancesEDerr@armyspy.com",
"TelephoneNumber":"731-298-3523",
"StreetAddress":"4690 Mapleview Drive",
"City":"Memphis",
"State":"TN",
"ZipCode":38116
},
{
"GivenName":"Felicia",
"Surname":"Lowman",
"EmailAddress":"FeliciaTLowman@cuvox.de",
"TelephoneNumber":"210-293-0332",
"StreetAddress":"2125 Weekley Street",
"City":"San Antonio",
"State":"TX",
"ZipCode":78205
},
{
"GivenName":"Harry",
"Surname":"Braden",
"EmailAddress":"HarryABraden@dayrep.com",
"TelephoneNumber":"630-663-5943",
"StreetAddress":"1898 Steele Street",
"City":"Downers Grove",
"State":"IL",
"ZipCode":60515
},
{
"GivenName":"Walter",
"Surname":"Wilde",
"EmailAddress":"WalterKWilde@armyspy.com",
"TelephoneNumber":"207-398-4457",
"StreetAddress":"4740 Victoria Court",
"City":"Fort Kent",
"State":"ME",
"ZipCode":44743
},
{
"GivenName":"Neal",
"Surname":"Ogburn",
"EmailAddress":"NealTOgburn@rhyta.com",
"TelephoneNumber":"941-754-5409",
"StreetAddress":"4161 Monroe Avenue",
"City":"Fort Myers",
"State":"FL",
"ZipCode":33901
},
{
"GivenName":"Nancy",
"Surname":"Forbes",
"EmailAddress":"NancyMForbes@superrito.com",
"TelephoneNumber":"970-679-7605",
"StreetAddress":"1589 Pick Street",
"City":"Loveland",
"State":"CO",
"ZipCode":80537
},
{
"GivenName":"Barbara",
"Surname":"Pauling",
"EmailAddress":"BarbaraWPauling@superrito.com",
"TelephoneNumber":"334-494-9056",
"StreetAddress":"190 Turkey Pen Lane",
"City":"Montgomery",
"State":"AL",
"ZipCode":36117
},
{
"GivenName":"Mike",
"Surname":"Weyant",
"EmailAddress":"MikeAWeyant@superrito.com",
"TelephoneNumber":"205-481-1594",
"StreetAddress":"562 Broad Street",
"City":"Bessemer",
"State":"AL",
"ZipCode":35020
},
{
"GivenName":"Randall",
"Surname":"Millican",
"EmailAddress":"RandallSMillican@dayrep.com",
"TelephoneNumber":"847-625-2637",
"StreetAddress":"1732 Vine Street",
"City":"Waukegan",
"State":"IL",
"ZipCode":60085
},
{
"GivenName":"Russell",
"Surname":"Potter",
"EmailAddress":"RussellRPotter@rhyta.com",
"TelephoneNumber":"270-975-1595",
"StreetAddress":"592 Shady Pines Drive",
"City":"Bowling Green",
"State":"KY",
"ZipCode":42101
},
{
"GivenName":"Geneva",
"Surname":"Garcia",
"EmailAddress":"GenevaRGarcia@gustr.com",
"TelephoneNumber":"502-229-8289",
"StreetAddress":"3782 Radford Street",
"City":"Louisville",
"State":"KY",
"ZipCode":40299
},
{
"GivenName":"William",
"Surname":"Eldridge",
"EmailAddress":"WilliamGEldridge@dayrep.com",
"TelephoneNumber":"507-630-4843",
"StreetAddress":"2400 Sugar Camp Road",
"City":"St James",
"State":"MN",
"ZipCode":56081
},
{
"GivenName":"James",
"Surname":"Danielson",
"EmailAddress":"JamesLDanielson@rhyta.com",
"TelephoneNumber":"502-339-0796",
"StreetAddress":"4640 Radford Street",
"City":"Louisville",
"State":"KY",
"ZipCode":40242
},
{
"GivenName":"Lucile",
"Surname":"Gordon",
"EmailAddress":"LucileJGordon@armyspy.com",
"TelephoneNumber":"207-691-1020",
"StreetAddress":"4935 Fantages Way",
"City":"Bangor",
"State":"ME",
"ZipCode":44401
},
{
"GivenName":"Coleman",
"Surname":"Kanode",
"EmailAddress":"ColemanKKanode@einrot.com",
"TelephoneNumber":"817-837-1089",
"StreetAddress":"3204 Baker Avenue",
"City":"Roanoke",
"State":"TX",
"ZipCode":76262
},
{
"GivenName":"John",
"Surname":"Poli",
"EmailAddress":"JohnPPoli@jourrapide.com",
"TelephoneNumber":"206-533-0142",
"StreetAddress":"1011 Elliot Avenue",
"City":"Richmond Beach",
"State":"WA",
"ZipCode":98177
},
{
"GivenName":"David",
"Surname":"Arnold",
"EmailAddress":"DavidMArnold@fleckens.hu",
"TelephoneNumber":"312-859-8066",
"StreetAddress":"3557 Tator Patch Road",
"City":"Hickory Hills",
"State":"IL",
"ZipCode":60457
},
{
"GivenName":"Marian",
"Surname":"Davis",
"EmailAddress":"MarianRDavis@superrito.com",
"TelephoneNumber":"760-853-8494",
"StreetAddress":"3035 Wilson Street",
"City":"San Diego",
"State":"CA",
"ZipCode":92103
},
{
"GivenName":"Pamela",
"Surname":"Madden",
"EmailAddress":"PamelaJMadden@fleckens.hu",
"TelephoneNumber":"973-325-0776",
"StreetAddress":"1951 Jadewood Farms",
"City":"West Orange",
"State":"NJ",
"ZipCode":47052
},
{
"GivenName":"Anna",
"Surname":"Anderson",
"EmailAddress":"AnnaMAnderson@teleworm.us",
"TelephoneNumber":"850-536-6490",
"StreetAddress":"4347 Morgan Street",
"City":"Tallahassee",
"State":"FL",
"ZipCode":32303
},
{
"GivenName":"John",
"Surname":"Sanchez",
"EmailAddress":"JohnESanchez@armyspy.com",
"TelephoneNumber":"210-470-8531",
"StreetAddress":"977 Todds Lane",
"City":"San Antonio",
"State":"TX",
"ZipCode":78210
},
{
"GivenName":"Sarah",
"Surname":"Williams",
"EmailAddress":"SarahTWilliams@cuvox.de",
"TelephoneNumber":"216-964-6015",
"StreetAddress":"2974 Cardinal Lane",
"City":"Mayfield Heights",
"State":"OH",
"ZipCode":44124
},
{
"GivenName":"Sarah",
"Surname":"Graver",
"EmailAddress":"SarahRGraver@gustr.com",
"TelephoneNumber":"330-633-7238",
"StreetAddress":"3269 Little Street",
"City":"Tallmadge",
"State":"OH",
"ZipCode":44278
},
{
"GivenName":"Chad",
"Surname":"Jaramillo",
"EmailAddress":"ChadOJaramillo@superrito.com",
"TelephoneNumber":"917-389-0766",
"StreetAddress":"3551 Bicetown Road",
"City":"New York",
"State":"NY",
"ZipCode":10016
},
{
"GivenName":"Charles",
"Surname":"Griffin",
"EmailAddress":"CharlesAGriffin@teleworm.us",
"TelephoneNumber":"901-334-2229",
"StreetAddress":"2778 Woodridge Lane",
"City":"Memphis",
"State":"TN",
"ZipCode":38110
},
{
"GivenName":"Maria",
"Surname":"Scarberry",
"EmailAddress":"MariaJScarberry@superrito.com",
"TelephoneNumber":"920-503-6610",
"StreetAddress":"559 Tail Ends Road",
"City":"Plymouth",
"State":"WI",
"ZipCode":53073
},
{
"GivenName":"Jordan",
"Surname":"May",
"EmailAddress":"JordanBMay@einrot.com",
"TelephoneNumber":"507-377-2254",
"StreetAddress":"1916 Pritchard Court",
"City":"Albert Lea",
"State":"MN",
"ZipCode":56007
},
{
"GivenName":"Irene",
"Surname":"Long",
"EmailAddress":"IreneJLong@rhyta.com",
"TelephoneNumber":"920-308-5003",
"StreetAddress":"764 Hartland Avenue",
"City":"Green Bay",
"State":"WI",
"ZipCode":54303
},
{
"GivenName":"James",
"Surname":"Horton",
"EmailAddress":"JamesGHorton@teleworm.us",
"TelephoneNumber":"308-684-0292",
"StreetAddress":"2978 Rollins Road",
"City":"Merriman",
"State":"NE",
"ZipCode":69218
},
{
"GivenName":"Jane",
"Surname":"Hopkins",
"EmailAddress":"JaneWHopkins@jourrapide.com",
"TelephoneNumber":"215-953-9478",
"StreetAddress":"522 Hiddenview Drive",
"City":"Churchville",
"State":"PA",
"ZipCode":18966
},
{
"GivenName":"Joseph",
"Surname":"Lee",
"EmailAddress":"JosephDLee@superrito.com",
"TelephoneNumber":"732-515-2477",
"StreetAddress":"3583 Finwood Road",
"City":"Jackson",
"State":"NJ",
"ZipCode":48537
},
{
"GivenName":"Lisa",
"Surname":"Henson",
"EmailAddress":"LisaRHenson@fleckens.hu",
"TelephoneNumber":"718-448-4765",
"StreetAddress":"1722 Church Street",
"City":"Staten Island",
"State":"NY",
"ZipCode":10301
},
{
"GivenName":"Betty",
"Surname":"Inoue",
"EmailAddress":"BettySInoue@superrito.com",
"TelephoneNumber":"586-232-4499",
"StreetAddress":"3289 Cherry Ridge Drive",
"City":"Southfield",
"State":"MI",
"ZipCode":48075
},
{
"GivenName":"Larry",
"Surname":"Williams",
"EmailAddress":"LarryNWilliams@fleckens.hu",
"TelephoneNumber":"615-785-6075",
"StreetAddress":"4621 Buffalo Creek Road",
"City":"Nashville",
"State":"TN",
"ZipCode":37209
},
{
"GivenName":"William",
"Surname":"Sanders",
"EmailAddress":"WilliamKSanders@superrito.com",
"TelephoneNumber":"602-659-2330",
"StreetAddress":"4856 Dogwood Road",
"City":"Phoenix",
"State":"AZ",
"ZipCode":85040
},
{
"GivenName":"Hilda",
"Surname":"Spiers",
"EmailAddress":"HildaHSpiers@jourrapide.com",
"TelephoneNumber":"417-432-4322",
"StreetAddress":"2711 Twin House Lane",
"City":"Schell City",
"State":"MO",
"ZipCode":64783
},
{
"GivenName":"Edna",
"Surname":"Haddix",
"EmailAddress":"EdnaCHaddix@armyspy.com",
"TelephoneNumber":"308-624-5097",
"StreetAddress":"3591 Kyle Street",
"City":"Grand Island",
"State":"NE",
"ZipCode":68803
},
{
"GivenName":"Delia",
"Surname":"Parrott",
"EmailAddress":"DeliaWParrott@jourrapide.com",
"TelephoneNumber":"540-439-6777",
"StreetAddress":"3693 Jehovah Drive",
"City":"Remington",
"State":"VA",
"ZipCode":22734
},
{
"GivenName":"Doyle",
"Surname":"West",
"EmailAddress":"DoyleWWest@fleckens.hu",
"TelephoneNumber":"360-374-2280",
"StreetAddress":"2940 Terra Street",
"City":"Forks",
"State":"WA",
"ZipCode":98331
},
{
"GivenName":"Pamela",
"Surname":"Wrenn",
"EmailAddress":"PamelaAWrenn@einrot.com",
"TelephoneNumber":"863-266-4209",
"StreetAddress":"628 Tetrick Road",
"City":"Fort Myers",
"State":"FL",
"ZipCode":33901
},
{
"GivenName":"Cassandra",
"Surname":"McKean",
"EmailAddress":"CassandraKMcKean@superrito.com",
"TelephoneNumber":"918-733-2837",
"StreetAddress":"2602 Heather Sees Way",
"City":"Morris",
"State":"OK",
"ZipCode":74445
},
{
"GivenName":"Aida",
"Surname":"Fountain",
"EmailAddress":"AidaBFountain@jourrapide.com",
"TelephoneNumber":"907-279-5041",
"StreetAddress":"4327 Veltri Drive",
"City":"Anchorage",
"State":"AK",
"ZipCode":99501
},
{
"GivenName":"Troy",
"Surname":"Rhea",
"EmailAddress":"TroyBRhea@cuvox.de",
"TelephoneNumber":"317-884-6886",
"StreetAddress":"4334 Birch Street",
"City":"Greenwood",
"State":"IN",
"ZipCode":46142
},
{
"GivenName":"Josephine",
"Surname":"Scott",
"EmailAddress":"JosephineRScott@gustr.com",
"TelephoneNumber":"305-474-6753",
"StreetAddress":"4087 Marigold Lane",
"City":"Opa Locka",
"State":"FL",
"ZipCode":33056
},
{
"GivenName":"Rodolfo",
"Surname":"Perry",
"EmailAddress":"RodolfoDPerry@superrito.com",
"TelephoneNumber":"517-930-8830",
"StreetAddress":"1289 John Avenue",
"City":"Okemos",
"State":"MI",
"ZipCode":48864
},
{
"GivenName":"Mary",
"Surname":"Cooley",
"EmailAddress":"MarySCooley@gustr.com",
"TelephoneNumber":"773-604-4398",
"StreetAddress":"2467 Virginia Street",
"City":"Chicago",
"State":"IL",
"ZipCode":60618
},
{
"GivenName":"Salvatore",
"Surname":"Walter",
"EmailAddress":"SalvatoreHWalter@rhyta.com",
"TelephoneNumber":"419-578-6333",
"StreetAddress":"957 Stonecoal Road",
"City":"Toledo",
"State":"OH",
"ZipCode":43607
},
{
"GivenName":"Benjamin",
"Surname":"Olive",
"EmailAddress":"BenjaminVOlive@rhyta.com",
"TelephoneNumber":"201-670-5560",
"StreetAddress":"1945 Goldleaf Lane",
"City":"Ridgewood",
"State":"NJ",
"ZipCode":47450
},
{
"GivenName":"Thomas",
"Surname":"Cook",
"EmailAddress":"ThomasBCook@superrito.com",
"TelephoneNumber":"517-526-6735",
"StreetAddress":"3776 Elk Avenue",
"City":"Lansing",
"State":"MI",
"ZipCode":48933
},
{
"GivenName":"Craig",
"Surname":"Aponte",
"EmailAddress":"CraigCAponte@jourrapide.com",
"TelephoneNumber":"916-933-0681",
"StreetAddress":"4565 Woodland Terrace",
"City":"Folsom",
"State":"CA",
"ZipCode":95630
},
{
"GivenName":"Michael",
"Surname":"Fulmore",
"EmailAddress":"MichaelAFulmore@gustr.com",
"TelephoneNumber":"503-826-1560",
"StreetAddress":"2039 Heron Way",
"City":"Sandy",
"State":"OR",
"ZipCode":97055
},
{
"GivenName":"April",
"Surname":"Koontz",
"EmailAddress":"AprilJKoontz@cuvox.de",
"TelephoneNumber":"337-212-7545",
"StreetAddress":"2172 Sherwood Circle",
"City":"Lafayette",
"State":"LA",
"ZipCode":70583
},
{
"GivenName":"Susan",
"Surname":"Hammond",
"EmailAddress":"SusanRHammond@rhyta.com",
"TelephoneNumber":"213-914-4112",
"StreetAddress":"944 Aviation Way",
"City":"Los Angeles",
"State":"CA",
"ZipCode":90026
},
{
"GivenName":"Jesus",
"Surname":"Collazo",
"EmailAddress":"JesusLCollazo@dayrep.com",
"TelephoneNumber":"415-558-3876",
"StreetAddress":"1130 Harrison Street",
"City":"San Francisco",
"State":"CA",
"ZipCode":94103
},
{
"GivenName":"Troy",
"Surname":"Gower",
"EmailAddress":"TroyDGower@armyspy.com",
"TelephoneNumber":"804-543-4187",
"StreetAddress":"3751 Biddie Lane",
"City":"Richmond",
"State":"VA",
"ZipCode":23222
},
{
"GivenName":"Kurt",
"Surname":"Greene",
"EmailAddress":"KurtLGreene@armyspy.com",
"TelephoneNumber":"432-523-1617",
"StreetAddress":"2308 Laurel Lane",
"City":"Andrews",
"State":"TX",
"ZipCode":79714
},
{
"GivenName":"Frederic",
"Surname":"Rodgers",
"EmailAddress":"FredericDRodgers@superrito.com",
"TelephoneNumber":"337-506-5258",
"StreetAddress":"1503 Sarah Drive",
"City":"Ville Platte",
"State":"LA",
"ZipCode":70586
},
{
"GivenName":"Lawrence",
"Surname":"Rose",
"EmailAddress":"LawrenceKRose@dayrep.com",
"TelephoneNumber":"540-483-7721",
"StreetAddress":"192 Jehovah Drive",
"City":"Rocky Mount",
"State":"VA",
"ZipCode":24151
},
{
"GivenName":"Nancy",
"Surname":"Cox",
"EmailAddress":"NancyJCox@cuvox.de",
"TelephoneNumber":"337-532-1729",
"StreetAddress":"2094 Sarah Drive",
"City":"Lake Charles",
"State":"LA",
"ZipCode":70629
},
{
"GivenName":"Nanette",
"Surname":"Krueger",
"EmailAddress":"NanetteDKrueger@dayrep.com",
"TelephoneNumber":"573-459-6377",
"StreetAddress":"4663 Oak Ridge Drive",
"City":"New Haven",
"State":"MO",
"ZipCode":63066
},
{
"GivenName":"Steven",
"Surname":"Pounds",
"EmailAddress":"StevenCPounds@teleworm.us",
"TelephoneNumber":"989-292-6849",
"StreetAddress":"4209 Hart Ridge Road",
"City":"Bay City",
"State":"MI",
"ZipCode":48706
},
{
"GivenName":"Donna",
"Surname":"Harris",
"EmailAddress":"DonnaCHarris@fleckens.hu",
"TelephoneNumber":"214-719-9603",
"StreetAddress":"1824 Carolyns Circle",
"City":"Dallas",
"State":"TX",
"ZipCode":75204
},
{
"GivenName":"Lea",
"Surname":"McKinney",
"EmailAddress":"LeaGMcKinney@teleworm.us",
"TelephoneNumber":"229-329-1765",
"StreetAddress":"3662 Private Lane",
"City":"Montgomery",
"State":"GA",
"ZipCode":36104
},
{
"GivenName":"Patricia",
"Surname":"Anderson",
"EmailAddress":"PatriciaBAnderson@einrot.com",
"TelephoneNumber":"860-649-3892",
"StreetAddress":"1012 Copperhead Road",
"City":"Manchester",
"State":"CT",
"ZipCode":46040
},
{
"GivenName":"Larry",
"Surname":"Wester",
"EmailAddress":"LarryBWester@fleckens.hu",
"TelephoneNumber":"978-756-6293",
"StreetAddress":"285 Christie Way",
"City":"Worcester",
"State":"MA",
"ZipCode":41610
},
{
"GivenName":"Helen",
"Surname":"Spencer",
"EmailAddress":"HelenTSpencer@gustr.com",
"TelephoneNumber":"717-309-6774",
"StreetAddress":"2442 Stout Street",
"City":"York",
"State":"PA",
"ZipCode":17403
},
{
"GivenName":"Vincent",
"Surname":"Brown",
"EmailAddress":"VincentDBrown@teleworm.us",
"TelephoneNumber":"931-405-7474",
"StreetAddress":"2458 McDowell Street",
"City":"Gainesboro",
"State":"TN",
"ZipCode":38562
},
{
"GivenName":"James",
"Surname":"Bales",
"EmailAddress":"JamesMBales@cuvox.de",
"TelephoneNumber":"310-823-0597",
"StreetAddress":"2521 Meadowbrook Mall Road",
"City":"Playa Del Rey",
"State":"CA",
"ZipCode":90291
},
{
"GivenName":"Louis",
"Surname":"Fulton",
"EmailAddress":"LouisCFulton@cuvox.de",
"TelephoneNumber":"931-206-5547",
"StreetAddress":"3231 Farm Meadow Drive",
"City":"Nashville",
"State":"TN",
"ZipCode":37209
},
{
"GivenName":"Henry",
"Surname":"Jarvis",
"EmailAddress":"HenryKJarvis@rhyta.com",
"TelephoneNumber":"910-766-8806",
"StreetAddress":"1668 Armory Road",
"City":"Clinton",
"State":"NC",
"ZipCode":28328
},
{
"GivenName":"Joseph",
"Surname":"Loeb",
"EmailAddress":"JosephLLoeb@einrot.com",
"TelephoneNumber":"786-478-2913",
"StreetAddress":"3319 Rinehart Road",
"City":"Miami",
"State":"FL",
"ZipCode":33176
},
{
"GivenName":"Maria",
"Surname":"Goss",
"EmailAddress":"MariaGGoss@jourrapide.com",
"TelephoneNumber":"517-596-6864",
"StreetAddress":"2984 Haven Lane",
"City":"Munith",
"State":"MI",
"ZipCode":49259
},
{
"GivenName":"Maria",
"Surname":"Coleman",
"EmailAddress":"MariaKColeman@superrito.com",
"TelephoneNumber":"678-560-2550",
"StreetAddress":"4586 Hanifan Lane",
"City":"Marietta",
"State":"GA",
"ZipCode":30062
},
{
"GivenName":"Dorothy",
"Surname":"Dixon",
"EmailAddress":"DorothyMDixon@armyspy.com",
"TelephoneNumber":"510-446-4756",
"StreetAddress":"4000 Thompson Drive",
"City":"Oakland",
"State":"CA",
"ZipCode":94612
},
{
"GivenName":"Manuel",
"Surname":"Herrman",
"EmailAddress":"ManuelGHerrman@cuvox.de",
"TelephoneNumber":"515-222-9358",
"StreetAddress":"2051 Jenna Lane",
"City":"West Des Moines",
"State":"IA",
"ZipCode":50266
},
{
"GivenName":"Janet",
"Surname":"Neal",
"EmailAddress":"JanetCNeal@einrot.com",
"TelephoneNumber":"580-919-5269",
"StreetAddress":"1927 Dovetail Estates",
"City":"Oklahoma City",
"State":"OK",
"ZipCode":73102
},
{
"GivenName":"James",
"Surname":"Gonzales",
"EmailAddress":"JamesAGonzales@einrot.com",
"TelephoneNumber":"781-348-3571",
"StreetAddress":"617 Romano Street",
"City":"Braintree",
"State":"MA",
"ZipCode":42184
},
{
"GivenName":"Pricilla",
"Surname":"Vancleave",
"EmailAddress":"PricillaFVancleave@jourrapide.com",
"TelephoneNumber":"202-313-2278",
"StreetAddress":"4435 Passaic Street",
"City":"Washington",
"State":"DC",
"ZipCode":20007
},
{
"GivenName":"Carl",
"Surname":"McConnell",
"EmailAddress":"CarlLMcConnell@cuvox.de",
"TelephoneNumber":"504-368-0101",
"StreetAddress":"2670 Paul Wayne Haggerty Road",
"City":"New Orleans",
"State":"LA",
"ZipCode":70114
},
{
"GivenName":"Maxine",
"Surname":"Darden",
"EmailAddress":"MaxineGDarden@einrot.com",
"TelephoneNumber":"706-866-6418",
"StreetAddress":"1250 Riverside Drive",
"City":"Chattanooga",
"State":"GA",
"ZipCode":30741
},
{
"GivenName":"Johnny",
"Surname":"Robinson",
"EmailAddress":"JohnnyVRobinson@gustr.com",
"TelephoneNumber":"206-465-2683",
"StreetAddress":"3016 University Street",
"City":"Seattle",
"State":"WA",
"ZipCode":98106
},
{
"GivenName":"Pauline",
"Surname":"Sanders",
"EmailAddress":"PaulineGSanders@cuvox.de",
"TelephoneNumber":"910-409-9341",
"StreetAddress":"2862 Twin Willow Lane",
"City":"Wilmington",
"State":"NC",
"ZipCode":28401
},
{
"GivenName":"Eleanor",
"Surname":"Williamson",
"EmailAddress":"EleanorDWilliamson@teleworm.us",
"TelephoneNumber":"347-658-6025",
"StreetAddress":"191 Redbud Drive",
"City":"New York",
"State":"NY",
"ZipCode":10013
},
{
"GivenName":"Frank",
"Surname":"Fernandez",
"EmailAddress":"FrankCFernandez@rhyta.com",
"TelephoneNumber":"908-843-5847",
"StreetAddress":"2337 Beechwood Avenue",
"City":"Piscataway",
"State":"NJ",
"ZipCode":48854
},
{
"GivenName":"Toni",
"Surname":"Miller",
"EmailAddress":"ToniIMiller@jourrapide.com",
"TelephoneNumber":"870-227-0648",
"StreetAddress":"200 Arlington Avenue",
"City":"Little Rock",
"State":"AR",
"ZipCode":72212
},
{
"GivenName":"Kevin",
"Surname":"Dow",
"EmailAddress":"KevinRDow@gustr.com",
"TelephoneNumber":"406-685-4119",
"StreetAddress":"1453 Tibbs Avenue",
"City":"Harrison",
"State":"MT",
"ZipCode":59735
},
{
"GivenName":"Brigitte",
"Surname":"Silvestri",
"EmailAddress":"BrigitteMSilvestri@dayrep.com",
"TelephoneNumber":"206-292-9024",
"StreetAddress":"1887 Owagner Lane",
"City":"Seattle",
"State":"WA",
"ZipCode":98101
},
{
"GivenName":"Moises",
"Surname":"Painter",
"EmailAddress":"MoisesPPainter@cuvox.de",
"TelephoneNumber":"847-808-1060",
"StreetAddress":"3934 Pinewood Drive",
"City":"Wheeling",
"State":"IL",
"ZipCode":60090
},
{
"GivenName":"Alice",
"Surname":"Pasternak",
"EmailAddress":"AliceWPasternak@superrito.com",
"TelephoneNumber":"412-405-2099",
"StreetAddress":"1554 Chandler Hollow Road",
"City":"Pittsburgh",
"State":"PA",
"ZipCode":15212
},
{
"GivenName":"Heather",
"Surname":"Garcia",
"EmailAddress":"HeatherFGarcia@jourrapide.com",
"TelephoneNumber":"651-696-2578",
"StreetAddress":"1325 B Street",
"City":"Saint Paul",
"State":"MN",
"ZipCode":55105
},
{
"GivenName":"Catherine",
"Surname":"Dale",
"EmailAddress":"CatherineJDale@armyspy.com",
"TelephoneNumber":"979-691-8791",
"StreetAddress":"2429 Colonial Drive",
"City":"College Station",
"State":"TX",
"ZipCode":77840
},
{
"GivenName":"Robert",
"Surname":"Morgan",
"EmailAddress":"RobertRMorgan@cuvox.de",
"TelephoneNumber":"847-676-5204",
"StreetAddress":"3955 Vine Street",
"City":"Skokie",
"State":"IL",
"ZipCode":60077
},
{
"GivenName":"Hazel",
"Surname":"Payne",
"EmailAddress":"HazelFPayne@rhyta.com",
"TelephoneNumber":"404-656-6330",
"StreetAddress":"2815 Junior Avenue",
"City":"Atlanta",
"State":"GA",
"ZipCode":30303
},
{
"GivenName":"Barbara",
"Surname":"Espinosa",
"EmailAddress":"BarbaraCEspinosa@einrot.com",
"TelephoneNumber":"508-937-1231",
"StreetAddress":"61 Lyon Avenue",
"City":"Springfield",
"State":"MA",
"ZipCode":41109
},
{
"GivenName":"Lionel",
"Surname":"Dayton",
"EmailAddress":"LionelCDayton@gustr.com",
"TelephoneNumber":"801-715-8424",
"StreetAddress":"3500 Hickory Street",
"City":"Salt Lake City",
"State":"UT",
"ZipCode":84111
},
{
"GivenName":"Jayme",
"Surname":"Rabe",
"EmailAddress":"JaymeCRabe@armyspy.com",
"TelephoneNumber":"813-541-3494",
"StreetAddress":"3844 Saints Alley",
"City":"Plant City",
"State":"FL",
"ZipCode":33566
},
{
"GivenName":"Alma",
"Surname":"Wood",
"EmailAddress":"AlmaPWood@einrot.com",
"TelephoneNumber":"614-785-5621",
"StreetAddress":"1955 Collins Avenue",
"City":"Worthington",
"State":"OH",
"ZipCode":43085
},
{
"GivenName":"Melissa",
"Surname":"Brown",
"EmailAddress":"MelissaFBrown@einrot.com",
"TelephoneNumber":"330-817-7010",
"StreetAddress":"1347 Rivendell Drive",
"City":"Youngstown",
"State":"OH",
"ZipCode":44503
},
{
"GivenName":"Jose",
"Surname":"Quinn",
"EmailAddress":"JoseMQuinn@armyspy.com",
"TelephoneNumber":"406-206-8592",
"StreetAddress":"2150 Meadow Drive",
"City":"Billings",
"State":"MT",
"ZipCode":59102
},
{
"GivenName":"Lois",
"Surname":"Campbell",
"EmailAddress":"LoisJCampbell@gustr.com",
"TelephoneNumber":"203-613-3078",
"StreetAddress":"1428 Whitman Court",
"City":"Wallingford",
"State":"CT",
"ZipCode":46492
},
{
"GivenName":"Susan",
"Surname":"Walker",
"EmailAddress":"SusanOWalker@einrot.com",
"TelephoneNumber":"252-574-6645",
"StreetAddress":"2313 Rockwell Lane",
"City":"Jackson",
"State":"NC",
"ZipCode":27845
},
{
"GivenName":"Dana",
"Surname":"Hendricks",
"EmailAddress":"DanaAHendricks@armyspy.com",
"TelephoneNumber":"631-360-1753",
"StreetAddress":"1015 Wayback Lane",
"City":"Smithtown",
"State":"NY",
"ZipCode":11787
},
{
"GivenName":"Michael",
"Surname":"Leone",
"EmailAddress":"MichaelMLeone@dayrep.com",
"TelephoneNumber":"716-278-0325",
"StreetAddress":"507 Cameron Road",
"City":"Niagara Falls",
"State":"NY",
"ZipCode":14301
},
{
"GivenName":"Patrick",
"Surname":"Baker",
"EmailAddress":"PatrickABaker@superrito.com",
"TelephoneNumber":"402-942-5750",
"StreetAddress":"3805 Bungalow Road",
"City":"Omaha",
"State":"NE",
"ZipCode":68114
},
{
"GivenName":"Eddie",
"Surname":"Paschall",
"EmailAddress":"EddieDPaschall@cuvox.de",
"TelephoneNumber":"734-403-4199",
"StreetAddress":"4362 Charles Street",
"City":"Southfield",
"State":"MI",
"ZipCode":48034
},
{
"GivenName":"Robert",
"Surname":"Conrad",
"EmailAddress":"RobertFConrad@teleworm.us",
"TelephoneNumber":"612-607-5015",
"StreetAddress":"2085 Jewell Road",
"City":"Minneapolis",
"State":"MN",
"ZipCode":55401
},
{
"GivenName":"David",
"Surname":"Forth",
"EmailAddress":"DavidDForth@dayrep.com",
"TelephoneNumber":"970-584-0779",
"StreetAddress":"2777 Pick Street",
"City":"Centennial",
"State":"CO",
"ZipCode":80112
},
{
"GivenName":"David",
"Surname":"Pritchett",
"EmailAddress":"DavidKPritchett@fleckens.hu",
"TelephoneNumber":"714-322-1318",
"StreetAddress":"211 Maple Street",
"City":"Anaheim",
"State":"CA",
"ZipCode":92801
},
{
"GivenName":"Derek",
"Surname":"Lewis",
"EmailAddress":"DerekDLewis@gustr.com",
"TelephoneNumber":"915-663-9288",
"StreetAddress":"1924 Birch Street",
"City":"El Paso",
"State":"TX",
"ZipCode":79905
},
{
"GivenName":"Julius",
"Surname":"Morse",
"EmailAddress":"JuliusBMorse@fleckens.hu",
"TelephoneNumber":"510-984-6597",
"StreetAddress":"1840 Station Street",
"City":"San Francisco",
"State":"CA",
"ZipCode":94105
},
{
"GivenName":"Earl",
"Surname":"Hollon",
"EmailAddress":"EarlNHollon@gustr.com",
"TelephoneNumber":"606-907-3897",
"StreetAddress":"1682 Hinkle Deegan Lake Road",
"City":"Lexington",
"State":"KY",
"ZipCode":40507
},
{
"GivenName":"Kevin",
"Surname":"Allen",
"EmailAddress":"KevinJAllen@dayrep.com",
"TelephoneNumber":"909-563-5228",
"StreetAddress":"1454 Gordon Street",
"City":"Los Angeles",
"State":"CA",
"ZipCode":90017
},
{
"GivenName":"David",
"Surname":"Labadie",
"EmailAddress":"DavidPLabadie@armyspy.com",
"TelephoneNumber":"661-695-5182",
"StreetAddress":"3880 Atha Drive",
"City":"Bakersfield",
"State":"CA",
"ZipCode":93301
},
{
"GivenName":"Alvin",
"Surname":"Miles",
"EmailAddress":"AlvinAMiles@dayrep.com",
"TelephoneNumber":"706-251-7673",
"StreetAddress":"1197 Holly Street",
"City":"Blue Ridge",
"State":"GA",
"ZipCode":30513
},
{
"GivenName":"Martin",
"Surname":"Halcomb",
"EmailAddress":"MartinDHalcomb@armyspy.com",
"TelephoneNumber":"207-456-4930",
"StreetAddress":"1303 Victoria Court",
"City":"Danforth",
"State":"ME",
"ZipCode":44424
},
{
"GivenName":"Porfirio",
"Surname":"Ingram",
"EmailAddress":"PorfirioJIngram@armyspy.com",
"TelephoneNumber":"585-243-0873",
"StreetAddress":"2208 Caldwell Road",
"City":"Geneseo",
"State":"NY",
"ZipCode":14454
},
{
"GivenName":"Ann",
"Surname":"Mair",
"EmailAddress":"AnnRMair@dayrep.com",
"TelephoneNumber":"732-494-0739",
"StreetAddress":"2382 Finwood Road",
"City":"Metuchen",
"State":"NJ",
"ZipCode":48840
},
{
"GivenName":"Shaun",
"Surname":"Brown",
"EmailAddress":"ShaunTBrown@fleckens.hu",
"TelephoneNumber":"910-866-8534",
"StreetAddress":"2536 Layman Avenue",
"City":"White Oak",
"State":"NC",
"ZipCode":28399
},
{
"GivenName":"Terri",
"Surname":"Coffman",
"EmailAddress":"TerriDCoffman@cuvox.de",
"TelephoneNumber":"443-849-2034",
"StreetAddress":"3710 Hickory Heights Drive",
"City":"Towson",
"State":"MD",
"ZipCode":21204
},
{
"GivenName":"Kathie",
"Surname":"Scheller",
"EmailAddress":"KathieWScheller@cuvox.de",
"TelephoneNumber":"423-221-0429",
"StreetAddress":"2322 Nixon Avenue",
"City":"Chattanooga",
"State":"TN",
"ZipCode":37421
},
{
"GivenName":"Duane",
"Surname":"Horton",
"EmailAddress":"DuaneCHorton@rhyta.com",
"TelephoneNumber":"510-448-7551",
"StreetAddress":"4803 Thompson Drive",
"City":"Oakland",
"State":"CA",
"ZipCode":94612
},
{
"GivenName":"Betty",
"Surname":"Presley",
"EmailAddress":"BettyCPresley@einrot.com",
"TelephoneNumber":"757-315-4005",
"StreetAddress":"1941 Pinchelone Street",
"City":"Norfolk",
"State":"VA",
"ZipCode":23504
},
{
"GivenName":"Robert",
"Surname":"Conner",
"EmailAddress":"RobertSConner@rhyta.com",
"TelephoneNumber":"714-609-4925",
"StreetAddress":"1962 Cimmaron Road",
"City":"Burbank",
"State":"CA",
"ZipCode":91505
},
{
"GivenName":"Rochelle",
"Surname":"Gusman",
"EmailAddress":"RochelleSGusman@superrito.com",
"TelephoneNumber":"662-536-9650",
"StreetAddress":"1299 Saint Clair Street",
"City":"Southaven",
"State":"MS",
"ZipCode":38671
},
{
"GivenName":"Alice",
"Surname":"Reyes",
"EmailAddress":"AliceJReyes@fleckens.hu",
"TelephoneNumber":"240-671-2488",
"StreetAddress":"1133 Chatham Way",
"City":"Washington",
"State":"MD",
"ZipCode":20200
},
{
"GivenName":"April",
"Surname":"Deemer",
"EmailAddress":"AprilWDeemer@superrito.com",
"TelephoneNumber":"561-870-7569",
"StreetAddress":"4343 Mulberry Lane",
"City":"West Palm Beach",
"State":"FL",
"ZipCode":33401
},
{
"GivenName":"Misty",
"Surname":"Jones",
"EmailAddress":"MistyCJones@einrot.com",
"TelephoneNumber":"913-980-1543",
"StreetAddress":"2929 Lake Forest Drive",
"City":"Kansas City",
"State":"KS",
"ZipCode":64108
},
{
"GivenName":"Bertie",
"Surname":"Burnam",
"EmailAddress":"BertieNBurnam@rhyta.com",
"TelephoneNumber":"915-200-8683",
"StreetAddress":"3865 Ward Road",
"City":"El Paso",
"State":"TX",
"ZipCode":79901
},
{
"GivenName":"Elizabeth",
"Surname":"Leon",
"EmailAddress":"ElizabethFLeon@dayrep.com",
"TelephoneNumber":"530-682-5579",
"StreetAddress":"2256 Eagles Nest Drive",
"City":"Rancho Cordova",
"State":"CA",
"ZipCode":95742
},
{
"GivenName":"Nathan",
"Surname":"Borman",
"EmailAddress":"NathanPBorman@einrot.com",
"TelephoneNumber":"386-677-6159",
"StreetAddress":"3526 Spirit Drive",
"City":"Ormond Beach",
"State":"FL",
"ZipCode":32174
},
{
"GivenName":"James",
"Surname":"Deluna",
"EmailAddress":"JamesDDeluna@cuvox.de",
"TelephoneNumber":"407-520-5016",
"StreetAddress":"1098 Linden Avenue",
"City":"Winter Park",
"State":"FL",
"ZipCode":32789
},
{
"GivenName":"Louis",
"Surname":"Wong",
"EmailAddress":"LouisMWong@armyspy.com",
"TelephoneNumber":"210-326-1093",
"StreetAddress":"4030 Weekley Street",
"City":"San Antonio",
"State":"TX",
"ZipCode":78212
},
{
"GivenName":"Harry",
"Surname":"Ellis",
"EmailAddress":"HarryEEllis@teleworm.us",
"TelephoneNumber":"724-553-9767",
"StreetAddress":"1094 Shinn Avenue",
"City":"Sewickley",
"State":"PA",
"ZipCode":15143
},
{
"GivenName":"Glenn",
"Surname":"Wiseman",
"EmailAddress":"GlennHWiseman@superrito.com",
"TelephoneNumber":"412-848-0930",
"StreetAddress":"184 Lucky Duck Drive",
"City":"Pittsburgh",
"State":"PA",
"ZipCode":15222
},
{
"GivenName":"Laura",
"Surname":"Brady",
"EmailAddress":"LauraJBrady@superrito.com",
"TelephoneNumber":"608-756-5978",
"StreetAddress":"1855 Irish Lane",
"City":"Janesville",
"State":"WI",
"ZipCode":53545
},
{
"GivenName":"Cesar",
"Surname":"Gray",
"EmailAddress":"CesarMGray@rhyta.com",
"TelephoneNumber":"970-769-0961",
"StreetAddress":"4582 Stark Hollow Road",
"City":"Durango",
"State":"CO",
"ZipCode":81302
},
{
"GivenName":"Rodney",
"Surname":"Paulson",
"EmailAddress":"RodneyDPaulson@fleckens.hu",
"TelephoneNumber":"217-397-6197",
"StreetAddress":"2762 Bolman Court",
"City":"Rankin",
"State":"IL",
"ZipCode":60960
},
{
"GivenName":"Lupita",
"Surname":"Prude",
"EmailAddress":"LupitaHPrude@superrito.com",
"TelephoneNumber":"501-388-9755",
"StreetAddress":"3136 Mulberry Avenue",
"City":"Little Rock",
"State":"AR",
"ZipCode":72212
},
{
"GivenName":"Erin",
"Surname":"Furlow",
"EmailAddress":"ErinJFurlow@fleckens.hu",
"TelephoneNumber":"916-398-0649",
"StreetAddress":"3776 Pearl Street",
"City":"Sacramento",
"State":"CA",
"ZipCode":95826
},
{
"GivenName":"Patricia",
"Surname":"Sharkey",
"EmailAddress":"PatriciaGSharkey@rhyta.com",
"TelephoneNumber":"513-260-1942",
"StreetAddress":"945 Walnut Hill Drive",
"City":"Cincinnati",
"State":"OH",
"ZipCode":45202
},
{
"GivenName":"Doris",
"Surname":"Robertson",
"EmailAddress":"DorisJRobertson@gustr.com",
"TelephoneNumber":"213-388-7047",
"StreetAddress":"832 Zimmerman Lane",
"City":"Los Angeles",
"State":"CA",
"ZipCode":90057
},
{
"GivenName":"Robert",
"Surname":"Malloy",
"EmailAddress":"RobertRMalloy@rhyta.com",
"TelephoneNumber":"713-662-5428",
"StreetAddress":"3715 Brooke Street",
"City":"Houston",
"State":"TX",
"ZipCode":77025
},
{
"GivenName":"Venita",
"Surname":"Wada",
"EmailAddress":"VenitaDWada@superrito.com",
"TelephoneNumber":"312-250-9950",
"StreetAddress":"3744 Cecil Street",
"City":"Westchester",
"State":"IL",
"ZipCode":60154
},
{
"GivenName":"Margie",
"Surname":"Lowe",
"EmailAddress":"MargieRLowe@fleckens.hu",
"TelephoneNumber":"757-204-1182",
"StreetAddress":"1119 Kildeer Drive",
"City":"Chesapeake",
"State":"VA",
"ZipCode":23323
},
{
"GivenName":"Robert",
"Surname":"Frye",
"EmailAddress":"RobertMFrye@jourrapide.com",
"TelephoneNumber":"305-729-9976",
"StreetAddress":"2400 Steve Hunt Road",
"City":"Miami Springs",
"State":"FL",
"ZipCode":33166
},
{
"GivenName":"Don",
"Surname":"McDonald",
"EmailAddress":"DonMMcDonald@armyspy.com",
"TelephoneNumber":"636-447-0008",
"StreetAddress":"4048 Rodney Street",
"City":"Harvester",
"State":"MO",
"ZipCode":63301
},
{
"GivenName":"Moses",
"Surname":"Powers",
"EmailAddress":"MosesDPowers@teleworm.us",
"TelephoneNumber":"734-839-4809",
"StreetAddress":"3322 Eagle Drive",
"City":"Southfield",
"State":"MI",
"ZipCode":48075
},
{
"GivenName":"Joshua",
"Surname":"Barclay",
"EmailAddress":"JoshuaJBarclay@cuvox.de",
"TelephoneNumber":"530-367-8977",
"StreetAddress":"2885 Riverwood Drive",
"City":"Foresthill",
"State":"CA",
"ZipCode":95631
},
{
"GivenName":"Christine",
"Surname":"Harris",
"EmailAddress":"ChristineJHarris@teleworm.us",
"TelephoneNumber":"508-528-5193",
"StreetAddress":"4222 Ferguson Street",
"City":"Franklin",
"State":"MA",
"ZipCode":42038
},
{
"GivenName":"Ashley",
"Surname":"Drumm",
"EmailAddress":"AshleyMDrumm@rhyta.com",
"TelephoneNumber":"252-650-7602",
"StreetAddress":"665 Edwards Street",
"City":"Rocky Mount",
"State":"NC",
"ZipCode":27801
},
{
"GivenName":"Rufus",
"Surname":"Sampson",
"EmailAddress":"RufusPSampson@teleworm.us",
"TelephoneNumber":"415-756-4668",
"StreetAddress":"4027 Jim Rosa Lane",
"City":"Oakland",
"State":"CA",
"ZipCode":94607
},
{
"GivenName":"Danielle",
"Surname":"Ramos",
"EmailAddress":"DanielleSRamos@teleworm.us",
"TelephoneNumber":"715-645-2804",
"StreetAddress":"168 Mercer Street",
"City":"Barron",
"State":"WI",
"ZipCode":54812
},
{
"GivenName":"Christopher",
"Surname":"Carrillo",
"EmailAddress":"ChristopherCCarrillo@rhyta.com",
"TelephoneNumber":"914-813-7565",
"StreetAddress":"690 Deans Lane",
"City":"White Plains",
"State":"NY",
"ZipCode":10601
},
{
"GivenName":"Charles",
"Surname":"Cleveland",
"EmailAddress":"CharlesBCleveland@jourrapide.com",
"TelephoneNumber":"920-681-3385",
"StreetAddress":"3244 Highland Drive",
"City":"Milwaukee",
"State":"WI",
"ZipCode":53202
},
{
"GivenName":"Joyce",
"Surname":"Smith",
"EmailAddress":"JoyceWSmith@teleworm.us",
"TelephoneNumber":"337-353-6681",
"StreetAddress":"2490 Willow Oaks Lane",
"City":"Lake Charles",
"State":"LA",
"ZipCode":70629
},
{
"GivenName":"Jessica",
"Surname":"Ireland",
"EmailAddress":"JessicaJIreland@einrot.com",
"TelephoneNumber":"412-958-4542",
"StreetAddress":"1074 Frank Avenue",
"City":"Pittsburgh",
"State":"PA",
"ZipCode":15205
},
{
"GivenName":"Gloria",
"Surname":"Grove",
"EmailAddress":"GloriaJGrove@superrito.com",
"TelephoneNumber":"763-289-4693",
"StreetAddress":"2752 Willison Street",
"City":"Saint Paul",
"State":"MN",
"ZipCode":55104
},
{
"GivenName":"Ethel",
"Surname":"Moran",
"EmailAddress":"EthelDMoran@rhyta.com",
"TelephoneNumber":"206-341-5645",
"StreetAddress":"3923 Owagner Lane",
"City":"Seattle",
"State":"WA",
"ZipCode":98101
},
{
"GivenName":"Sofia",
"Surname":"Setser",
"EmailAddress":"SofiaRSetser@dayrep.com",
"TelephoneNumber":"505-672-4779",
"StreetAddress":"96 Cooks Mine Road",
"City":"Los Alamos",
"State":"NM",
"ZipCode":87544
},
{
"GivenName":"Mildred",
"Surname":"Kelly",
"EmailAddress":"MildredNKelly@rhyta.com",
"TelephoneNumber":"816-724-9274",
"StreetAddress":"3838 White Oak Drive",
"City":"Rushville",
"State":"MO",
"ZipCode":64484
},
{
"GivenName":"Robert",
"Surname":"Rich",
"EmailAddress":"RobertERich@teleworm.us",
"TelephoneNumber":"817-483-2779",
"StreetAddress":"693 Jones Street",
"City":"Kennedale",
"State":"TX",
"ZipCode":76135
},
{
"GivenName":"Judith",
"Surname":"Madden",
"EmailAddress":"JudithGMadden@teleworm.us",
"TelephoneNumber":"507-396-0727",
"StreetAddress":"4697 Pritchard Court",
"City":"Owatonna",
"State":"MN",
"ZipCode":55060
},
{
"GivenName":"Leon",
"Surname":"Wynkoop",
"EmailAddress":"LeonSWynkoop@einrot.com",
"TelephoneNumber":"315-293-2810",
"StreetAddress":"3757 Oak Street",
"City":"Syracuse",
"State":"NY",
"ZipCode":13202
},
{
"GivenName":"James",
"Surname":"McGill",
"EmailAddress":"JamesEMcGill@fleckens.hu",
"TelephoneNumber":"281-783-7458",
"StreetAddress":"417 Grey Fox Farm Road",
"City":"Houston",
"State":"TX",
"ZipCode":77028
},
{
"GivenName":"Jennifer",
"Surname":"Collins",
"EmailAddress":"JenniferBCollins@fleckens.hu",
"TelephoneNumber":"316-655-6749",
"StreetAddress":"3089 Williams Lane",
"City":"Wichita",
"State":"KS",
"ZipCode":67202
},
{
"GivenName":"Nicholas",
"Surname":"Jones",
"EmailAddress":"NicholasRJones@dayrep.com",
"TelephoneNumber":"201-805-0526",
"StreetAddress":"575 Lakewood Drive",
"City":"Rochelle Park",
"State":"NJ",
"ZipCode":47662
},
{
"GivenName":"Marie",
"Surname":"McLaughlin",
"EmailAddress":"MarieJMcLaughlin@armyspy.com",
"TelephoneNumber":"602-591-2289",
"StreetAddress":"1523 Coplin Avenue",
"City":"Phoenix",
"State":"AZ",
"ZipCode":85016
},
{
"GivenName":"Albert",
"Surname":"Krogman",
"EmailAddress":"AlbertLKrogman@rhyta.com",
"TelephoneNumber":"712-332-9500",
"StreetAddress":"821 Ashwood Drive",
"City":"Arnolds Park",
"State":"IA",
"ZipCode":51331
},
{
"GivenName":"Effie",
"Surname":"Gill",
"EmailAddress":"EffieDGill@teleworm.us",
"TelephoneNumber":"225-237-7930",
"StreetAddress":"2655 Victoria Street",
"City":"Baton Rouge",
"State":"LA",
"ZipCode":70806
},
{
"GivenName":"Rebecca",
"Surname":"Amar",
"EmailAddress":"RebeccaMAmar@rhyta.com",
"TelephoneNumber":"765-371-6806",
"StreetAddress":"4980 Overlook Drive",
"City":"Indianapolis",
"State":"IN",
"ZipCode":46225
},
{
"GivenName":"Andre",
"Surname":"Wheaton",
"EmailAddress":"AndreAWheaton@armyspy.com",
"TelephoneNumber":"775-540-2580",
"StreetAddress":"4832 Sheila Lane",
"City":"Las Vegas",
"State":"NV",
"ZipCode":89101
},
{
"GivenName":"Albert",
"Surname":"Hudnall",
"EmailAddress":"AlbertIHudnall@jourrapide.com",
"TelephoneNumber":"251-438-4995",
"StreetAddress":"819 Lonely Oak Drive",
"City":"Mobile",
"State":"AL",
"ZipCode":36602
},
{
"GivenName":"Edwin",
"Surname":"Guyton",
"EmailAddress":"EdwinAGuyton@superrito.com",
"TelephoneNumber":"781-265-7937",
"StreetAddress":"3982 Burke Street",
"City":"Framingham",
"State":"MA",
"ZipCode":41701
},
{
"GivenName":"Zora",
"Surname":"Struck",
"EmailAddress":"ZoraJStruck@gustr.com",
"TelephoneNumber":"609-783-7632",
"StreetAddress":"3547 Watson Street",
"City":"Somerdale",
"State":"NJ",
"ZipCode":48083
},
{
"GivenName":"Monica",
"Surname":"Barron",
"EmailAddress":"MonicaCBarron@teleworm.us",
"TelephoneNumber":"262-717-9061",
"StreetAddress":"3710 Fairfield Road",
"City":"Town Of Brookfield",
"State":"WI",
"ZipCode":53186
},
{
"GivenName":"Alma",
"Surname":"Johnson",
"EmailAddress":"AlmaAJohnson@armyspy.com",
"TelephoneNumber":"662-302-5879",
"StreetAddress":"4677 Oxford Court",
"City":"Greenwood",
"State":"MS",
"ZipCode":38930
},
{
"GivenName":"Tina",
"Surname":"Lucas",
"EmailAddress":"TinaALucas@gustr.com",
"TelephoneNumber":"912-670-1837",
"StreetAddress":"4960 Yorkie Lane",
"City":"Waycross",
"State":"GA",
"ZipCode":31501
},
{
"GivenName":"James",
"Surname":"Prescott",
"EmailAddress":"JamesEPrescott@superrito.com",
"TelephoneNumber":"203-546-1063",
"StreetAddress":"845 Cook Hill Road",
"City":"Hartford",
"State":"CT",
"ZipCode":46103
},
{
"GivenName":"Esther",
"Surname":"Bushey",
"EmailAddress":"EstherTBushey@rhyta.com",
"TelephoneNumber":"703-805-1667",
"StreetAddress":"4281 Perine Street",
"City":"Alexandria",
"State":"VA",
"ZipCode":22370
},
{
"GivenName":"Juan",
"Surname":"Strain",
"EmailAddress":"JuanAStrain@cuvox.de",
"TelephoneNumber":"502-368-6398",
"StreetAddress":"3274 Cerullo Road",
"City":"Louisville",
"State":"KY",
"ZipCode":40244
},
{
"GivenName":"Freddie",
"Surname":"Shafer",
"EmailAddress":"FreddieRShafer@jourrapide.com",
"TelephoneNumber":"570-582-3452",
"StreetAddress":"1008 Coal Road",
"City":"Pittston",
"State":"PA",
"ZipCode":18640
},
{
"GivenName":"Phillis",
"Surname":"Dixon",
"EmailAddress":"PhillisCDixon@cuvox.de",
"TelephoneNumber":"904-221-7736",
"StreetAddress":"867 Alpha Avenue",
"City":"Jacksonville Beach",
"State":"FL",
"ZipCode":32250
},
{
"GivenName":"Francisco",
"Surname":"Murray",
"EmailAddress":"FranciscoMMurray@gustr.com",
"TelephoneNumber":"408-922-5225",
"StreetAddress":"149 Sycamore Street",
"City":"San Jose",
"State":"CA",
"ZipCode":95131
},
{
"GivenName":"Wayne",
"Surname":"Young",
"EmailAddress":"WayneBYoung@einrot.com",
"TelephoneNumber":"704-574-1072",
"StreetAddress":"1591 Concord Street",
"City":"Matthews",
"State":"NC",
"ZipCode":28105
},
{
"GivenName":"Russell",
"Surname":"Sloan",
"EmailAddress":"RussellCSloan@dayrep.com",
"TelephoneNumber":"212-880-5788",
"StreetAddress":"2835 Small Street",
"City":"New York",
"State":"NY",
"ZipCode":10016
},
{
"GivenName":"Paula",
"Surname":"Calhoun",
"EmailAddress":"PaulaJCalhoun@armyspy.com",
"TelephoneNumber":"432-780-7695",
"StreetAddress":"1751 South Street",
"City":"Midland",
"State":"TX",
"ZipCode":79701
},
{
"GivenName":"Mary",
"Surname":"Henson",
"EmailAddress":"MarySHenson@armyspy.com",
"TelephoneNumber":"646-473-4822",
"StreetAddress":"3621 Elm Drive",
"City":"New York",
"State":"NY",
"ZipCode":10018
},
{
"GivenName":"Jane",
"Surname":"Peavey",
"EmailAddress":"JaneJPeavey@rhyta.com",
"TelephoneNumber":"928-854-5800",
"StreetAddress":"3191 Farm Meadow Drive",
"City":"Lake Havasu City",
"State":"AZ",
"ZipCode":86403
},
{
"GivenName":"Mario",
"Surname":"Rex",
"EmailAddress":"MarioKRex@jourrapide.com",
"TelephoneNumber":"903-467-8291",
"StreetAddress":"3745 Florence Street",
"City":"Corsicana",
"State":"TX",
"ZipCode":75110
},
{
"GivenName":"Mark",
"Surname":"Cowan",
"EmailAddress":"MarkMCowan@jourrapide.com",
"TelephoneNumber":"813-479-4612",
"StreetAddress":"1670 Marion Drive",
"City":"Tampa",
"State":"FL",
"ZipCode":33602
},
{
"GivenName":"Carl",
"Surname":"Rodgers",
"EmailAddress":"CarlBRodgers@cuvox.de",
"TelephoneNumber":"754-235-4569",
"StreetAddress":"12 Kildeer Drive",
"City":"Boynton Beach",
"State":"FL",
"ZipCode":33436
},
{
"GivenName":"Kim",
"Surname":"Harvey",
"EmailAddress":"KimJHarvey@rhyta.com",
"TelephoneNumber":"901-375-7172",
"StreetAddress":"3676 Woodridge Lane",
"City":"Memphis",
"State":"TN",
"ZipCode":38118
},
{
"GivenName":"Rhonda",
"Surname":"Jimenez",
"EmailAddress":"RhondaSJimenez@rhyta.com",
"TelephoneNumber":"740-747-2121",
"StreetAddress":"1240 Viking Drive",
"City":"Ashley",
"State":"OH",
"ZipCode":43003
},
{
"GivenName":"Lindsay",
"Surname":"Jennings",
"EmailAddress":"LindsayJJennings@einrot.com",
"TelephoneNumber":"931-346-5335",
"StreetAddress":"3496 McDowell Street",
"City":"Nashville",
"State":"TN",
"ZipCode":37214
},
{
"GivenName":"Linda",
"Surname":"Short",
"EmailAddress":"LindaLShort@teleworm.us",
"TelephoneNumber":"215-263-2521",
"StreetAddress":"4524 Burning Memory Lane",
"City":"Philadelphia",
"State":"PA",
"ZipCode":19103
},
{
"GivenName":"Lindsay",
"Surname":"Swank",
"EmailAddress":"LindsayKSwank@rhyta.com",
"TelephoneNumber":"973-441-8367",
"StreetAddress":"2300 Granville Lane",
"City":"Teterboro",
"State":"NJ",
"ZipCode":47608
},
{
"GivenName":"Melissa",
"Surname":"Riddick",
"EmailAddress":"MelissaTRiddick@cuvox.de",
"TelephoneNumber":"215-927-2796",
"StreetAddress":"1512 Hiddenview Drive",
"City":"Philadelphia",
"State":"PA",
"ZipCode":19126
},
{
"GivenName":"Jarrett",
"Surname":"Free",
"EmailAddress":"JarrettCFree@superrito.com",
"TelephoneNumber":"734-293-7283",
"StreetAddress":"2995 Lakeland Terrace",
"City":"Southfield",
"State":"MI",
"ZipCode":48075
},
{
"GivenName":"Evelyn",
"Surname":"Traylor",
"EmailAddress":"EvelynATraylor@superrito.com",
"TelephoneNumber":"609-265-0009",
"StreetAddress":"1242 Dark Hollow Road",
"City":"Mount Holly",
"State":"NJ",
"ZipCode":48060
},
{
"GivenName":"Margaret",
"Surname":"Bell",
"EmailAddress":"MargaretOBell@einrot.com",
"TelephoneNumber":"318-797-1656",
"StreetAddress":"899 Emerson Road",
"City":"Shreveport",
"State":"LA",
"ZipCode":71115
},
{
"GivenName":"Fletcher",
"Surname":"Allen",
"EmailAddress":"FletcherMAllen@superrito.com",
"TelephoneNumber":"908-644-6235",
"StreetAddress":"1205 Hedge Street",
"City":"Piscataway",
"State":"NJ",
"ZipCode":48854
},
{
"GivenName":"Martin",
"Surname":"Buckles",
"EmailAddress":"MartinVBuckles@fleckens.hu",
"TelephoneNumber":"904-259-7553",
"StreetAddress":"3125 Alpha Avenue",
"City":"Macclenny",
"State":"FL",
"ZipCode":32066
},
{
"GivenName":"Treva",
"Surname":"Davidson",
"EmailAddress":"TrevaJDavidson@jourrapide.com",
"TelephoneNumber":"323-883-2605",
"StreetAddress":"4293 Hillhaven Drive",
"City":"Los Angeles",
"State":"CA",
"ZipCode":90046
},
{
"GivenName":"Andrew",
"Surname":"Reed",
"EmailAddress":"AndrewMReed@einrot.com",
"TelephoneNumber":"909-388-3803",
"StreetAddress":"1817 Bel Meadow Drive",
"City":"San Bernardino",
"State":"CA",
"ZipCode":92410
},
{
"GivenName":"Luther",
"Surname":"Howell",
"EmailAddress":"LutherDHowell@teleworm.us",
"TelephoneNumber":"720-635-2697",
"StreetAddress":"3592 Clay Lick Road",
"City":"Denver",
"State":"CO",
"ZipCode":80202
},
{
"GivenName":"Roy",
"Surname":"Hyde",
"EmailAddress":"RoyCHyde@cuvox.de",
"TelephoneNumber":"707-946-4713",
"StreetAddress":"2294 Pretty View Lane",
"City":"Weott",
"State":"CA",
"ZipCode":95571
},
{
"GivenName":"Signe",
"Surname":"Wilkerson",
"EmailAddress":"SigneTWilkerson@cuvox.de",
"TelephoneNumber":"818-361-9887",
"StreetAddress":"1317 Edsel Road",
"City":"San Fernando",
"State":"CA",
"ZipCode":91340
},
{
"GivenName":"Michael",
"Surname":"Rivera",
"EmailAddress":"MichaelTRivera@fleckens.hu",
"TelephoneNumber":"775-754-7003",
"StreetAddress":"351 Martha Ellen Drive",
"City":"Carlin",
"State":"NV",
"ZipCode":89822
},
{
"GivenName":"August",
"Surname":"Padgett",
"EmailAddress":"AugustMPadgett@dayrep.com",
"TelephoneNumber":"917-635-1186",
"StreetAddress":"1293 Hanover Street",
"City":"New York",
"State":"NY",
"ZipCode":10038
},
{
"GivenName":"Billy",
"Surname":"Robinette",
"EmailAddress":"BillyERobinette@armyspy.com",
"TelephoneNumber":"206-287-3166",
"StreetAddress":"875 Owagner Lane",
"City":"Seattle",
"State":"WA",
"ZipCode":98101
},
{
"GivenName":"Jena",
"Surname":"Dejesus",
"EmailAddress":"JenaBDejesus@einrot.com",
"TelephoneNumber":"954-303-2227",
"StreetAddress":"1502 Trails End Road",
"City":"Ft Lauderdale",
"State":"FL",
"ZipCode":33311
},
{
"GivenName":"Jane",
"Surname":"Turner",
"EmailAddress":"JaneRTurner@superrito.com",
"TelephoneNumber":"316-990-4000",
"StreetAddress":"132 Henery Street",
"City":"Wichita",
"State":"KS",
"ZipCode":67214
},
{
"GivenName":"Kaci",
"Surname":"Young",
"EmailAddress":"KaciSYoung@superrito.com",
"TelephoneNumber":"630-634-4847",
"StreetAddress":"1286 Steele Street",
"City":"Oak Brook",
"State":"IL",
"ZipCode":60523
},
{
"GivenName":"Ada",
"Surname":"Novak",
"EmailAddress":"AdaDNovak@jourrapide.com",
"TelephoneNumber":"916-393-0487",
"StreetAddress":"4271 Pearl Street",
"City":"Sacramento",
"State":"CA",
"ZipCode":95823
},
{
"GivenName":"Jeffrey",
"Surname":"Martinez",
"EmailAddress":"JeffreyBMartinez@fleckens.hu",
"TelephoneNumber":"330-524-0972",
"StreetAddress":"1719 Rainbow Drive",
"City":"Akron",
"State":"OH",
"ZipCode":44308
},
{
"GivenName":"Ken",
"Surname":"Torbert",
"EmailAddress":"KenETorbert@superrito.com",
"TelephoneNumber":"303-609-3231",
"StreetAddress":"2555 Sampson Street",
"City":"Denver",
"State":"CO",
"ZipCode":80221
},
{
"GivenName":"David",
"Surname":"George",
"EmailAddress":"DavidBGeorge@einrot.com",
"TelephoneNumber":"910-743-8340",
"StreetAddress":"1566 Armory Road",
"City":"Maysville",
"State":"NC",
"ZipCode":28555
},
{
"GivenName":"Daniel",
"Surname":"Silva",
"EmailAddress":"DanielPSilva@jourrapide.com",
"TelephoneNumber":"405-485-9701",
"StreetAddress":"2338 Benson Park Drive",
"City":"Blanchard",
"State":"OK",
"ZipCode":73010
},
{
"GivenName":"Brooke",
"Surname":"Phillips",
"EmailAddress":"BrookeJPhillips@superrito.com",
"TelephoneNumber":"305-603-7437",
"StreetAddress":"2924 Golden Street",
"City":"Miami",
"State":"FL",
"ZipCode":33128
},
{
"GivenName":"Erin",
"Surname":"Cross",
"EmailAddress":"ErinRCross@jourrapide.com",
"TelephoneNumber":"831-427-8493",
"StreetAddress":"3889 Ventura Drive",
"City":"Santa Cruz",
"State":"CA",
"ZipCode":95060
},
{
"GivenName":"Yvonne",
"Surname":"Montes",
"EmailAddress":"YvonneHMontes@einrot.com",
"TelephoneNumber":"931-788-8080",
"StreetAddress":"2993 Glory Road",
"City":"Tansi",
"State":"TN",
"ZipCode":38555
},
{
"GivenName":"Ramona",
"Surname":"Grant",
"EmailAddress":"RamonaRGrant@superrito.com",
"TelephoneNumber":"501-465-5997",
"StreetAddress":"2917 Masonic Hill Road",
"City":"Little Rock",
"State":"AR",
"ZipCode":72205
},
{
"GivenName":"Helen",
"Surname":"Borrego",
"EmailAddress":"HelenEBorrego@fleckens.hu",
"TelephoneNumber":"207-923-6055",
"StreetAddress":"1312 Upton Avenue",
"City":"South China",
"State":"ME",
"ZipCode":44358
},
{
"GivenName":"Sarah",
"Surname":"Outlaw",
"EmailAddress":"SarahGOutlaw@superrito.com",
"TelephoneNumber":"347-307-7062",
"StreetAddress":"446 My Drive",
"City":"New York",
"State":"NY",
"ZipCode":10013
},
{
"GivenName":"Henry",
"Surname":"Elks",
"EmailAddress":"HenrySElks@gustr.com",
"TelephoneNumber":"330-474-3729",
"StreetAddress":"3359 Rainbow Drive",
"City":"Akron",
"State":"OH",
"ZipCode":44311
},
{
"GivenName":"Amanda",
"Surname":"Hart",
"EmailAddress":"AmandaJHart@einrot.com",
"TelephoneNumber":"972-439-0257",
"StreetAddress":"2647 Whispering Pines Circle",
"City":"Richardson",
"State":"TX",
"ZipCode":75081
},
{
"GivenName":"Gayle",
"Surname":"Askew",
"EmailAddress":"GayleDAskew@gustr.com",
"TelephoneNumber":"260-515-8101",
"StreetAddress":"1205 Windy Ridge Road",
"City":"Fort Wayne",
"State":"IN",
"ZipCode":46818
},
{
"GivenName":"Trisha",
"Surname":"Farrington",
"EmailAddress":"TrishaJFarrington@jourrapide.com",
"TelephoneNumber":"989-667-3581",
"StreetAddress":"3182 Mount Street",
"City":"Bay City",
"State":"MI",
"ZipCode":48706
},
{
"GivenName":"Christina",
"Surname":"Louviere",
"EmailAddress":"ChristinaELouviere@jourrapide.com",
"TelephoneNumber":"408-854-4059",
"StreetAddress":"1259 Sycamore Street",
"City":"San Jose",
"State":"CA",
"ZipCode":95110
},
{
"GivenName":"Rocky",
"Surname":"Moore",
"EmailAddress":"RockySMoore@fleckens.hu",
"TelephoneNumber":"678-722-6543",
"StreetAddress":"3555 Kuhl Avenue",
"City":"Atlanta",
"State":"GA",
"ZipCode":30303
},
{
"GivenName":"Alvin",
"Surname":"Timpson",
"EmailAddress":"AlvinBTimpson@einrot.com",
"TelephoneNumber":"908-789-0914",
"StreetAddress":"1594 Beechwood Avenue",
"City":"Westfield",
"State":"NJ",
"ZipCode":47090
},
{
"GivenName":"Kenneth",
"Surname":"Blanchard",
"EmailAddress":"KennethNBlanchard@gustr.com",
"TelephoneNumber":"305-702-1549",
"StreetAddress":"1999 Steve Hunt Road",
"City":"Miami",
"State":"FL",
"ZipCode":33131
},
{
"GivenName":"Sheldon",
"Surname":"Fogle",
"EmailAddress":"SheldonJFogle@gustr.com",
"TelephoneNumber":"405-631-4786",
"StreetAddress":"1782 Hott Street",
"City":"Oklahoma City",
"State":"OK",
"ZipCode":73109
},
{
"GivenName":"Ruby",
"Surname":"Hatch",
"EmailAddress":"RubyRHatch@gustr.com",
"TelephoneNumber":"224-616-6262",
"StreetAddress":"4464 Victoria Street",
"City":"Chicago",
"State":"IL",
"ZipCode":60631
},
{
"GivenName":"Leonida",
"Surname":"Gentry",
"EmailAddress":"LeonidaWGentry@teleworm.us",
"TelephoneNumber":"409-721-9980",
"StreetAddress":"2906 Burwell Heights Road",
"City":"Nederland",
"State":"TX",
"ZipCode":77627
},
{
"GivenName":"Robert",
"Surname":"Taylor",
"EmailAddress":"RobertRTaylor@superrito.com",
"TelephoneNumber":"360-882-1329",
"StreetAddress":"875 Pratt Avenue",
"City":"Orchards",
"State":"WA",
"ZipCode":98662
},
{
"GivenName":"Sherry",
"Surname":"Holley",
"EmailAddress":"SherryRHolley@dayrep.com",
"TelephoneNumber":"317-639-2107",
"StreetAddress":"3163 Stewart Street",
"City":"Indianapolis",
"State":"IN",
"ZipCode":46204
},
{
"GivenName":"Edith",
"Surname":"Salazar",
"EmailAddress":"EdithJSalazar@jourrapide.com",
"TelephoneNumber":"337-885-5972",
"StreetAddress":"930 Hillside Drive",
"City":"Chataignier",
"State":"LA",
"ZipCode":70524
},
{
"GivenName":"Luella",
"Surname":"Wilson",
"EmailAddress":"LuellaIWilson@gustr.com",
"TelephoneNumber":"727-893-5395",
"StreetAddress":"2733 Mapleview Drive",
"City":"St Petersburg",
"State":"FL",
"ZipCode":33701
},
{
"GivenName":"Rita",
"Surname":"Becerra",
"EmailAddress":"RitaJBecerra@gustr.com",
"TelephoneNumber":"224-420-7910",
"StreetAddress":"4244 Jadewood Drive",
"City":"Chicago",
"State":"IL",
"ZipCode":60605
},
{
"GivenName":"Doris",
"Surname":"Killough",
"EmailAddress":"DorisWKillough@einrot.com",
"TelephoneNumber":"812-382-7200",
"StreetAddress":"1745 Conaway Street",
"City":"Graysville",
"State":"IN",
"ZipCode":47882
},
{
"GivenName":"Santos",
"Surname":"Taylor",
"EmailAddress":"SantosSTaylor@fleckens.hu",
"TelephoneNumber":"818-448-9303",
"StreetAddress":"3829 Edsel Road",
"City":"Irvine",
"State":"CA",
"ZipCode":92614
},
{
"GivenName":"Angela",
"Surname":"Wilson",
"EmailAddress":"AngelaCWilson@armyspy.com",
"TelephoneNumber":"803-272-5771",
"StreetAddress":"4917 Wexford Way",
"City":"Irmo",
"State":"SC",
"ZipCode":29063
},
{
"GivenName":"Leonard",
"Surname":"Erler",
"EmailAddress":"LeonardTErler@superrito.com",
"TelephoneNumber":"706-582-0325",
"StreetAddress":"3628 Hart Country Lane",
"City":"Waverly Hall",
"State":"GA",
"ZipCode":31831
},
{
"GivenName":"Jesus",
"Surname":"Edwards",
"EmailAddress":"JesusAEdwards@fleckens.hu",
"TelephoneNumber":"903-272-4341",
"StreetAddress":"1553 Gladwell Street",
"City":"Cleburne",
"State":"TX",
"ZipCode":76031
},
{
"GivenName":"John",
"Surname":"Stutzman",
"EmailAddress":"JohnKStutzman@dayrep.com",
"TelephoneNumber":"252-926-3353",
"StreetAddress":"734 Fort Street",
"City":"Swanquarter",
"State":"NC",
"ZipCode":27885
},
{
"GivenName":"Elizabeth",
"Surname":"Woodward",
"EmailAddress":"ElizabethSWoodward@teleworm.us",
"TelephoneNumber":"217-693-3962",
"StreetAddress":"4570 Scenic Way",
"City":"Champaign",
"State":"IL",
"ZipCode":61820
},
{
"GivenName":"Rhonda",
"Surname":"Heffernan",
"EmailAddress":"RhondaRHeffernan@dayrep.com",
"TelephoneNumber":"256-615-8083",
"StreetAddress":"2247 Marcus Street",
"City":"Huntsville",
"State":"AL",
"ZipCode":35806
},
{
"GivenName":"Mary",
"Surname":"Zastrow",
"EmailAddress":"MaryMZastrow@teleworm.us",
"TelephoneNumber":"636-573-7435",
"StreetAddress":"2263 Rodney Street",
"City":"St Louis",
"State":"MO",
"ZipCode":63101
},
{
"GivenName":"Stanley",
"Surname":"Ward",
"EmailAddress":"StanleyCWard@dayrep.com",
"TelephoneNumber":"940-524-0171",
"StreetAddress":"4074 Alexander Drive",
"City":"Petrolia",
"State":"TX",
"ZipCode":76377
},
{
"GivenName":"John",
"Surname":"Lewis",
"EmailAddress":"JohnELewis@jourrapide.com",
"TelephoneNumber":"810-669-6085",
"StreetAddress":"4128 Perry Street",
"City":"Southfield",
"State":"MI",
"ZipCode":48075
},
{
"GivenName":"Rachael",
"Surname":"Sargent",
"EmailAddress":"RachaelDSargent@armyspy.com",
"TelephoneNumber":"310-536-2237",
"StreetAddress":"3800 Armbrester Drive",
"City":"El Segundo",
"State":"CA",
"ZipCode":90245
},
{
"GivenName":"Kim",
"Surname":"Boes",
"EmailAddress":"KimSBoes@armyspy.com",
"TelephoneNumber":"435-645-0031",
"StreetAddress":"1825 Burnside Avenue",
"City":"Park City",
"State":"UT",
"ZipCode":84060
},
{
"GivenName":"Rhonda",
"Surname":"Mitten",
"EmailAddress":"RhondaHMitten@fleckens.hu",
"TelephoneNumber":"443-781-7703",
"StreetAddress":"4232 Green Gate Lane",
"City":"Baltimore",
"State":"MD",
"ZipCode":21202
},
{
"GivenName":"Lance",
"Surname":"Gainey",
"EmailAddress":"LanceSGainey@fleckens.hu",
"TelephoneNumber":"504-875-6606",
"StreetAddress":"3406 Shadowmar Drive",
"City":"New Orleans",
"State":"LA",
"ZipCode":70112
},
{
"GivenName":"Yuko",
"Surname":"Sanford",
"EmailAddress":"YukoSSanford@gustr.com",
"TelephoneNumber":"270-619-5568",
"StreetAddress":"1882 Rowes Lane",
"City":"Madisonville",
"State":"KY",
"ZipCode":42431
},
{
"GivenName":"Elaine",
"Surname":"Drennen",
"EmailAddress":"ElaineMDrennen@armyspy.com",
"TelephoneNumber":"727-288-4502",
"StreetAddress":"2566 Badger Pond Lane",
"City":"Clearwater",
"State":"FL",
"ZipCode":34623
},
{
"GivenName":"Joseph",
"Surname":"Sells",
"EmailAddress":"JosephMSells@cuvox.de",
"TelephoneNumber":"417-736-6823",
"StreetAddress":"2240 Chandler Drive",
"City":"Strafford",
"State":"MO",
"ZipCode":65757
},
{
"GivenName":"Miss",
"Surname":"Delong",
"EmailAddress":"MissJDelong@cuvox.de",
"TelephoneNumber":"773-293-8837",
"StreetAddress":"989 Oakmound Road",
"City":"Chicago",
"State":"IL",
"ZipCode":60640
},
{
"GivenName":"Andrew",
"Surname":"Halter",
"EmailAddress":"AndrewMHalter@teleworm.us",
"TelephoneNumber":"571-275-7257",
"StreetAddress":"2935 Nelm Street",
"City":"Beltsville",
"State":"VA",
"ZipCode":20705
},
{
"GivenName":"Cathy",
"Surname":"Gillespie",
"EmailAddress":"CathyRGillespie@jourrapide.com",
"TelephoneNumber":"845-595-2404",
"StreetAddress":"253 Marshville Road",
"City":"New York",
"State":"NY",
"ZipCode":10013
},
{
"GivenName":"Leticia",
"Surname":"Brown",
"EmailAddress":"LeticiaDBrown@teleworm.us",
"TelephoneNumber":"678-473-8880",
"StreetAddress":"3933 Adonais Way",
"City":"Duluth",
"State":"GA",
"ZipCode":30136
},
{
"GivenName":"Carol",
"Surname":"Palombo",
"EmailAddress":"CarolLPalombo@einrot.com",
"TelephoneNumber":"440-682-6232",
"StreetAddress":"2628 Harley Vincent Drive",
"City":"Cleveland",
"State":"OH",
"ZipCode":44115
},
{
"GivenName":"Todd",
"Surname":"Kay",
"EmailAddress":"ToddVKay@dayrep.com",
"TelephoneNumber":"704-465-4581",
"StreetAddress":"1932 Kooter Lane",
"City":"Charlotte",
"State":"NC",
"ZipCode":28202
},
{
"GivenName":"Luna",
"Surname":"Hauck",
"EmailAddress":"LunaDHauck@gustr.com",
"TelephoneNumber":"360-737-6225",
"StreetAddress":"1354 Pinnickinick Street",
"City":"Vancouver",
"State":"WA",
"ZipCode":98663
},
{
"GivenName":"Wanda",
"Surname":"Pullins",
"EmailAddress":"WandaBPullins@dayrep.com",
"TelephoneNumber":"417-768-0539",
"StreetAddress":"1354 Briarwood Road",
"City":"Springfield",
"State":"MO",
"ZipCode":65806
},
]
|
rhefner1/ghidonations
|
sandbox/individual_names.py
|
Python
|
apache-2.0
| 124,418
|
[
"Brian",
"COLUMBUS",
"Elk"
] |
2dd7cf48ce75efe4840b151d9b118863b5415c0c01c6924a751a8e920a09f488
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform_v1beta1.types import explanation_metadata
from google.protobuf import struct_pb2 # type: ignore
# Registry of every proto-plus message defined in this module. ``proto.module``
# binds each class name listed in ``manifest`` to the given protobuf package,
# so the names here must match the classes declared below exactly.
__protobuf__ = proto.module(
    package="google.cloud.aiplatform.v1beta1",
    manifest={
        "Explanation",
        "ModelExplanation",
        "Attribution",
        "ExplanationSpec",
        "ExplanationParameters",
        "SampledShapleyAttribution",
        "IntegratedGradientsAttribution",
        "XraiAttribution",
        "SmoothGradConfig",
        "FeatureNoiseSigma",
        "ExplanationSpecOverride",
        "ExplanationMetadataOverride",
    },
)
class Explanation(proto.Message):
    r"""Explanation of a prediction (provided in
    [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions])
    produced by the Model on a given
    [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances].

    Attributes:
        attributions (Sequence[google.cloud.aiplatform_v1beta1.types.Attribution]):
            Output only. Feature attributions grouped by predicted
            outputs.

            For Models that predict only one output, such as regression
            Models that predict only one score, there is only one
            attribution that explains the predicted output. For Models
            that predict multiple outputs, such as multiclass Models
            that predict multiple classes, each element explains one
            specific item.
            [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]
            can be used to identify which output this attribution is
            explaining.

            If users set
            [ExplanationParameters.top_k][google.cloud.aiplatform.v1beta1.ExplanationParameters.top_k],
            the attributions are sorted by
            [instance_output_value][Attributions.instance_output_value]
            in descending order. If
            [ExplanationParameters.output_indices][google.cloud.aiplatform.v1beta1.ExplanationParameters.output_indices]
            is specified, the attributions are stored by
            [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]
            in the same order as they appear in the output_indices.
    """

    # Repeated Attribution messages; field number 1 is part of the wire format.
    attributions = proto.RepeatedField(proto.MESSAGE, number=1, message="Attribution",)
class ModelExplanation(proto.Message):
    r"""Aggregated explanation metrics for a Model over a set of
    instances.

    Attributes:
        mean_attributions (Sequence[google.cloud.aiplatform_v1beta1.types.Attribution]):
            Output only. Aggregated attributions explaining the Model's
            prediction outputs over the set of instances. The
            attributions are grouped by outputs.

            For Models that predict only one output, such as regression
            Models that predict only one score, there is only one
            attribution that explains the predicted output. For Models
            that predict multiple outputs, such as multiclass Models
            that predict multiple classes, each element explains one
            specific item.
            [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]
            can be used to identify which output this attribution is
            explaining.

            The
            [baselineOutputValue][google.cloud.aiplatform.v1beta1.Attribution.baseline_output_value],
            [instanceOutputValue][google.cloud.aiplatform.v1beta1.Attribution.instance_output_value]
            and
            [featureAttributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions]
            fields are averaged over the test data.

            NOTE: Currently AutoML tabular classification Models produce
            only one attribution, which averages attributions over all
            the classes it predicts.
            [Attribution.approximation_error][google.cloud.aiplatform.v1beta1.Attribution.approximation_error]
            is not populated.
    """

    # Repeated Attribution messages averaged over the evaluated instances.
    mean_attributions = proto.RepeatedField(
        proto.MESSAGE, number=1, message="Attribution",
    )
class Attribution(proto.Message):
    r"""Attribution that explains a particular prediction output.

    Attributes:
        baseline_output_value (float):
            Output only. Model predicted output if the input instance is
            constructed from the baselines of all the features defined
            in
            [ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs].
            The field name of the output is determined by the key in
            [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs].

            If the Model's predicted output has multiple dimensions
            (rank > 1), this is the value in the output located by
            [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].

            If there are multiple baselines, their output values are
            averaged.
        instance_output_value (float):
            Output only. Model predicted output on the corresponding
            [explanation instance][ExplainRequest.instances]. The field
            name of the output is determined by the key in
            [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs].

            If the Model predicted output has multiple dimensions, this
            is the value in the output located by
            [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
        feature_attributions (google.protobuf.struct_pb2.Value):
            Output only. Attributions of each explained feature.
            Features are extracted from the [prediction
            instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]
            according to [explanation metadata for
            inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs].

            The value is a struct, whose keys are the name of the
            feature. The values are how much the feature in the
            [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]
            contributed to the predicted result.

            The format of the value is determined by the feature's input
            format:

            -  If the feature is a scalar value, the attribution value
               is a [floating
               number][google.protobuf.Value.number_value].

            -  If the feature is an array of scalar values, the
               attribution value is an
               [array][google.protobuf.Value.list_value].

            -  If the feature is a struct, the attribution value is a
               [struct][google.protobuf.Value.struct_value]. The keys in
               the attribution value struct are the same as the keys in
               the feature struct. The formats of the values in the
               attribution struct are determined by the formats of the
               values in the feature struct.

            The
            [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1beta1.ExplanationMetadata.feature_attributions_schema_uri]
            field, pointed to by the
            [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec]
            field of the
            [Endpoint.deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models]
            object, points to the schema file that describes the
            features and their attribution values (if it is populated).
        output_index (Sequence[int]):
            Output only. The index that locates the explained prediction
            output.

            If the prediction output is a scalar value, output_index is
            not populated. If the prediction output has multiple
            dimensions, the length of the output_index list is the same
            as the number of dimensions of the output. The i-th element
            in output_index is the element index of the i-th dimension
            of the output vector. Indices start from 0.
        output_display_name (str):
            Output only. The display name of the output identified by
            [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
            For example, the predicted class name by a
            multi-classification Model.

            This field is only populated iff the Model predicts display
            names as a separate field along with the explained output.
            The predicted display name must have the same shape of the
            explained output, and can be located using output_index.
        approximation_error (float):
            Output only. Error of
            [feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions]
            caused by approximation used in the explanation method.
            Lower value means more precise attributions.

            -  For Sampled Shapley
               [attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.sampled_shapley_attribution],
               increasing
               [path_count][google.cloud.aiplatform.v1beta1.SampledShapleyAttribution.path_count]
               might reduce the error.
            -  For Integrated Gradients
               [attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution],
               increasing
               [step_count][google.cloud.aiplatform.v1beta1.IntegratedGradientsAttribution.step_count]
               might reduce the error.
            -  For [XRAI
               attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution],
               increasing
               [step_count][google.cloud.aiplatform.v1beta1.XraiAttribution.step_count]
               might reduce the error.

            See `this
            introduction </vertex-ai/docs/explainable-ai/overview>`__
            for more information.
        output_name (str):
            Output only. Name of the explain output. Specified as the
            key in
            [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs].
    """

    # Field numbers 1-7 define the wire format and must never be renumbered.
    baseline_output_value = proto.Field(proto.DOUBLE, number=1,)
    instance_output_value = proto.Field(proto.DOUBLE, number=2,)
    # Arbitrary per-feature attribution values carried as a protobuf Struct Value.
    feature_attributions = proto.Field(
        proto.MESSAGE, number=3, message=struct_pb2.Value,
    )
    output_index = proto.RepeatedField(proto.INT32, number=4,)
    output_display_name = proto.Field(proto.STRING, number=5,)
    approximation_error = proto.Field(proto.DOUBLE, number=6,)
    output_name = proto.Field(proto.STRING, number=7,)
class ExplanationSpec(proto.Message):
    r"""Specification of how a Model's predictions are explained.

    Attributes:
        parameters (google.cloud.aiplatform_v1beta1.types.ExplanationParameters):
            Required. Parameters that configure explaining of the
            Model's predictions.
        metadata (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata):
            Required. Metadata describing the Model's input and output
            for explanation.
    """

    # Field numbers are part of the wire format; keep them stable.
    parameters = proto.Field(
        proto.MESSAGE,
        number=1,
        message="ExplanationParameters",
    )
    metadata = proto.Field(
        proto.MESSAGE,
        number=2,
        message=explanation_metadata.ExplanationMetadata,
    )
class ExplanationParameters(proto.Message):
    r"""Parameters to configure explaining for Model's predictions.

    Attributes:
        sampled_shapley_attribution (google.cloud.aiplatform_v1beta1.types.SampledShapleyAttribution):
            An attribution method that approximates
            Shapley values for features that contribute to
            the label being predicted. A sampling strategy
            is used to approximate the value rather than
            considering all subsets of features. Refer to
            this paper for model details:
            https://arxiv.org/abs/1306.4265.
        integrated_gradients_attribution (google.cloud.aiplatform_v1beta1.types.IntegratedGradientsAttribution):
            An attribution method that computes
            Aumann-Shapley values taking advantage of the model's
            fully differentiable structure. Refer to this
            paper for more details:
            https://arxiv.org/abs/1703.01365
        xrai_attribution (google.cloud.aiplatform_v1beta1.types.XraiAttribution):
            An attribution method that redistributes
            Integrated Gradients attribution to segmented
            regions, taking advantage of the model's fully
            differentiable structure. Refer to this paper
            for more details:
            https://arxiv.org/abs/1906.02825

            XRAI currently performs better on natural
            images, like a picture of a house or an animal.
            If the images are taken in artificial
            environments, like a lab or manufacturing line,
            or from diagnostic equipment, like x-rays or
            quality-control cameras, use Integrated
            Gradients instead.
        top_k (int):
            If populated, returns attributions for top K
            indices of outputs (defaults to 1). Only applies
            to Models that predict more than one output
            (e.g., multi-class Models). When set to -1,
            returns explanations for all outputs.
        output_indices (google.protobuf.struct_pb2.ListValue):
            If populated, only returns attributions that have
            [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]
            contained in output_indices. It must be an ndarray of
            integers, with the same shape of the output it's explaining.

            If not populated, returns attributions for
            [top_k][google.cloud.aiplatform.v1beta1.ExplanationParameters.top_k]
            indices of outputs. If neither top_k nor output_indices is
            populated, returns the argmax index of the outputs.

            Only applicable to Models that predict multiple outputs
            (e.g., multi-class Models that predict multiple classes).
    """

    # The three attribution methods share the "method" oneof: setting one
    # clears the others.
    sampled_shapley_attribution = proto.Field(
        proto.MESSAGE, number=1, oneof="method", message="SampledShapleyAttribution",
    )
    integrated_gradients_attribution = proto.Field(
        proto.MESSAGE,
        number=2,
        oneof="method",
        message="IntegratedGradientsAttribution",
    )
    xrai_attribution = proto.Field(
        proto.MESSAGE, number=3, oneof="method", message="XraiAttribution",
    )
    top_k = proto.Field(proto.INT32, number=4,)
    output_indices = proto.Field(proto.MESSAGE, number=5, message=struct_pb2.ListValue,)
class SampledShapleyAttribution(proto.Message):
    r"""Configuration for the sampled Shapley attribution method.

    Approximates Shapley values for the features that contribute to the
    label being predicted, sampling feature permutations rather than
    enumerating every subset of features.

    Attributes:
        path_count (int):
            Required. Number of feature permutations considered when
            approximating the Shapley values. Valid range is [1, 50],
            inclusive.
    """

    path_count = proto.Field(
        proto.INT32,
        number=1,
    )
class IntegratedGradientsAttribution(proto.Message):
    r"""Configuration for the Integrated Gradients attribution method.

    Computes the Aumann-Shapley value by exploiting the model's fully
    differentiable structure. See https://arxiv.org/abs/1703.01365 for
    details.

    Attributes:
        step_count (int):
            Required. Number of steps used to approximate the path
            integral. Start around 50 and increase until the
            sum-to-diff property falls within the desired error range.
            Valid range is [1, 100], inclusive.
        smooth_grad_config (google.cloud.aiplatform_v1beta1.types.SmoothGradConfig):
            Optional SmoothGrad configuration. When set, gradients are
            approximated by averaging gradients of noisy samples taken
            near the inputs, which can improve the computed gradients.
            See https://arxiv.org/pdf/1706.03825.pdf for details.
    """

    step_count = proto.Field(
        proto.INT32,
        number=1,
    )
    smooth_grad_config = proto.Field(
        proto.MESSAGE,
        number=2,
        message="SmoothGradConfig",
    )
class XraiAttribution(proto.Message):
    r"""Configuration for the XRAI attribution method.

    Redistributes Integrated Gradients attributions onto segmented
    regions, exploiting the model's fully differentiable structure.
    See https://arxiv.org/abs/1906.02825 for details. Supported only
    by image Models.

    Attributes:
        step_count (int):
            Required. Number of steps used to approximate the path
            integral. Start around 50 and increase until the
            sum-to-diff property is met within the desired error
            range. Valid range is [1, 100], inclusive.
        smooth_grad_config (google.cloud.aiplatform_v1beta1.types.SmoothGradConfig):
            Optional SmoothGrad configuration. When set, gradients are
            approximated by averaging gradients of noisy samples taken
            near the inputs, which can improve the computed gradients.
            See https://arxiv.org/pdf/1706.03825.pdf for details.
    """

    step_count = proto.Field(
        proto.INT32,
        number=1,
    )
    smooth_grad_config = proto.Field(
        proto.MESSAGE,
        number=2,
        message="SmoothGradConfig",
    )
class SmoothGradConfig(proto.Message):
    r"""Config for SmoothGrad approximation of gradients.

    When enabled, the gradients are approximated by averaging the
    gradients from noisy samples in the vicinity of the inputs.
    Adding noise can help improve the computed gradients. Refer to
    this paper for more details:
    https://arxiv.org/pdf/1706.03825.pdf

    Attributes:
        noise_sigma (float):
            This is a single float value and will be used to add noise
            to all the features. Use this field when all features are
            normalized to have the same distribution: scale to range [0,
            1], [-1, 1] or z-scoring, where features are normalized to
            have 0-mean and 1-variance. Learn more about
            `normalization <https://developers.google.com/machine-learning/data-prep/transform/normalization>`__.

            For best results the recommended value is about 10% - 20% of
            the standard deviation of the input feature. Refer to
            section 3.2 of the SmoothGrad paper:
            https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1.

            If the distribution is different per feature, set
            [feature_noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.feature_noise_sigma]
            instead for each feature.
        feature_noise_sigma (google.cloud.aiplatform_v1beta1.types.FeatureNoiseSigma):
            This is similar to
            [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma],
            but provides additional flexibility. A separate noise sigma
            can be provided for each feature, which is useful if their
            distributions are different. No noise is added to features
            that are not set. If this field is unset,
            [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma]
            will be used for all features.
        noisy_sample_count (int):
            The number of gradient samples to use for approximation. The
            higher this number, the more accurate the gradient is, but
            the runtime complexity increases by this factor as well.
            Valid range of its value is [1, 50]. Defaults to 3.
    """

    # noise_sigma and feature_noise_sigma are mutually exclusive: they share
    # the "GradientNoiseSigma" oneof, so setting one clears the other.
    noise_sigma = proto.Field(proto.FLOAT, number=1, oneof="GradientNoiseSigma",)
    feature_noise_sigma = proto.Field(
        proto.MESSAGE,
        number=2,
        oneof="GradientNoiseSigma",
        message="FeatureNoiseSigma",
    )
    noisy_sample_count = proto.Field(proto.INT32, number=3,)
class FeatureNoiseSigma(proto.Message):
    r"""Noise sigma by features. Noise sigma represents the standard
    deviation of the gaussian kernel that will be used to add noise
    to interpolated inputs prior to computing gradients.

    Attributes:
        noise_sigma (Sequence[google.cloud.aiplatform_v1beta1.types.FeatureNoiseSigma.NoiseSigmaForFeature]):
            Noise sigma per feature. No noise is added to
            features that are not set.
    """

    class NoiseSigmaForFeature(proto.Message):
        r"""Noise sigma for a single feature.

        Attributes:
            name (str):
                The name of the input feature for which noise sigma is
                provided. The features are defined in [explanation metadata
                inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs].
            sigma (float):
                This represents the standard deviation of the Gaussian
                kernel that will be used to add noise to the feature prior
                to computing gradients. Similar to
                [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma]
                but represents the noise added to the current feature.
                Defaults to 0.1.
        """

        name = proto.Field(proto.STRING, number=1,)
        sigma = proto.Field(proto.FLOAT, number=2,)

    # Repeated nested message: one (name, sigma) pair per feature.
    noise_sigma = proto.RepeatedField(
        proto.MESSAGE, number=1, message=NoiseSigmaForFeature,
    )
class ExplanationSpecOverride(proto.Message):
    r"""The
    [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec]
    entries that can be overridden at [online
    explanation][PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]
    time.

    Attributes:
        parameters (google.cloud.aiplatform_v1beta1.types.ExplanationParameters):
            Parameters to override. The
            [method][google.cloud.aiplatform.v1beta1.ExplanationParameters.method]
            itself cannot be changed. When unspecified, no parameter is
            overridden.
        metadata (google.cloud.aiplatform_v1beta1.types.ExplanationMetadataOverride):
            Metadata to override. When unspecified, no
            metadata is overridden.
    """

    parameters = proto.Field(
        proto.MESSAGE,
        number=1,
        message="ExplanationParameters",
    )
    metadata = proto.Field(
        proto.MESSAGE,
        number=2,
        message="ExplanationMetadataOverride",
    )
class ExplanationMetadataOverride(proto.Message):
    r"""The
    [ExplanationMetadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata]
    entries that can be overridden at [online
    explanation][google.cloud.aiplatform.v1beta1.PredictionService.Explain]
    time.

    Attributes:
        inputs (Sequence[google.cloud.aiplatform_v1beta1.types.ExplanationMetadataOverride.InputsEntry]):
            Required. Overrides the [input
            metadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs]
            of the features. The key is the name of the feature to be
            overridden. The keys specified here must exist in the input
            metadata to be overridden. If a feature is not specified
            here, the corresponding feature's input metadata is not
            overridden.
    """

    class InputMetadataOverride(proto.Message):
        r"""The [input
        metadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata]
        entries to be overridden.

        Attributes:
            input_baselines (Sequence[google.protobuf.struct_pb2.Value]):
                Baseline inputs for this feature.

                This overrides the ``input_baseline`` field of the
                [ExplanationMetadata.InputMetadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata]
                object of the corresponding feature's input metadata. If
                it's not specified, the original baselines are not
                overridden.
        """

        input_baselines = proto.RepeatedField(
            proto.MESSAGE, number=1, message=struct_pb2.Value,
        )

    # Map of feature name -> per-feature metadata override.
    inputs = proto.MapField(
        proto.STRING, proto.MESSAGE, number=1, message=InputMetadataOverride,
    )
# Export every message type registered in this module's proto manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
|
sasha-gitg/python-aiplatform
|
google/cloud/aiplatform_v1beta1/types/explanation.py
|
Python
|
apache-2.0
| 25,492
|
[
"Gaussian"
] |
8d3f086c244608d3cfcf0e313a372315454b55f712b4534fe9b84c148090d17c
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RLimma(RPackage):
    """Data analysis, linear models and differential expression
    for microarray data."""
    # Bioconductor project page for limma; list_url reuses it so Spack can
    # discover available versions from the same page.
    homepage = "https://www.bioconductor.org/packages/limma/"
    url = "https://www.bioconductor.org/packages/release/bioc/src/contrib/limma_3.32.6.tar.gz"
    list_url = homepage
    # Second argument is the MD5 checksum of the source tarball.
    version('3.32.6', 'df5dc2b85189a24e939efa3a8e6abc41')
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/r-limma/package.py
|
Python
|
lgpl-2.1
| 1,617
|
[
"Bioconductor"
] |
91e8110b2fbde73bde2429bacfe01882dc6d3318814bb59e5fe8b5dec59661c8
|
# -*- coding: utf-8 -*-
u"""Traits-based GUI for head-MRI coregistration.
Hierarchy
---------
This is the hierarchy of classes for control. Brackets like [1] denote
properties that are set to be equivalent.
::
CoregFrame: GUI for head-MRI coregistration.
|-- CoregModel (model): Traits object for estimating the head mri transform.
| |-- MRIHeadWithFiducialsModel (mri) [1]: Represent an MRI head shape (high and low res) with fiducials.
| | |-- SurfaceSource (bem_high_res): High-res MRI head
| | |-- SurfaceSource (bem_low_res): Low-res MRI head
| | +-- MRISubjectSource (subject_source) [2]: Find subjects in SUBJECTS_DIR and select one.
| |-- FiducialsSource (fid): Expose points of a given fiducials fif file.
| +-- DigSource (hsp): Expose measurement information from a inst file.
|-- MlabSceneModel (scene) [3]: mayavi.core.ui.mayavi_scene
|-- DataPanel (data_panel)
| |-- HeadViewController (headview) [4]: Set head views for the given coordinate system.
| | +-- MlabSceneModel (scene) [3*]: ``HeadViewController(scene=CoregFrame.scene)``
| |-- SubjectSelectorPanel (subject_panel): Subject selector panel
| | +-- MRISubjectSource (model) [2*]: ``SubjectSelectorPanel(model=self.model.mri.subject_source)``
| +-- FiducialsPanel (fid_panel): Set fiducials on an MRI surface.
| |-- MRIHeadWithFiducialsModel (model) [1*]: ``FiducialsPanel(model=CoregFrame.model.mri, headview=CoregFrame.headview)``
| |-- HeadViewController (headview) [4*]: ``FiducialsPanel(model=CoregFrame.model.mri, headview=CoregFrame.headview)``
| +-- SurfaceObject (hsp_obj) [5*]: ``CoregFrame.fid_panel.hsp_obj = CoregFrame.mri_obj``
|-- CoregPanel (coreg_panel): Coregistration panel for Head<->MRI with scaling.
| +-- FittingOptionsPanel (fitting_options_panel): panel for fitting options.
|-- SurfaceObject (mri_obj) [5]: Represent a solid object in a mayavi scene.
+-- PointObject ({hsp, eeg, lpa, nasion, rpa, hsp_lpa, hsp_nasion, hsp_rpa} + _obj): Represent a group of individual points in a mayavi scene.
In the MRI viewing frame, MRI points are transformed via scaling, then by
mri_head_t to the Neuromag head coordinate frame. Digitized points (in head
coordinate frame) are never transformed.
Units
-----
User-facing GUI values are in readable units:
- ``scale_*`` are in %
- ``trans_*`` are in mm
- ``rot_*`` are in °
Internal computation quantities ``parameters`` are in units of (for X/Y/Z):
- ``parameters[:3]`` are in radians
- ``parameters[3:6]`` are in m
- ``parameters[6:9]`` are in scale proportion
Conversions are handled via `np.deg2rad`, `np.rad2deg`, and appropriate
multiplications / divisions.
""" # noqa: E501
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
import os
import queue
import re
import time
from threading import Thread
import traceback
import warnings
import numpy as np
from mayavi.core.ui.mayavi_scene import MayaviScene
from mayavi.tools.mlab_scene_model import MlabSceneModel
from pyface.api import (error, confirm, OK, YES, NO, CANCEL, information,
FileDialog, GUI)
from traits.api import (Bool, Button, cached_property, DelegatesTo, Directory,
Enum, Float, HasTraits, HasPrivateTraits, Instance,
Int, on_trait_change, Property, Str, List)
from traitsui.api import (View, Item, Group, HGroup, VGroup, VGrid, EnumEditor,
Handler, Label, Spring, InstanceEditor, StatusItem,
UIInfo)
from traitsui.menu import Action, UndoButton, CancelButton, NoButtons
from tvtk.pyface.scene_editor import SceneEditor
from ..bem import make_bem_solution, write_bem_solution
from ..coreg import bem_fname, trans_fname
from ..defaults import DEFAULTS
from ..surface import _DistanceQuery, _CheckInside
from ..transforms import (write_trans, read_trans, apply_trans, rotation,
rotation_angles, Transform, _ensure_trans,
rot_to_quat, _angle_between_quats)
from ..coreg import fit_matched_points, scale_mri, _find_fiducials_files
from ..viz.backends._pysurfer_mayavi import _toggle_mlab_render
from ..viz._3d import _get_3d_option
from ..utils import logger, set_config, _pl
from ._fiducials_gui import MRIHeadWithFiducialsModel, FiducialsPanel
from ._file_traits import trans_wildcard, DigSource, SubjectSelectorPanel
from ._viewer import (HeadViewController, PointObject, SurfaceObject,
_DEG_WIDTH, _MM_WIDTH, _BUTTON_WIDTH,
_SHOW_BORDER, _COREG_WIDTH, _SCALE_STEP_WIDTH,
_INC_BUTTON_WIDTH, _SCALE_WIDTH, _WEIGHT_WIDTH,
_MM_STEP_WIDTH, _DEG_STEP_WIDTH, _REDUCED_TEXT_WIDTH,
_RESET_LABEL, _RESET_WIDTH,
laggy_float_editor_scale, laggy_float_editor_deg,
laggy_float_editor_mm, laggy_float_editor_weight)
try:
from traitsui.api import RGBColor
except ImportError:
from traits.api import RGBColor
# Module-level defaults for the coregistration GUI, taken from mne.defaults.
defaults = DEFAULTS['coreg']
class busy(object):
    """Context manager that marks the GUI as busy for its duration.

    Entering sets the toolkit busy cursor; exiting always clears it,
    even when the body raises (exceptions are not suppressed).
    """
    def __enter__(self):  # noqa: D105
        GUI.set_busy(True)
    def __exit__(self, exc_type, exc_value, exc_tb):  # noqa: D105
        GUI.set_busy(False)
def _pass(x):
"""Format text without changing it."""
return x
class CoregModel(HasPrivateTraits):
    """Traits object for estimating the head mri transform.
    Notes
    -----
    Transform from head to mri space is modelled with the following steps:
    * move the head shape to its nasion position
    * rotate the head shape with user defined rotation around its nasion
    * move the head shape by user defined translation
    * move the head shape origin to the mri nasion
    If MRI scaling is enabled,
    * the MRI is scaled relative to its origin center (prior to any
      transformation of the digitizer head)
    Don't sync transforms to anything to prevent them from being recomputed
    upon every parameter change.
    """
    # data sources
    mri = Instance(MRIHeadWithFiducialsModel, ())
    hsp = Instance(DigSource, ())
    # parameters
    guess_mri_subject = Bool(True)  # change MRI subject when dig file changes
    grow_hair = Float(label=u"ΔHair", desc="Move the back of the MRI "
                      "head outwards to compensate for hair on the digitizer "
                      "head shape (mm)")
    n_scale_params = Enum(0, 1, 3, desc="Scale the MRI to better fit the "
                          "subject's head shape (a new MRI subject will be "
                          "created with a name specified upon saving)")
    # GUI-facing values: scale_* in %, trans_* in mm, rot_* in degrees
    # (internal ``parameters`` are radians / m / proportion, see module doc)
    scale_x = Float(100, label="X")
    scale_y = Float(100, label="Y")
    scale_z = Float(100, label="Z")
    trans_x = Float(0, label=u"ΔX")
    trans_y = Float(0, label=u"ΔY")
    trans_z = Float(0, label=u"ΔZ")
    rot_x = Float(0, label=u"∠X")
    rot_y = Float(0, label=u"∠Y")
    rot_z = Float(0, label=u"∠Z")
    # 9-element list [rot_x, rot_y, rot_z, trans_x, trans_y, trans_z,
    # scale_x, scale_y, scale_z]; ``last_parameters`` holds the previous
    # value so ``changes`` can report deltas.
    parameters = List()
    last_parameters = List()
    lpa_weight = Float(1.)
    nasion_weight = Float(10.)
    rpa_weight = Float(1.)
    hsp_weight = Float(1.)
    eeg_weight = Float(1.)
    hpi_weight = Float(1.)
    # ICP state: iteration is -1 outside of a running fit
    iteration = Int(-1)
    icp_iterations = Int(20)
    icp_start_time = Float(0.0)
    icp_angle = Float(0.2)
    icp_distance = Float(0.2)
    icp_scale = Float(0.2)
    icp_fid_match = Enum('nearest', 'matched')
    fit_icp_running = Bool(False)
    fits_icp_running = Bool(False)
    coord_frame = Enum('mri', 'head', desc='Display coordinate frame')
    status_text = Str()
    # options during scaling
    scale_labels = Bool(True, desc="whether to scale *.label files")
    copy_annot = Bool(True, desc="whether to copy *.annot files for scaled "
                      "subject")
    prepare_bem_model = Bool(True, desc="whether to run make_bem_solution "
                             "after scaling the MRI")
    # secondary to parameters
    has_nasion_data = Property(
        Bool, depends_on=['mri:nasion', 'hsp:nasion'])
    has_lpa_data = Property(
        Bool, depends_on=['mri:lpa', 'hsp:lpa'])
    has_rpa_data = Property(
        Bool, depends_on=['mri:rpa', 'hsp:rpa'])
    has_fid_data = Property(  # conjunction
        Bool, depends_on=['has_nasion_data', 'has_lpa_data', 'has_rpa_data'])
    has_mri_data = Property(
        Bool, depends_on=['transformed_high_res_mri_points'])
    has_hsp_data = Property(
        Bool, depends_on=['has_mri_data', 'hsp:points'])
    has_eeg_data = Property(
        Bool, depends_on=['has_mri_data', 'hsp:eeg_points'])
    has_hpi_data = Property(
        Bool, depends_on=['has_mri_data', 'hsp:hpi_points'])
    n_icp_points = Property(
        Int, depends_on=['has_nasion_data', 'nasion_weight',
                         'has_lpa_data', 'lpa_weight',
                         'has_rpa_data', 'rpa_weight',
                         'hsp:points', 'hsp_weight',
                         'hsp:eeg_points', 'eeg_weight',
                         'hsp:hpi_points', 'hpi_weight'])
    # NOTE(review): 'old_parameters' is not a defined trait on this class —
    # this probably should read 'last_parameters'; the cache still refreshes
    # via the 'parameters' dependency. TODO confirm and fix upstream.
    changes = Property(depends_on=['parameters', 'old_parameters'])
    # target transforms
    mri_head_t = Property(
        desc="Transformation of the scaled MRI to the head coordinate frame.",
        depends_on=['parameters[]'])
    head_mri_t = Property(depends_on=['mri_head_t'])
    mri_trans_noscale = Property(depends_on=['mri_head_t', 'coord_frame'])
    mri_trans = Property(depends_on=['mri_trans_noscale', 'parameters[]'])
    hsp_trans = Property(depends_on=['head_mri_t', 'coord_frame'])
    # info
    subject_has_bem = DelegatesTo('mri')
    lock_fiducials = DelegatesTo('mri')
    can_prepare_bem_model = Property(
        Bool,
        depends_on=['n_scale_params', 'subject_has_bem'])
    can_save = Property(Bool, depends_on=['mri_head_t'])
    raw_subject = Property(
        desc="Subject guess based on the raw file name.",
        depends_on=['hsp:inst_fname'])
    # Always computed in the MRI coordinate frame for speed
    # (building the nearest-neighbor tree is slow!)
    # though it will always need to be rebuilt in (non-uniform) scaling mode
    nearest_calc = Instance(_DistanceQuery)
    # MRI geometry transformed to viewing coordinate system
    processed_high_res_mri_points = Property(
        depends_on=['mri:bem_high_res:surf', 'grow_hair'])
    processed_low_res_mri_points = Property(
        depends_on=['mri:bem_low_res:surf', 'grow_hair'])
    transformed_high_res_mri_points = Property(
        depends_on=['processed_high_res_mri_points', 'mri_trans'])
    transformed_low_res_mri_points = Property(
        depends_on=['processed_low_res_mri_points', 'mri_trans'])
    nearest_transformed_high_res_mri_idx_lpa = Property(
        depends_on=['nearest_calc', 'hsp:lpa', 'head_mri_t'])
    nearest_transformed_high_res_mri_idx_nasion = Property(
        depends_on=['nearest_calc', 'hsp:nasion', 'head_mri_t'])
    nearest_transformed_high_res_mri_idx_rpa = Property(
        depends_on=['nearest_calc', 'hsp:rpa', 'head_mri_t'])
    nearest_transformed_high_res_mri_idx_hsp = Property(
        depends_on=['nearest_calc', 'hsp:points', 'head_mri_t'])
    nearest_transformed_high_res_mri_idx_orig_hsp = Property(
        depends_on=['nearest_calc', 'hsp:points', 'head_mri_t'])
    nearest_transformed_high_res_mri_idx_eeg = Property(
        depends_on=['nearest_calc', 'hsp:eeg_points', 'head_mri_t'])
    nearest_transformed_high_res_mri_idx_hpi = Property(
        depends_on=['nearest_calc', 'hsp:hpi_points', 'head_mri_t'])
    transformed_mri_lpa = Property(
        depends_on=['mri:lpa', 'mri_trans'])
    transformed_mri_nasion = Property(
        depends_on=['mri:nasion', 'mri_trans'])
    transformed_mri_rpa = Property(
        depends_on=['mri:rpa', 'mri_trans'])
    # HSP geometry transformed to viewing coordinate system
    transformed_hsp_points = Property(
        depends_on=['hsp:points', 'hsp_trans'])
    transformed_orig_hsp_points = Property(
        depends_on=['hsp:_hsp_points', 'hsp_trans'])
    transformed_hsp_lpa = Property(
        depends_on=['hsp:lpa', 'hsp_trans'])
    transformed_hsp_nasion = Property(
        depends_on=['hsp:nasion', 'hsp_trans'])
    transformed_hsp_rpa = Property(
        depends_on=['hsp:rpa', 'hsp_trans'])
    transformed_hsp_eeg_points = Property(
        depends_on=['hsp:eeg_points', 'hsp_trans'])
    transformed_hsp_hpi = Property(
        depends_on=['hsp:hpi_points', 'hsp_trans'])
    # fit properties
    lpa_distance = Property(
        depends_on=['transformed_mri_lpa', 'transformed_hsp_lpa'])
    nasion_distance = Property(
        depends_on=['transformed_mri_nasion', 'transformed_hsp_nasion'])
    rpa_distance = Property(
        depends_on=['transformed_mri_rpa', 'transformed_hsp_rpa'])
    point_distance = Property(  # use low res points
        depends_on=['nearest_transformed_high_res_mri_idx_hsp',
                    'nearest_transformed_high_res_mri_idx_eeg',
                    'nearest_transformed_high_res_mri_idx_hpi',
                    'hsp_weight',
                    'eeg_weight',
                    'hpi_weight'])
    orig_hsp_point_distance = Property(  # use low res points
        depends_on=['nearest_transformed_high_res_mri_idx_orig_hsp',
                    'hpi_weight'])
    # fit property info strings
    fid_eval_str = Property(
        depends_on=['lpa_distance', 'nasion_distance', 'rpa_distance'])
    points_eval_str = Property(
        depends_on=['point_distance'])
    def _parameters_default(self):
        # Fresh copy so instances never share the default list.
        return list(_DEFAULT_PARAMETERS)
    def _last_parameters_default(self):
        return list(_DEFAULT_PARAMETERS)
    @cached_property
    def _get_can_prepare_bem_model(self):
        # BEM recomputation only makes sense when an MRI scaling is applied.
        return self.subject_has_bem and self.n_scale_params > 0
    @cached_property
    def _get_can_save(self):
        # Saving is useful only once the transform differs from identity.
        return np.any(self.mri_head_t != np.eye(4))
    @cached_property
    def _get_has_lpa_data(self):
        return (np.any(self.mri.lpa) and np.any(self.hsp.lpa))
    @cached_property
    def _get_has_nasion_data(self):
        return (np.any(self.mri.nasion) and np.any(self.hsp.nasion))
    @cached_property
    def _get_has_rpa_data(self):
        return (np.any(self.mri.rpa) and np.any(self.hsp.rpa))
    @cached_property
    def _get_has_fid_data(self):
        return self.has_nasion_data and self.has_lpa_data and self.has_rpa_data
    @cached_property
    def _get_has_mri_data(self):
        return len(self.transformed_high_res_mri_points) > 0
    @cached_property
    def _get_has_hsp_data(self):
        return (self.has_mri_data and
                len(self.nearest_transformed_high_res_mri_idx_hsp) > 0)
    @cached_property
    def _get_has_eeg_data(self):
        return (self.has_mri_data and
                len(self.nearest_transformed_high_res_mri_idx_eeg) > 0)
    @cached_property
    def _get_has_hpi_data(self):
        return (self.has_mri_data and
                len(self.nearest_transformed_high_res_mri_idx_hpi) > 0)
    @cached_property
    def _get_n_icp_points(self):
        """Get parameters for an ICP iteration."""
        # Count every point that will participate in the ICP fit
        # (weight > 0 gates each category; each present fiducial counts 1).
        n = (self.hsp_weight > 0) * len(self.hsp.points)
        for key in ('lpa', 'nasion', 'rpa'):
            if getattr(self, 'has_%s_data' % key):
                n += 1
        n += (self.eeg_weight > 0) * len(self.hsp.eeg_points)
        n += (self.hpi_weight > 0) * len(self.hsp.hpi_points)
        return n
    @cached_property
    def _get_changes(self):
        # Returns (move [mm], angle [deg], scale changes [%]) relative to
        # ``last_parameters``; consumers must unpack in this order.
        new = np.array(self.parameters, float)
        old = np.array(self.last_parameters, float)
        move = np.linalg.norm(old[3:6] - new[3:6]) * 1e3
        angle = np.rad2deg(_angle_between_quats(
            rot_to_quat(rotation(*new[:3])[:3, :3]),
            rot_to_quat(rotation(*old[:3])[:3, :3])))
        percs = 100 * (new[6:] - old[6:]) / old[6:]
        return move, angle, percs
    @cached_property
    def _get_mri_head_t(self):
        # rotate and translate hsp
        trans = rotation(*self.parameters[:3])
        trans[:3, 3] = np.array(self.parameters[3:6])
        return trans
    @cached_property
    def _get_head_mri_t(self):
        # Inverse built analytically: R^T and -R^T @ t.
        trans = rotation(*self.parameters[:3]).T
        trans[:3, 3] = -np.dot(trans[:3, :3], self.parameters[3:6])
        # should be the same as np.linalg.inv(self.mri_head_t)
        return trans
    @cached_property
    def _get_processed_high_res_mri_points(self):
        return self._get_processed_mri_points('high')
    @cached_property
    def _get_processed_low_res_mri_points(self):
        return self._get_processed_mri_points('low')
    def _get_processed_mri_points(self, res):
        # Apply the "grow hair" offset along surface normals, scaled into
        # MRI space (grow_hair is in mm, divided by the per-axis scale).
        # NOTE(review): when grow_hair is set but the BEM has no normals,
        # this falls through and implicitly returns None — callers appear
        # to rely on the subsequent grow_hair=0 retriggering recompute.
        bem = self.mri.bem_low_res if res == 'low' else self.mri.bem_high_res
        if self.grow_hair:
            if len(bem.surf.nn):
                scaled_hair_dist = (1e-3 * self.grow_hair /
                                    np.array(self.parameters[6:9]))
                points = bem.surf.rr.copy()
                hair = points[:, 2] > points[:, 1]
                points[hair] += bem.surf.nn[hair] * scaled_hair_dist
                return points
            else:
                error(None, "Norms missing from bem, can't grow hair")
                self.grow_hair = 0
        else:
            return bem.surf.rr
    @cached_property
    def _get_mri_trans(self):
        # Compose the display transform with the per-axis MRI scaling.
        t = self.mri_trans_noscale.copy()
        t[:, :3] *= self.parameters[6:9]
        return t
    @cached_property
    def _get_mri_trans_noscale(self):
        if self.coord_frame == 'head':
            t = self.mri_head_t
        else:
            t = np.eye(4)
        return t
    @cached_property
    def _get_hsp_trans(self):
        # Digitizer points are only moved when viewing in MRI coordinates.
        if self.coord_frame == 'head':
            t = np.eye(4)
        else:
            t = self.head_mri_t
        return t
    # Nearest-neighbor lookups: head-space points are mapped into MRI space
    # and matched against the (scaled) high-res MRI surface via nearest_calc.
    @cached_property
    def _get_nearest_transformed_high_res_mri_idx_lpa(self):
        return self.nearest_calc.query(
            apply_trans(self.head_mri_t, self.hsp.lpa))[1]
    @cached_property
    def _get_nearest_transformed_high_res_mri_idx_nasion(self):
        return self.nearest_calc.query(
            apply_trans(self.head_mri_t, self.hsp.nasion))[1]
    @cached_property
    def _get_nearest_transformed_high_res_mri_idx_rpa(self):
        return self.nearest_calc.query(
            apply_trans(self.head_mri_t, self.hsp.rpa))[1]
    @cached_property
    def _get_nearest_transformed_high_res_mri_idx_hsp(self):
        return self.nearest_calc.query(
            apply_trans(self.head_mri_t, self.hsp.points))[1]
    @cached_property
    def _get_nearest_transformed_high_res_mri_idx_orig_hsp(self):
        # This is redundant to some extent with the one above due to
        # overlapping points, but it's fast and the refactoring to
        # remove redundancy would be a pain.
        return self.nearest_calc.query(
            apply_trans(self.head_mri_t, self.hsp._hsp_points))[1]
    @cached_property
    def _get_nearest_transformed_high_res_mri_idx_eeg(self):
        return self.nearest_calc.query(
            apply_trans(self.head_mri_t, self.hsp.eeg_points))[1]
    @cached_property
    def _get_nearest_transformed_high_res_mri_idx_hpi(self):
        return self.nearest_calc.query(
            apply_trans(self.head_mri_t, self.hsp.hpi_points))[1]
    # MRI view-transformed data
    @cached_property
    def _get_transformed_low_res_mri_points(self):
        points = apply_trans(self.mri_trans,
                             self.processed_low_res_mri_points)
        return points
    def _nearest_calc_default(self):
        # Tree is built over the scaled MRI points (MRI coordinate frame).
        return _DistanceQuery(
            self.processed_high_res_mri_points * self.parameters[6:9])
    @on_trait_change('processed_high_res_mri_points')
    def _update_nearest_calc(self):
        # Rebuild the nearest-neighbor tree whenever the surface changes.
        self.nearest_calc = self._nearest_calc_default()
    @cached_property
    def _get_transformed_high_res_mri_points(self):
        points = apply_trans(self.mri_trans,
                             self.processed_high_res_mri_points)
        return points
    @cached_property
    def _get_transformed_mri_lpa(self):
        return apply_trans(self.mri_trans, self.mri.lpa)
    @cached_property
    def _get_transformed_mri_nasion(self):
        return apply_trans(self.mri_trans, self.mri.nasion)
    @cached_property
    def _get_transformed_mri_rpa(self):
        return apply_trans(self.mri_trans, self.mri.rpa)
    # HSP view-transformed data
    @cached_property
    def _get_transformed_hsp_points(self):
        return apply_trans(self.hsp_trans, self.hsp.points)
    @cached_property
    def _get_transformed_orig_hsp_points(self):
        return apply_trans(self.hsp_trans, self.hsp._hsp_points)
    @cached_property
    def _get_transformed_hsp_lpa(self):
        return apply_trans(self.hsp_trans, self.hsp.lpa)
    @cached_property
    def _get_transformed_hsp_nasion(self):
        return apply_trans(self.hsp_trans, self.hsp.nasion)
    @cached_property
    def _get_transformed_hsp_rpa(self):
        return apply_trans(self.hsp_trans, self.hsp.rpa)
    @cached_property
    def _get_transformed_hsp_eeg_points(self):
        return apply_trans(self.hsp_trans, self.hsp.eeg_points)
    @cached_property
    def _get_transformed_hsp_hpi(self):
        return apply_trans(self.hsp_trans, self.hsp.hpi_points)
    # Distances, etc.
    # Fiducial distances are Euclidean norms in the viewing frame (m).
    @cached_property
    def _get_lpa_distance(self):
        d = np.ravel(self.transformed_mri_lpa - self.transformed_hsp_lpa)
        return np.linalg.norm(d)
    @cached_property
    def _get_nasion_distance(self):
        d = np.ravel(self.transformed_mri_nasion - self.transformed_hsp_nasion)
        return np.linalg.norm(d)
    @cached_property
    def _get_rpa_distance(self):
        d = np.ravel(self.transformed_mri_rpa - self.transformed_hsp_rpa)
        return np.linalg.norm(d)
    @cached_property
    def _get_point_distance(self):
        # Per-point distance (m) from each active digitizer point to its
        # nearest MRI surface point; None when no category is active.
        mri_points = list()
        hsp_points = list()
        if self.hsp_weight > 0 and self.has_hsp_data:
            mri_points.append(self.transformed_high_res_mri_points[
                self.nearest_transformed_high_res_mri_idx_hsp])
            hsp_points.append(self.transformed_hsp_points)
            assert len(mri_points[-1]) == len(hsp_points[-1])
        if self.eeg_weight > 0 and self.has_eeg_data:
            mri_points.append(self.transformed_high_res_mri_points[
                self.nearest_transformed_high_res_mri_idx_eeg])
            hsp_points.append(self.transformed_hsp_eeg_points)
            assert len(mri_points[-1]) == len(hsp_points[-1])
        if self.hpi_weight > 0 and self.has_hpi_data:
            mri_points.append(self.transformed_high_res_mri_points[
                self.nearest_transformed_high_res_mri_idx_hpi])
            hsp_points.append(self.transformed_hsp_hpi)
            assert len(mri_points[-1]) == len(hsp_points[-1])
        if all(len(h) == 0 for h in hsp_points):
            return None
        mri_points = np.concatenate(mri_points)
        hsp_points = np.concatenate(hsp_points)
        return np.linalg.norm(mri_points - hsp_points, axis=-1)
    @cached_property
    def _get_orig_hsp_point_distance(self):
        # Distances for ALL original head-shape points (including those
        # currently omitted) — used by omit_hsp_points.
        mri_points = self.transformed_high_res_mri_points[
            self.nearest_transformed_high_res_mri_idx_orig_hsp]
        hsp_points = self.transformed_orig_hsp_points
        return np.linalg.norm(mri_points - hsp_points, axis=-1)
    @cached_property
    def _get_fid_eval_str(self):
        # LPA / nasion / RPA misfit, converted from m to mm for display.
        d = (self.lpa_distance * 1000, self.nasion_distance * 1000,
             self.rpa_distance * 1000)
        return u'Fiducials: %.1f, %.1f, %.1f mm' % d
    @cached_property
    def _get_points_eval_str(self):
        if self.point_distance is None:
            return ""
        dists = 1000 * self.point_distance
        av_dist = np.mean(dists)
        std_dist = np.std(dists)
        kinds = [kind for kind, check in
                 (('HSP', self.hsp_weight > 0 and self.has_hsp_data),
                  ('EEG', self.eeg_weight > 0 and self.has_eeg_data),
                  ('HPI', self.hpi_weight > 0 and self.has_hpi_data))
                 if check]
        return (u"%s %s: %.1f ± %.1f mm"
                % (len(dists), '+'.join(kinds), av_dist, std_dist))
    def _get_raw_subject(self):
        # subject name guessed based on the inst file name
        # (everything before the first underscore); returns None otherwise
        if '_' in self.hsp.inst_fname:
            subject, _ = self.hsp.inst_fname.split('_', 1)
            if subject:
                return subject
    @on_trait_change('raw_subject')
    def _on_raw_subject_change(self, subject):
        # Auto-select the matching MRI subject, falling back to fsaverage.
        if self.guess_mri_subject:
            if subject in self.mri.subject_source.subjects:
                self.mri.subject = subject
            elif 'fsaverage' in self.mri.subject_source.subjects:
                self.mri.subject = 'fsaverage'
    def omit_hsp_points(self, distance):
        """Exclude head shape points that are far away from the MRI head.

        The filter is recomputed from all original points on every call,
        so previously excluded points within ``distance`` are re-included.

        Parameters
        ----------
        distance : float
            Exclude all points that are further away from the MRI head than
            this distance (in m). A value of distance <= 0 excludes nothing.
        """
        distance = float(distance)
        if distance <= 0:
            return
        # find the new filter
        mask = self.orig_hsp_point_distance <= distance
        n_excluded = np.sum(~mask)
        logger.info("Coregistration: Excluding %i head shape points with "
                    "distance >= %.3f m.", n_excluded, distance)
        # set the filter
        with warnings.catch_warnings(record=True):  # comp to None in Traits
            self.hsp.points_filter = mask
    def fit_fiducials(self, n_scale_params=None):
        """Find rotation and translation to fit all 3 fiducials."""
        if n_scale_params is None:
            n_scale_params = self.n_scale_params
        head_pts = np.vstack((self.hsp.lpa, self.hsp.nasion, self.hsp.rpa))
        mri_pts = np.vstack((self.mri.lpa, self.mri.nasion, self.mri.rpa))
        weights = [self.lpa_weight, self.nasion_weight, self.rpa_weight]
        assert n_scale_params in (0, 1)  # guaranteed by GUI
        if n_scale_params == 0:
            mri_pts *= self.parameters[6:9]  # not done in fit_matched_points
        x0 = np.array(self.parameters[:6 + n_scale_params])
        est = fit_matched_points(mri_pts, head_pts, x0=x0, out='params',
                                 scale=n_scale_params, weights=weights)
        if n_scale_params == 0:
            self.parameters[:6] = est
        else:
            # Uniform scaling: replicate the single scale estimate to x/y/z.
            self.parameters[:] = np.concatenate([est, [est[-1]] * 2])
    def _setup_icp(self, n_scale_params):
        """Get parameters for an ICP iteration.

        Collects matched (head, MRI) point pairs and per-point weights for
        every active category: head shape, fiducials, EEG, and HPI.
        """
        head_pts = list()
        mri_pts = list()
        weights = list()
        if self.has_hsp_data and self.hsp_weight > 0:  # should be true
            head_pts.append(self.hsp.points)
            mri_pts.append(self.processed_high_res_mri_points[
                self.nearest_transformed_high_res_mri_idx_hsp])
            weights.append(np.full(len(head_pts[-1]), self.hsp_weight))
        for key in ('lpa', 'nasion', 'rpa'):
            if getattr(self, 'has_%s_data' % key):
                head_pts.append(getattr(self.hsp, key))
                # 'matched' pairs the MRI fiducial directly; 'nearest' uses
                # the closest MRI surface point instead.
                if self.icp_fid_match == 'matched':
                    mri_pts.append(getattr(self.mri, key))
                else:
                    assert self.icp_fid_match == 'nearest'
                    mri_pts.append(self.processed_high_res_mri_points[
                        getattr(self, 'nearest_transformed_high_res_mri_idx_%s'
                                % (key,))])
                weights.append(np.full(len(mri_pts[-1]),
                                       getattr(self, '%s_weight' % key)))
        if self.has_eeg_data and self.eeg_weight > 0:
            head_pts.append(self.hsp.eeg_points)
            mri_pts.append(self.processed_high_res_mri_points[
                self.nearest_transformed_high_res_mri_idx_eeg])
            weights.append(np.full(len(mri_pts[-1]), self.eeg_weight))
        if self.has_hpi_data and self.hpi_weight > 0:
            head_pts.append(self.hsp.hpi_points)
            mri_pts.append(self.processed_high_res_mri_points[
                self.nearest_transformed_high_res_mri_idx_hpi])
            weights.append(np.full(len(mri_pts[-1]), self.hpi_weight))
        head_pts = np.concatenate(head_pts)
        mri_pts = np.concatenate(mri_pts)
        weights = np.concatenate(weights)
        if n_scale_params == 0:
            mri_pts *= self.parameters[6:9]  # not done in fit_matched_points
        return head_pts, mri_pts, weights
def fit_icp(self, n_scale_params=None):
"""Find MRI scaling, translation, and rotation to match HSP."""
if n_scale_params is None:
n_scale_params = self.n_scale_params
# Initial guess (current state)
assert n_scale_params in (0, 1, 3)
est = self.parameters[:[6, 7, None, 9][n_scale_params]]
# Do the fits, assigning and evaluating at each step
attr = 'fit_icp_running' if n_scale_params == 0 else 'fits_icp_running'
setattr(self, attr, True)
GUI.process_events() # update the cancel button
self.icp_start_time = time.time()
for self.iteration in range(self.icp_iterations):
head_pts, mri_pts, weights = self._setup_icp(n_scale_params)
est = fit_matched_points(mri_pts, head_pts, scale=n_scale_params,
x0=est, out='params', weights=weights)
if n_scale_params == 0:
self.parameters[:6] = est
elif n_scale_params == 1:
self.parameters[:] = list(est) + [est[-1]] * 2
else:
self.parameters[:] = est
angle, move, scale = self.changes
if angle <= self.icp_angle and move <= self.icp_distance and \
all(scale <= self.icp_scale):
self.status_text = self.status_text[:-1] + '; converged)'
break
if not getattr(self, attr): # canceled by user
self.status_text = self.status_text[:-1] + '; cancelled)'
break
GUI.process_events() # this will update the head view
else:
self.status_text = self.status_text[:-1] + '; did not converge)'
setattr(self, attr, False)
self.iteration = -1
    def get_scaling_job(self, subject_to, skip_fiducials):
        """Find all arguments needed for the scaling worker.

        Returns a tuple of (subjects_dir, subject_from, subject_to,
        scale factors, skip_fiducials, scale_labels, copy_annot, bem_names)
        consumed by the background MRI-scaling thread.
        """
        subjects_dir = self.mri.subjects_dir
        subject_from = self.mri.subject
        bem_names = []
        if self.can_prepare_bem_model and self.prepare_bem_model:
            # Collect every '*-bem' solution file of the source subject so
            # the worker can recompute it for the scaled subject.
            pattern = bem_fname.format(subjects_dir=subjects_dir,
                                       subject=subject_from, name='(.+-bem)')
            bem_dir, pattern = os.path.split(pattern)
            for filename in os.listdir(bem_dir):
                match = re.match(pattern, filename)
                if match:
                    bem_names.append(match.group(1))
        return (subjects_dir, subject_from, subject_to, self.parameters[6:9],
                skip_fiducials, self.scale_labels, self.copy_annot, bem_names)
    def load_trans(self, fname):
        """Load the head-mri transform from a fif file.
        Parameters
        ----------
        fname : str
            File path.
        """
        # Normalize whatever direction is stored in the file to mri->head.
        self.set_trans(_ensure_trans(read_trans(fname, return_all=True),
                                     'mri', 'head')['trans'])
def reset(self):
"""Reset all the parameters affecting the coregistration."""
with busy():
self.reset_traits(('grow_hair', 'n_scaling_params'))
self.parameters[:] = _DEFAULT_PARAMETERS
self.omit_hsp_points(np.inf)
    def set_trans(self, mri_head_t):
        """Set rotation and translation params from a transformation matrix.
        Parameters
        ----------
        mri_head_t : array, shape (4, 4)
            Transformation matrix from MRI to head space.
        """
        with busy():
            rot_x, rot_y, rot_z = rotation_angles(mri_head_t)
            x, y, z = mri_head_t[:3, 3]
            self.parameters[:6] = [rot_x, rot_y, rot_z, x, y, z]
    def save_trans(self, fname):
        """Save the head-mri transform as a fif file.
        Parameters
        ----------
        fname : str
            Target file path.

        Raises
        ------
        RuntimeError
            If the transform is still identity (nothing to save).
        """
        if not self.can_save:
            raise RuntimeError("Not enough information for saving transform")
        write_trans(fname, Transform('head', 'mri', self.head_mri_t))
    def _parameters_items_changed(self):
        # Sync the internal parameters (rad / m / proportion) back to the
        # GUI traits (deg / mm / %), rebuild the nearest-neighbor tree if
        # the scale changed, and refresh the status line.
        # Update GUI as necessary
        n_scale = self.n_scale_params
        for ii, key in enumerate(('rot_x', 'rot_y', 'rot_z')):
            val = np.rad2deg(self.parameters[ii])
            if val != getattr(self, key):  # prevent circular
                setattr(self, key, val)
        for ii, key in enumerate(('trans_x', 'trans_y', 'trans_z')):
            val = self.parameters[ii + 3] * 1e3
            if val != getattr(self, key):  # prevent circular
                setattr(self, key, val)
        for ii, key in enumerate(('scale_x', 'scale_y', 'scale_z')):
            val = self.parameters[ii + 6] * 1e2
            if val != getattr(self, key):  # prevent circular
                setattr(self, key, val)
        # Only update our nearest-neighbor if necessary
        if self.parameters[6:9] != self.last_parameters[6:9]:
            self._update_nearest_calc()
        # Update the status text
        move, angle, percs = self.changes
        text = u'Change: Δ=%0.1f mm ∠=%0.2f°' % (move, angle)
        if n_scale:
            text += ' Scale ' if n_scale == 1 else ' Sx/y/z '
            text += '/'.join(['%+0.1f%%' % p for p in percs[:n_scale]])
        if self.iteration >= 0:
            text += u' (iteration %d/%d, %0.1f sec)' % (
                self.iteration + 1, self.icp_iterations,
                time.time() - self.icp_start_time)
        self.last_parameters[:] = self.parameters[:]
        self.status_text = text
    # GUI-trait change handlers: convert display units back into the
    # internal ``parameters`` list (deg->rad, mm->m, %->proportion).
    def _rot_x_changed(self):
        self.parameters[0] = np.deg2rad(self.rot_x)
    def _rot_y_changed(self):
        self.parameters[1] = np.deg2rad(self.rot_y)
    def _rot_z_changed(self):
        self.parameters[2] = np.deg2rad(self.rot_z)
    def _trans_x_changed(self):
        self.parameters[3] = self.trans_x * 1e-3
    def _trans_y_changed(self):
        self.parameters[4] = self.trans_y * 1e-3
    def _trans_z_changed(self):
        self.parameters[5] = self.trans_z * 1e-3
    def _scale_x_changed(self):
        # In uniform-scaling mode the X slider drives all three axes.
        if self.n_scale_params == 1:
            self.parameters[6:9] = [self.scale_x * 1e-2] * 3
        else:
            self.parameters[6] = self.scale_x * 1e-2
    def _scale_y_changed(self):
        self.parameters[7] = self.scale_y * 1e-2
    def _scale_z_changed(self):
        self.parameters[8] = self.scale_z * 1e-2
class CoregFrameHandler(Handler):
    """Check for unfinished processes before closing its window."""
    def object_title_changed(self, info):
        """Set the title when it gets changed."""
        info.ui.title = info.object.title
    def close(self, info, is_ok):
        """Handle the close event.

        Refuses to close while queued MRI-scaling tasks are unfinished;
        otherwise persists the window size/configuration and allows close.
        """
        if info.object.queue.unfinished_tasks:
            information(None, "Can not close the window while saving is still "
                        "in progress. Please wait until all MRIs are "
                        "processed.", "Saving Still in Progress")
            return False
        else:
            try:  # works on Qt only for now
                size = (info.ui.control.width(), info.ui.control.height())
            except AttributeError:
                size = None
            # store configuration, but don't prevent from closing on error
            try:
                info.object.save_config(size=size)
            except Exception as exc:
                warnings.warn("Error saving GUI configuration:\n%s" % (exc,))
            return True
class CoregPanelHandler(Handler):
    """Open other windows with proper parenting."""

    # UIInfo of the main window, cached so that dialogs opened from this
    # handler can be parented to it.
    info = Instance(UIInfo)

    def object_fitting_options_panel_changed(self, info):  # noqa: D102
        # Remember the UIInfo once the panel trait is first set.
        self.info = info

    def object_fitting_options_changed(self, info):  # noqa: D102
        # Show the fitting-options dialog as a child of the main window.
        self.info.object.fitting_options_panel.edit_traits(
            parent=self.info.ui.control)

    def object_load_trans_changed(self, info):  # noqa: D102
        # find trans file destination
        model = self.info.object.model
        raw_dir = os.path.dirname(model.hsp.file)
        subject = model.mri.subject
        trans_file = trans_fname.format(raw_dir=raw_dir, subject=subject)
        dlg = FileDialog(action="open", wildcard=trans_wildcard,
                         default_path=trans_file, parent=self.info.ui.control)
        if dlg.open() != OK:
            return
        trans_file = dlg.path
        try:
            model.load_trans(trans_file)
        except Exception as e:
            error(None, "Error loading trans file %s: %s (See terminal "
                  "for details)" % (trans_file, e), "Error Loading Trans File")
            raise

    def object_save_changed(self, info):  # noqa: D102
        obj = self.info.object
        subjects_dir = obj.model.mri.subjects_dir
        subject_from = obj.model.mri.subject
        # check that fiducials are saved
        skip_fiducials = False
        if obj.n_scale_params and not _find_fiducials_files(subject_from,
                                                            subjects_dir):
            msg = ("No fiducials file has been found for {src}. If fiducials "
                   "are not saved, they will not be available in the scaled "
                   "MRI. Should the current fiducials be saved now? "
                   "Select Yes to save the fiducials at "
                   "{src}/bem/{src}-fiducials.fif. "
                   "Select No to proceed scaling the MRI without fiducials.".
                   format(src=subject_from))
            title = "Save Fiducials for %s?" % subject_from
            rc = confirm(self.info.ui.control, msg, title, cancel=True,
                         default=CANCEL)
            if rc == CANCEL:
                return
            elif rc == YES:
                obj.model.mri.save(obj.model.mri.default_fid_fname)
            elif rc == NO:
                skip_fiducials = True
            else:
                raise RuntimeError("rc=%s" % repr(rc))
        # find target subject
        if obj.n_scale_params:
            subject_to = obj.model.raw_subject or subject_from
            mridlg = NewMriDialog(subjects_dir=subjects_dir,
                                  subject_from=subject_from,
                                  subject_to=subject_to)
            ui = mridlg.edit_traits(kind='modal',
                                    parent=self.info.ui.control)
            if not ui.result:  # i.e., user pressed cancel
                return
            subject_to = mridlg.subject_to
        else:
            subject_to = subject_from
        # find trans file destination
        raw_dir = os.path.dirname(obj.model.hsp.file)
        trans_file = trans_fname.format(raw_dir=raw_dir, subject=subject_to)
        dlg = FileDialog(action="save as", wildcard=trans_wildcard,
                         default_path=trans_file,
                         parent=self.info.ui.control)
        dlg.open()
        if dlg.return_code != OK:
            return
        trans_file = dlg.path
        if not trans_file.endswith('.fif'):
            trans_file += '.fif'
        if os.path.exists(trans_file):
            # BUGFIX: the filename was never interpolated into the message
            # (the user previously saw a literal "%r"); format it now.
            answer = confirm(None, "The file %r already exists. Should it "
                             "be replaced?" % (trans_file,), "Overwrite File?")
            if answer != YES:
                return
        # save the trans file
        try:
            obj.model.save_trans(trans_file)
        except Exception as e:
            error(None, "Error saving -trans.fif file: %s (See terminal for "
                  "details)" % (e,), "Error Saving Trans File")
            raise
        # save the scaled MRI
        if obj.n_scale_params:
            job = obj.model.get_scaling_job(subject_to, skip_fiducials)
            obj.queue.put(job)
            obj.queue_len += 1
def _make_view_data_panel(scrollable=False):
    """Generate the traitsui View for the data (left-hand) panel.

    Parameters
    ----------
    scrollable : bool
        Whether the returned View should be scrollable (passed to the
        ``View`` constructor).

    Returns
    -------
    view : View
        Layout with MRI-subject, fiducial, digitization-source, and
        view-control sections.
    """
    view = View(VGroup(
        # MRI subject selector
        VGroup(Item('subject_panel', style='custom'), label="MRI Subject",
               show_border=_SHOW_BORDER, show_labels=False),
        # Fiducial lock/edit controls
        VGroup(Item('lock_fiducials', style='custom',
                    editor=EnumEditor(cols=2, values={False: '2:Edit',
                                                      True: '1:Lock'}),
                    enabled_when='fid_ok'),
               HGroup(Item('hsp_always_visible',
                           label='Show head shape points', show_label=True,
                           enabled_when='not lock_fiducials', width=-1),
                      show_left=False),
               Item('fid_panel', style='custom'), label="MRI Fiducials",
               show_border=_SHOW_BORDER, show_labels=False),
        # Digitization source and head-shape-point omission controls
        VGroup(Item('raw_src', style="custom"),
               HGroup('guess_mri_subject',
                      Label('Guess subject from name'), show_labels=False),
               VGrid(Item('grow_hair', editor=laggy_float_editor_mm,
                          width=_MM_WIDTH),
                     Label(u'ΔHair', show_label=True, width=-1), '0',
                     Item('distance', show_label=False, width=_MM_WIDTH,
                          editor=laggy_float_editor_mm),
                     Item('omit_points', width=_BUTTON_WIDTH),
                     Item('reset_omit_points', width=_RESET_WIDTH),
                     columns=3, show_labels=False),
               Item('omitted_info', style='readonly',
                    width=_REDUCED_TEXT_WIDTH), label='Digitization source',
               show_border=_SHOW_BORDER, show_labels=False),
        # 3D view controls
        VGroup(HGroup(Item('headview', style='custom'), Spring(),
                      show_labels=False),
               Item('view_options', width=_REDUCED_TEXT_WIDTH),
               label='View', show_border=_SHOW_BORDER, show_labels=False),
        Spring(),
        show_labels=False), kind='panel', buttons=[UndoButton],
        scrollable=scrollable, handler=DataPanelHandler())
    return view
def _make_view_coreg_panel(scrollable=False):
    """Generate View for CoregPanel.

    Parameters
    ----------
    scrollable : bool
        Whether the returned View should be scrollable (passed to the
        ``View`` constructor).

    Returns
    -------
    view : View
        Layout for the coregistration (right-hand) panel: scaling,
        translation/rotation, fit buttons, feedback fields, and saving.
    """
    view = View(VGroup(
        # Scaling
        HGroup(Item('n_scale_params', label='Scaling mode',
                    editor=EnumEditor(values={0: '1:None',
                                              1: '2:Uniform',
                                              3: '3:3-axis'})), Spring()),
        VGrid(Item('scale_x', editor=laggy_float_editor_scale,
                   show_label=True, tooltip="Scale along right-left axis (%)",
                   enabled_when='n_scale_params > 0', width=_SCALE_WIDTH),
              Item('scale_x_dec', enabled_when='n_scale_params > 0',
                   width=_INC_BUTTON_WIDTH),
              Item('scale_x_inc', enabled_when='n_scale_params > 0',
                   width=_INC_BUTTON_WIDTH),
              Item('scale_step', tooltip="Scaling step (%)",
                   enabled_when='n_scale_params > 0', width=_SCALE_STEP_WIDTH),
              Spring(),
              Item('scale_y', editor=laggy_float_editor_scale, show_label=True,
                   enabled_when='n_scale_params > 1',
                   tooltip="Scale along anterior-posterior axis (%)",
                   width=_SCALE_WIDTH),
              Item('scale_y_dec', enabled_when='n_scale_params > 1',
                   width=_INC_BUTTON_WIDTH),
              Item('scale_y_inc', enabled_when='n_scale_params > 1',
                   width=_INC_BUTTON_WIDTH),
              Label('(Step)', width=_SCALE_WIDTH),
              Spring(),
              Item('scale_z', editor=laggy_float_editor_scale, show_label=True,
                   enabled_when='n_scale_params > 1', width=_SCALE_WIDTH,
                   tooltip="Scale along anterior-posterior axis (%)"),
              Item('scale_z_dec', enabled_when='n_scale_params > 1',
                   width=_INC_BUTTON_WIDTH),
              Item('scale_z_inc', enabled_when='n_scale_params > 1',
                   width=_INC_BUTTON_WIDTH),
              '0',
              Spring(),
              label='Scaling parameters', show_labels=False, columns=5,
              show_border=_SHOW_BORDER),
        # Fit buttons that include scaling (fits_* traits)
        VGrid(Item('fits_icp', enabled_when='n_scale_params > 0 and '
                   'n_icp_points >= 10',
                   tooltip="Rotate, translate, and scale the MRI to minimize "
                   "the distance from each digitizer point to the closest MRI "
                   "point (one ICP iteration)", width=_BUTTON_WIDTH),
              Item('fits_fid', enabled_when='n_scale_params == 1 and '
                   'has_fid_data',
                   tooltip="Rotate, translate, and scale the MRI to minimize "
                   "the distance of the three fiducials.",
                   width=_BUTTON_WIDTH),
              Item('cancels_icp', enabled_when="fits_icp_running",
                   tooltip='Stop ICP fitting', width=_RESET_WIDTH),
              Item('reset_scale', enabled_when='n_scale_params',
                   tooltip="Reset scaling parameters", width=_RESET_WIDTH),
              show_labels=False, columns=4),
        # Translation and rotation
        VGrid(Item('trans_x', editor=laggy_float_editor_mm, show_label=True,
                   tooltip="Move along right-left axis", width=_MM_WIDTH),
              Item('trans_x_dec', width=_INC_BUTTON_WIDTH),
              Item('trans_x_inc', width=_INC_BUTTON_WIDTH),
              Item('trans_step', tooltip="Movement step (mm)",
                   width=_MM_STEP_WIDTH),
              Spring(),
              Item('trans_y', editor=laggy_float_editor_mm, show_label=True,
                   tooltip="Move along anterior-posterior axis",
                   width=_MM_WIDTH),
              Item('trans_y_dec', width=_INC_BUTTON_WIDTH),
              Item('trans_y_inc', width=_INC_BUTTON_WIDTH),
              Label('(Step)', width=_MM_WIDTH),
              Spring(),
              Item('trans_z', editor=laggy_float_editor_mm, show_label=True,
                   tooltip="Move along anterior-posterior axis",
                   width=_MM_WIDTH),
              Item('trans_z_dec', width=_INC_BUTTON_WIDTH),
              Item('trans_z_inc', width=_INC_BUTTON_WIDTH),
              '0',
              Spring(),
              Item('rot_x', editor=laggy_float_editor_deg, show_label=True,
                   tooltip="Tilt the digitization backward (-) or forward (+)",
                   width=_DEG_WIDTH),
              Item('rot_x_dec', width=_INC_BUTTON_WIDTH),
              Item('rot_x_inc', width=_INC_BUTTON_WIDTH),
              Item('rot_step', tooltip=u"Rotation step (°)",
                   width=_DEG_STEP_WIDTH),
              Spring(),
              Item('rot_y', editor=laggy_float_editor_deg, show_label=True,
                   tooltip="Tilt the digitization rightward (-) or "
                   "leftward (+)", width=_DEG_WIDTH),
              Item('rot_y_dec', width=_INC_BUTTON_WIDTH),
              Item('rot_y_inc', width=_INC_BUTTON_WIDTH),
              Label('(Step)', width=_DEG_WIDTH),
              Spring(),
              Item('rot_z', editor=laggy_float_editor_deg, show_label=True,
                   tooltip="Turn the digitization leftward (-) or "
                   "rightward (+)", width=_DEG_WIDTH),
              Item('rot_z_dec', width=_INC_BUTTON_WIDTH),
              Item('rot_z_inc', width=_INC_BUTTON_WIDTH),
              '0',
              Spring(),
              columns=5, show_labels=False, show_border=_SHOW_BORDER,
              label=u'Translation (Δ) and Rotation (∠)'),
        # Fit buttons for rigid (rotation + translation only) fitting
        VGroup(Item('fit_icp', enabled_when='n_icp_points >= 10',
                    tooltip="Rotate and translate the MRI to minimize the "
                    "distance from each digitizer point to the closest MRI "
                    "point (one ICP iteration)", width=_BUTTON_WIDTH),
               Item('fit_fid', enabled_when="has_fid_data",
                    tooltip="Rotate and translate the MRI to minimize the "
                    "distance of the three fiducials.", width=_BUTTON_WIDTH),
               Item('cancel_icp', enabled_when="fit_icp_running",
                    tooltip='Stop ICP iterations', width=_RESET_WIDTH),
               Item('reset_tr', tooltip="Reset translation and rotation.",
                    width=_RESET_WIDTH),
               show_labels=False, columns=4),
        # Fit-quality feedback and fitting options
        Item('fid_eval_str', style='readonly', tooltip='Fiducial differences',
             width=_REDUCED_TEXT_WIDTH),
        Item('points_eval_str', style='readonly',
             tooltip='Point error (μ ± σ)', width=_REDUCED_TEXT_WIDTH),
        Item('fitting_options', width=_REDUCED_TEXT_WIDTH, show_label=False),
        # Options applied when saving a scaled subject
        VGrid(Item('scale_labels', label="Scale label files",
                   enabled_when='n_scale_params > 0'),
              Item('copy_annot', label="Copy annotation files",
                   enabled_when='n_scale_params > 0'),
              Item('prepare_bem_model', label="Prepare BEM",
                   enabled_when='can_prepare_bem_model'),
              show_left=False, label='Subject-saving options', columns=1,
              show_border=_SHOW_BORDER),
        # Save / load / reset buttons
        VGrid(Item('save', enabled_when='can_save',
                   tooltip="Save the trans file and (if scaling is enabled) "
                   "the scaled MRI", width=_BUTTON_WIDTH),
              Item('load_trans', width=_BUTTON_WIDTH,
                   tooltip="Load Head<->MRI trans file"),
              Item('reset_params', tooltip="Reset all coregistration "
                   "parameters", width=_RESET_WIDTH),
              show_labels=False, columns=3),
        Spring(),
        show_labels=False), kind='panel', buttons=[UndoButton],
        scrollable=scrollable, handler=CoregPanelHandler())
    return view
class FittingOptionsPanel(HasTraits):
    """Dialog panel exposing ICP fitting options.

    All traits delegate to the shared ``CoregModel``, so edits in this
    dialog take effect on the model immediately.
    """
    model = Instance(CoregModel)
    # Relative weights applied to each point category during fitting
    # (delegated to the model).
    lpa_weight = DelegatesTo('model')
    nasion_weight = DelegatesTo('model')
    rpa_weight = DelegatesTo('model')
    hsp_weight = DelegatesTo('model')
    eeg_weight = DelegatesTo('model')
    hpi_weight = DelegatesTo('model')
    # Availability flags per point category (delegated); referenced by the
    # ``enabled_when`` expressions in the View below.
    has_lpa_data = DelegatesTo('model')
    has_nasion_data = DelegatesTo('model')
    has_rpa_data = DelegatesTo('model')
    has_hsp_data = DelegatesTo('model')
    has_eeg_data = DelegatesTo('model')
    has_hpi_data = DelegatesTo('model')
    # ICP iteration count, convergence thresholds, and fiducial-matching
    # strategy (delegated).
    icp_iterations = DelegatesTo('model')
    icp_start_time = DelegatesTo('model')
    icp_angle = DelegatesTo('model')
    icp_distance = DelegatesTo('model')
    icp_scale = DelegatesTo('model')
    icp_fid_match = DelegatesTo('model')
    n_scale_params = DelegatesTo('model')
    # Declarative layout of the "Fitting options" dialog.
    view = View(VGroup(
        VGrid(HGroup(Item('icp_iterations', label='Iterations',
                          width=_MM_WIDTH, tooltip='Maximum ICP iterations to '
                          'perform (per click)'),
                     Spring(), show_labels=True), label='ICP iterations (max)',
              show_border=_SHOW_BORDER),
        VGrid(Item('icp_angle', label=u'Angle (°)', width=_MM_WIDTH,
                   tooltip='Angle convergence threshold'),
              Item('icp_distance', label='Distance (mm)', width=_MM_WIDTH,
                   tooltip='Distance convergence threshold'),
              Item('icp_scale', label='Scale (%)',
                   tooltip='Scaling convergence threshold', width=_MM_WIDTH,
                   enabled_when='n_scale_params > 0'),
              show_labels=True, label='ICP convergence limits', columns=3,
              show_border=_SHOW_BORDER),
        VGrid(Item('icp_fid_match', width=-1, show_label=False,
                   editor=EnumEditor(values=dict(
                       nearest='1:Closest to surface',
                       matched='2:MRI fiducials'), cols=2,
                       format_func=lambda x: x),
                   tooltip='Match digitization fiducials to MRI fiducials or '
                   'the closest surface point', style='custom'),
              label='Fiducial point matching', show_border=_SHOW_BORDER),
        VGrid(
            VGrid(Item('lpa_weight', editor=laggy_float_editor_weight,
                       tooltip="Relative weight for LPA", width=_WEIGHT_WIDTH,
                       enabled_when='has_lpa_data', label='LPA'),
                  Item('nasion_weight', editor=laggy_float_editor_weight,
                       tooltip="Relative weight for nasion", label='Nasion',
                       width=_WEIGHT_WIDTH, enabled_when='has_nasion_data'),
                  Item('rpa_weight', editor=laggy_float_editor_weight,
                       tooltip="Relative weight for RPA", width=_WEIGHT_WIDTH,
                       enabled_when='has_rpa_data', label='RPA'),
                  columns=3, show_labels=True, show_border=_SHOW_BORDER,
                  label='Fiducials'),
            VGrid(Item('hsp_weight', editor=laggy_float_editor_weight,
                       tooltip="Relative weight for head shape points",
                       enabled_when='has_hsp_data',
                       label='HSP', width=_WEIGHT_WIDTH,),
                  Item('eeg_weight', editor=laggy_float_editor_weight,
                       tooltip="Relative weight for EEG points", label='EEG',
                       enabled_when='has_eeg_data', width=_WEIGHT_WIDTH),
                  Item('hpi_weight', editor=laggy_float_editor_weight,
                       tooltip="Relative weight for HPI points", label='HPI',
                       enabled_when='has_hpi_data', width=_WEIGHT_WIDTH),
                  columns=3, show_labels=True, show_border=_SHOW_BORDER,
                  label='Other points (closest-point matched)'),
            show_labels=False, label='Point weights', columns=2,
            show_border=_SHOW_BORDER),
        ), title="Fitting options")
_DEFAULT_PARAMETERS = (0., 0., 0., 0., 0., 0., 1., 1., 1.)
class CoregPanel(HasPrivateTraits):
    """Coregistration panel for Head<->MRI with scaling.

    Most traits delegate to the shared ``CoregModel``; the panel adds the
    GUI-only step sizes, the inc/dec buttons, and a background worker
    thread that performs MRI scaling and BEM-solution jobs pulled from
    ``self.queue``.
    """
    model = Instance(CoregModel)
    # parameters
    reset_params = Button(label=_RESET_LABEL)
    n_scale_params = DelegatesTo('model')
    parameters = DelegatesTo('model')
    # Step sizes (GUI-only) and per-axis value/inc/dec controls; the values
    # themselves are delegated to the model.
    scale_step = Float(1.)
    scale_x = DelegatesTo('model')
    scale_x_dec = Button('-')
    scale_x_inc = Button('+')
    scale_y = DelegatesTo('model')
    scale_y_dec = Button('-')
    scale_y_inc = Button('+')
    scale_z = DelegatesTo('model')
    scale_z_dec = Button('-')
    scale_z_inc = Button('+')
    rot_step = Float(1.)
    rot_x = DelegatesTo('model')
    rot_x_dec = Button('-')
    rot_x_inc = Button('+')
    rot_y = DelegatesTo('model')
    rot_y_dec = Button('-')
    rot_y_inc = Button('+')
    rot_z = DelegatesTo('model')
    rot_z_dec = Button('-')
    rot_z_inc = Button('+')
    trans_step = Float(1.)
    trans_x = DelegatesTo('model')
    trans_x_dec = Button('-')
    trans_x_inc = Button('+')
    trans_y = DelegatesTo('model')
    trans_y_dec = Button('-')
    trans_y_inc = Button('+')
    trans_z = DelegatesTo('model')
    trans_z_dec = Button('-')
    trans_z_inc = Button('+')
    # fitting
    has_lpa_data = DelegatesTo('model')
    has_nasion_data = DelegatesTo('model')
    has_rpa_data = DelegatesTo('model')
    has_fid_data = DelegatesTo('model')
    has_hsp_data = DelegatesTo('model')
    has_eeg_data = DelegatesTo('model')
    has_hpi_data = DelegatesTo('model')
    n_icp_points = DelegatesTo('model')
    # fitting with scaling
    fits_icp = Button(label='Fit (ICP)')
    fits_fid = Button(label='Fit Fid.')
    cancels_icp = Button(u'■')
    reset_scale = Button(label=_RESET_LABEL)
    fits_icp_running = DelegatesTo('model')
    # fitting without scaling
    fit_icp = Button(label='Fit (ICP)')
    fit_fid = Button(label='Fit Fid.')
    cancel_icp = Button(label=u'■')
    reset_tr = Button(label=_RESET_LABEL)
    fit_icp_running = DelegatesTo('model')
    # fit info
    fid_eval_str = DelegatesTo('model')
    points_eval_str = DelegatesTo('model')
    # saving
    can_prepare_bem_model = DelegatesTo('model')
    can_save = DelegatesTo('model')
    scale_labels = DelegatesTo('model')
    copy_annot = DelegatesTo('model')
    prepare_bem_model = DelegatesTo('model')
    save = Button(label="Save...")
    load_trans = Button(label='Load...')
    # Background job queue for MRI scaling; note the trait name shadows the
    # stdlib ``queue`` module inside the class namespace (the RHS is
    # evaluated before the name is bound, so ``queue.Queue`` still refers
    # to the module here).
    queue = Instance(queue.Queue, ())
    queue_feedback = Str('')
    queue_current = Str('')
    queue_len = Int(0)
    queue_status_text = Property(
        Str, depends_on=['queue_feedback', 'queue_current', 'queue_len'])
    fitting_options_panel = Instance(FittingOptionsPanel)
    fitting_options = Button('Fitting options...')
    def _fitting_options_panel_default(self):
        """Create the fitting-options panel bound to the same model."""
        return FittingOptionsPanel(model=self.model)
    view = _make_view_coreg_panel()
    def __init__(self, *args, **kwargs):  # noqa: D102
        super(CoregPanel, self).__init__(*args, **kwargs)
        # Setup scaling worker
        def worker():
            # Runs forever, pulling scaling jobs (produced by
            # CoregPanelHandler.object_save_changed) off self.queue.
            while True:
                (subjects_dir, subject_from, subject_to, scale, skip_fiducials,
                 include_labels, include_annot, bem_names) = self.queue.get()
                self.queue_len -= 1
                # Scale MRI files
                self.queue_current = 'Scaling %s...' % subject_to
                try:
                    scale_mri(subject_from, subject_to, scale, True,
                              subjects_dir, skip_fiducials, include_labels,
                              include_annot)
                except Exception:
                    logger.error('Error scaling %s:\n' % subject_to +
                                 traceback.format_exc())
                    self.queue_feedback = ('Error scaling %s (see Terminal)' %
                                           subject_to)
                    bem_names = ()  # skip bem solutions
                else:
                    self.queue_feedback = 'Done scaling %s' % subject_to
                # Precompute BEM solutions
                for bem_name in bem_names:
                    self.queue_current = ('Computing %s solution...' %
                                          bem_name)
                    try:
                        bem_file = bem_fname.format(subjects_dir=subjects_dir,
                                                    subject=subject_to,
                                                    name=bem_name)
                        bemsol = make_bem_solution(bem_file)
                        write_bem_solution(bem_file[:-4] + '-sol.fif', bemsol)
                    except Exception:
                        logger.error('Error computing %s solution:\n' %
                                     bem_name + traceback.format_exc())
                        self.queue_feedback = ('Error computing %s solution '
                                               '(see Terminal)' % bem_name)
                    else:
                        self.queue_feedback = ('Done computing %s solution' %
                                               bem_name)
                # Finalize
                self.queue_current = ''
                self.queue.task_done()
        # Daemon thread so a pending job cannot block interpreter exit.
        t = Thread(target=worker)
        t.daemon = True
        t.start()
    @cached_property
    def _get_queue_status_text(self):
        """Combine current job, last feedback, and queue length for display."""
        items = []
        if self.queue_current:
            items.append(self.queue_current)
        if self.queue_feedback:
            items.append(self.queue_feedback)
        if self.queue_len:
            items.append("%i queued" % self.queue_len)
        return ' | '.join(items)
    # NOTE(review): no ``rotation``/``translation`` Property trait is
    # declared in the visible class body for these getters -- confirm they
    # are wired to a trait defined elsewhere or are vestigial.
    @cached_property
    def _get_rotation(self):
        rot = np.array([self.rot_x, self.rot_y, self.rot_z])
        return rot
    @cached_property
    def _get_translation(self):
        trans = np.array([self.trans_x, self.trans_y, self.trans_z])
        return trans
    def _n_scale_params_fired(self):
        """Re-seed the scale parameters when the scaling mode changes."""
        if self.n_scale_params == 0:
            # No scaling: force unit scale on all axes.
            use = [1] * 3
        elif self.n_scale_params == 1:
            # Uniform scaling: collapse the three axes to their mean.
            use = [np.mean([self.scale_x, self.scale_y, self.scale_z]) /
                   100.] * 3
        else:
            # 3-axis: keep the current per-axis values.
            use = self.parameters[6:9]
        self.parameters[6:9] = use
    def _fit_fid_fired(self):
        # Rigid fiducial fit (0 scale parameters).
        with busy():
            self.model.fit_fiducials(0)
    def _fit_icp_fired(self):
        # Rigid ICP fit (0 scale parameters).
        with busy():
            self.model.fit_icp(0)
    def _fits_fid_fired(self):
        # Fiducial fit including scaling.
        with busy():
            self.model.fit_fiducials()
    def _fits_icp_fired(self):
        # ICP fit including scaling.
        with busy():
            self.model.fit_icp()
    def _cancel_icp_fired(self):
        self.fit_icp_running = False
    def _cancels_icp_fired(self):
        self.fits_icp_running = False
    def _reset_scale_fired(self):
        self.reset_traits(('scale_x', 'scale_y', 'scale_z'))
    def _reset_tr_fired(self):
        self.reset_traits(('trans_x', 'trans_y', 'trans_z',
                           'rot_x', 'rot_y', 'rot_z'))
    def _reset_params_fired(self):
        self.model.reset()
    # Increment/decrement handlers: each button nudges its value by the
    # corresponding GUI step size.
    def _rot_x_dec_fired(self):
        self.rot_x -= self.rot_step
    def _rot_x_inc_fired(self):
        self.rot_x += self.rot_step
    def _rot_y_dec_fired(self):
        self.rot_y -= self.rot_step
    def _rot_y_inc_fired(self):
        self.rot_y += self.rot_step
    def _rot_z_dec_fired(self):
        self.rot_z -= self.rot_step
    def _rot_z_inc_fired(self):
        self.rot_z += self.rot_step
    def _scale_x_dec_fired(self):
        self.scale_x -= self.scale_step
    def _scale_x_inc_fired(self):
        self.scale_x += self.scale_step
    def _scale_y_dec_fired(self):
        self.scale_y -= self.scale_step
    def _scale_y_inc_fired(self):
        self.scale_y += self.scale_step
    def _scale_z_dec_fired(self):
        self.scale_z -= self.scale_step
    def _scale_z_inc_fired(self):
        self.scale_z += self.scale_step
    def _trans_x_dec_fired(self):
        self.trans_x -= self.trans_step
    def _trans_x_inc_fired(self):
        self.trans_x += self.trans_step
    def _trans_y_dec_fired(self):
        self.trans_y -= self.trans_step
    def _trans_y_inc_fired(self):
        self.trans_y += self.trans_step
    def _trans_z_dec_fired(self):
        self.trans_z -= self.trans_step
    def _trans_z_inc_fired(self):
        self.trans_z += self.trans_step
class NewMriDialog(HasPrivateTraits):
    """Dialog to determine the target subject name for a scaled MRI."""
    # Dialog to determine target subject name for a scaled MRI
    subjects_dir = Directory
    subject_to = Str
    subject_from = Str
    # Derived state: target directory and whether it already exists.
    subject_to_dir = Property(depends_on=['subjects_dir', 'subject_to'])
    subject_to_exists = Property(Bool, depends_on='subject_to_dir')
    # presumably padded to reserve label width so the dialog does not
    # resize as messages change -- TODO confirm
    feedback = Str(' ' * 100)
    can_overwrite = Bool
    overwrite = Bool
    can_save = Bool
    view = View(Item('subject_to', label='New MRI Subject Name', tooltip="A "
                     "new folder with this name will be created in the "
                     "current subjects_dir for the scaled MRI files"),
                Item('feedback', show_label=False, style='readonly'),
                Item('overwrite', enabled_when='can_overwrite', tooltip="If a "
                     "subject with the chosen name exists, delete the old "
                     "subject"),
                buttons=[CancelButton,
                         Action(name='OK', enabled_when='can_save')])
    def _can_overwrite_changed(self, new):
        """Clear the overwrite flag whenever overwriting becomes invalid."""
        if not new:
            self.overwrite = False
    @cached_property
    def _get_subject_to_dir(self):
        """Return the would-be directory of the target subject."""
        return os.path.join(self.subjects_dir, self.subject_to)
    @cached_property
    def _get_subject_to_exists(self):
        """Return whether the target subject directory already exists."""
        if not self.subject_to:
            return False
        elif os.path.exists(self.subject_to_dir):
            return True
        else:
            return False
    @on_trait_change('subject_to_dir,overwrite')
    def update_dialog(self):
        """Update feedback text and save/overwrite availability."""
        if not self.subject_from:
            # weird trait state that occurs even when subject_from is set
            return
        elif not self.subject_to:
            self.feedback = "No subject specified..."
            self.can_save = False
            self.can_overwrite = False
        elif self.subject_to == self.subject_from:
            self.feedback = "Must be different from MRI source subject..."
            self.can_save = False
            self.can_overwrite = False
        elif self.subject_to_exists:
            if self.overwrite:
                self.feedback = "%s will be overwritten." % self.subject_to
                self.can_save = True
                self.can_overwrite = True
            else:
                self.feedback = "Subject already exists..."
                self.can_save = False
                self.can_overwrite = True
        else:
            self.feedback = "Name ok."
            self.can_save = True
            self.can_overwrite = False
def _make_view(tabbed=False, split=False, width=800, height=600,
               scrollable=True):
    """Create a view for the CoregFrame.

    Parameters
    ----------
    tabbed : bool
        If True, stack the data and coregistration panels in tabs next to
        the 3D scene; otherwise place them on either side of the scene.
    split : bool
        If True, use a 'split' layout for the main group.
    width, height : int
        Initial window size passed to the View.
    scrollable : bool
        Whether the side panels should be scrollable.

    Returns
    -------
    view : View
        The top-level window layout including the status bar.
    """
    # Set the width to 0.99 to "push out" as much as possible, use
    # scene_width in the View below
    scene = Item('scene', show_label=False, width=0.99,
                 editor=SceneEditor(scene_class=MayaviScene))
    data_panel = VGroup(
        Item('data_panel', style='custom',
             width=_COREG_WIDTH if scrollable else 1,
             editor=InstanceEditor(view=_make_view_data_panel(scrollable))),
        label='Data', show_border=not scrollable, show_labels=False)
    # Setting `scrollable=True` for a Group does not seem to have any effect
    # (macOS), in order to be effective the parameter has to be set for a View
    # object; hence we use a special InstanceEditor to set the parameter
    # programmatically:
    coreg_panel = VGroup(
        Item('coreg_panel', style='custom',
             width=_COREG_WIDTH if scrollable else 1,
             editor=InstanceEditor(view=_make_view_coreg_panel(scrollable))),
        label="Coregistration", show_border=not scrollable, show_labels=False,
        enabled_when="data_panel.fid_panel.locked")
    main_layout = 'split' if split else 'normal'
    if tabbed:
        main = HGroup(scene,
                      Group(data_panel, coreg_panel, show_labels=False,
                            layout='tabbed'),
                      layout=main_layout)
    else:
        main = HGroup(data_panel, scene, coreg_panel, show_labels=False,
                      layout=main_layout)
    # Here we set the width and height to impossibly small numbers to force the
    # window to be as tight as possible
    view = View(main, resizable=True, handler=CoregFrameHandler(),
                buttons=NoButtons, width=width, height=height,
                statusbar=[StatusItem('status_text', width=0.55),
                           StatusItem('queue_status_text', width=0.45)])
    return view
class ViewOptionsPanel(HasTraits):
    """Dialog panel with display options for the 3D scene objects."""
    # Scene objects whose display properties are edited in this dialog.
    mri_obj = Instance(SurfaceObject)
    hsp_obj = Instance(PointObject)
    eeg_obj = Instance(PointObject)
    hpi_obj = Instance(PointObject)
    hsp_cf_obj = Instance(PointObject)
    mri_cf_obj = Instance(PointObject)
    # Scene-level display settings.
    bgcolor = RGBColor()
    coord_frame = Enum('mri', 'head', label='Display coordinate frame')
    head_high_res = Bool(True, label='Show high-resolution head')
    head_inside = Bool(True, label='Add opaque inner head surface')
    advanced_rendering = Bool(True, label='Use advanced OpenGL',
                              desc='Enable advanced OpenGL methods that do '
                              'not work with all renderers (e.g., depth '
                              'peeling)')
    # Declarative layout of the "Display options" dialog.
    view = View(
        VGroup(
            Item('mri_obj', style='custom', label="MRI"),
            Item('hsp_obj', style='custom', label="Head shape"),
            Item('eeg_obj', style='custom', label='EEG'),
            Item('hpi_obj', style='custom', label='HPI'),
            VGrid(Item('coord_frame', style='custom',
                       editor=EnumEditor(values={'mri': '1:MRI',
                                                 'head': '2:Head'}, cols=2,
                                         format_func=_pass)),
                  Item('head_high_res'), Spring(),
                  Item('advanced_rendering'),
                  Item('head_inside'), Spring(), Spring(),
                  columns=3, show_labels=True),
            Item('hsp_cf_obj', style='custom', label='Head axes'),
            Item('mri_cf_obj', style='custom', label='MRI axes'),
            HGroup(Item('bgcolor', label='Background'), Spring()),
        ), title="Display options")
class DataPanelHandler(Handler):
    """Open other windows with proper parenting."""

    # UIInfo cached so dialogs opened later can be parented correctly.
    info = Instance(UIInfo)

    def object_view_options_panel_changed(self, info):  # noqa: D102
        self.info = info

    def object_view_options_changed(self, info):  # noqa: D102
        # Show the display-options dialog as a child of the main window.
        panel = self.info.object.view_options_panel
        panel.edit_traits(parent=self.info.ui.control)
class DataPanel(HasTraits):
    """Data loading panel (MRI subject, fiducials, digitization source)."""
    # Set by CoregPanel
    model = Instance(CoregModel)
    scene = Instance(MlabSceneModel, ())
    lock_fiducials = DelegatesTo('model')
    guess_mri_subject = DelegatesTo('model')
    raw_src = DelegatesTo('model', 'hsp')
    # Set internally
    subject_panel = Instance(SubjectSelectorPanel)
    fid_panel = Instance(FiducialsPanel)
    headview = Instance(HeadViewController)
    view_options_panel = Instance(ViewOptionsPanel)
    hsp_always_visible = Bool(False, label="Always Show Head Shape")
    view_options = Button(label="Display options...")
    # Omit Points
    distance = Float(10., desc="maximal distance for head shape points from "
                     "the surface (mm)")
    omit_points = Button(label='Omit', desc="to omit head shape points "
                         "for the purpose of the automatic coregistration "
                         "procedure (mm).")
    grow_hair = DelegatesTo('model')
    reset_omit_points = Button(label=_RESET_LABEL, desc="to reset the "
                               "omission of head shape points to include all.")
    omitted_info = Str('No points omitted')
    def _subject_panel_default(self):
        """Create the subject selector bound to the model's subject source."""
        return SubjectSelectorPanel(model=self.model.mri.subject_source)
    def _fid_panel_default(self):
        """Create the fiducials panel bound to the model's MRI."""
        return FiducialsPanel(model=self.model.mri, headview=self.headview)
    def _headview_default(self):
        """Create the head view controller for this panel's scene."""
        return HeadViewController(system='RAS', scene=self.scene)
    def _omit_points_fired(self):
        """Omit head shape points beyond the chosen distance (mm -> m)."""
        distance = self.distance / 1000.
        self.model.omit_hsp_points(distance)
        n_omitted = self.model.hsp.n_omitted
        self.omitted_info = (
            "%s pt%s omitted (%0.1f mm)"
            % (n_omitted if n_omitted > 0 else 'No', _pl(n_omitted),
               self.distance))
    @on_trait_change('model:hsp:file')
    def _file_change(self):
        """Reset point omission whenever a new digitization file is loaded."""
        self._reset_omit_points_fired()
    def _reset_omit_points_fired(self):
        """Restore all head shape points (infinite distance threshold)."""
        self.model.omit_hsp_points(np.inf)
        self.omitted_info = 'No points omitted (reset)'
class CoregFrame(HasTraits):
"""GUI for head-MRI coregistration."""
model = Instance(CoregModel)
scene = Instance(MlabSceneModel, ())
head_high_res = Bool(True)
advanced_rendering = Bool(True)
head_inside = Bool(True)
data_panel = Instance(DataPanel)
coreg_panel = Instance(CoregPanel) # right panel
project_to_surface = DelegatesTo('eeg_obj')
orient_to_surface = DelegatesTo('hsp_obj')
scale_by_distance = DelegatesTo('hsp_obj')
mark_inside = DelegatesTo('hsp_obj')
status_text = DelegatesTo('model')
queue_status_text = DelegatesTo('coreg_panel')
fid_ok = DelegatesTo('model', 'mri.fid_ok')
lock_fiducials = DelegatesTo('model')
title = Str('MNE Coreg')
# visualization (MRI)
mri_obj = Instance(SurfaceObject)
mri_lpa_obj = Instance(PointObject)
mri_nasion_obj = Instance(PointObject)
mri_rpa_obj = Instance(PointObject)
bgcolor = RGBColor((0.5, 0.5, 0.5))
# visualization (Digitization)
hsp_obj = Instance(PointObject)
eeg_obj = Instance(PointObject)
hpi_obj = Instance(PointObject)
hsp_lpa_obj = Instance(PointObject)
hsp_nasion_obj = Instance(PointObject)
hsp_rpa_obj = Instance(PointObject)
hsp_visible = Property(depends_on=['data_panel:hsp_always_visible',
'lock_fiducials'])
# Coordinate frame axes
hsp_cf_obj = Instance(PointObject)
mri_cf_obj = Instance(PointObject)
picker = Instance(object)
# Processing
queue = DelegatesTo('coreg_panel')
view = _make_view()
def _model_default(self):
return CoregModel(
scale_labels=self._config.get(
'MNE_COREG_SCALE_LABELS', 'true') == 'true',
copy_annot=self._config.get(
'MNE_COREG_COPY_ANNOT', 'true') == 'true',
prepare_bem_model=self._config.get(
'MNE_COREG_PREPARE_BEM', 'true') == 'true')
def _data_panel_default(self):
return DataPanel(model=self.model, scene=self.scene)
def _coreg_panel_default(self):
return CoregPanel(model=self.model)
def __init__(self, raw=None, subject=None, subjects_dir=None,
guess_mri_subject=True, head_opacity=1.,
head_high_res=True, trans=None, config=None,
project_eeg=False, orient_to_surface=False,
scale_by_distance=False, mark_inside=False,
interaction='trackball', scale=0.16,
advanced_rendering=True, head_inside=True): # noqa: D102
self._config = config or {}
super(CoregFrame, self).__init__(guess_mri_subject=guess_mri_subject,
head_high_res=head_high_res,
advanced_rendering=advanced_rendering,
head_inside=head_inside)
self._initial_kwargs = dict(project_eeg=project_eeg,
orient_to_surface=orient_to_surface,
scale_by_distance=scale_by_distance,
mark_inside=mark_inside,
head_opacity=head_opacity,
interaction=interaction,
scale=scale, head_inside=head_inside)
self._locked_opacity = self._initial_kwargs['head_opacity']
self._locked_head_inside = self._initial_kwargs['head_inside']
if not 0 <= head_opacity <= 1:
raise ValueError(
"head_opacity needs to be a floating point number between 0 "
"and 1, got %r" % (head_opacity,))
if (subjects_dir is not None) and os.path.isdir(subjects_dir):
self.model.mri.subjects_dir = subjects_dir
if raw is not None:
self.model.hsp.file = raw
if subject is not None:
if subject not in self.model.mri.subject_source.subjects:
msg = "%s is not a valid subject. " % subject
# no subjects -> ['']
if any(self.model.mri.subject_source.subjects):
ss = ', '.join(self.model.mri.subject_source.subjects)
msg += ("The following subjects have been found: %s "
"(subjects_dir=%s). " %
(ss, self.model.mri.subjects_dir))
else:
msg += ("No subjects were found in subjects_dir=%s. " %
self.model.mri.subjects_dir)
msg += ("Make sure all MRI subjects have head shape files "
"(run $ mne make_scalp_surfaces).")
raise ValueError(msg)
self.model.mri.subject = subject
if trans is not None:
try:
self.model.load_trans(trans)
except Exception as e:
error(None, "Error loading trans file %s: %s (See terminal "
"for details)" % (trans, e), "Error Loading Trans File")
@on_trait_change('subject_panel:subject')
def _set_title(self):
self.title = '%s - MNE Coreg' % self.model.mri.subject
@on_trait_change('scene:activated')
def _init_plot(self):
    """Build all scene objects once the Mayavi scene is activated.

    Creates, in order: the MRI scalp surface, the MRI fiducials, the
    digitizer head-shape / EEG / HPI points, the digitizer fiducials,
    and the coordinate-frame axes; then wires trait synchronization
    between the model and the visualization objects.
    """
    _toggle_mlab_render(self, False)
    self._on_advanced_rendering_change()
    lpa_color = defaults['lpa_color']
    nasion_color = defaults['nasion_color']
    rpa_color = defaults['rpa_color']

    # MRI scalp
    #
    # Due to MESA rendering / z-order bugs, this should be added and
    # rendered first (see gh-5375).
    color = defaults['head_color']
    self.mri_obj = SurfaceObject(
        points=np.empty((0, 3)), color=color, tris=np.empty((0, 3)),
        scene=self.scene, name="MRI Scalp", block_behind=True,
        # opacity=self._initial_kwargs['head_opacity'],
        # setting opacity here causes points to be
        # [[0, 0, 0]] -- why??
    )
    self.mri_obj.opacity = self._initial_kwargs['head_opacity']
    self.mri_obj.rear_opacity = float(self.head_inside)
    self.data_panel.fid_panel.hsp_obj = self.mri_obj
    self._update_mri_obj()
    self.mri_obj.plot()
    # Do not do sync_trait here, instead use notifiers elsewhere

    # MRI Fiducials
    point_scale = defaults['mri_fid_scale']
    self.mri_lpa_obj = PointObject(scene=self.scene, color=lpa_color,
                                   has_norm=True, point_scale=point_scale,
                                   name='LPA', view='oct')
    self.model.sync_trait('transformed_mri_lpa',
                          self.mri_lpa_obj, 'points', mutual=False)
    self.mri_nasion_obj = PointObject(scene=self.scene, color=nasion_color,
                                      has_norm=True,
                                      point_scale=point_scale,
                                      name='Nasion', view='oct')
    self.model.sync_trait('transformed_mri_nasion',
                          self.mri_nasion_obj, 'points', mutual=False)
    self.mri_rpa_obj = PointObject(scene=self.scene, color=rpa_color,
                                   has_norm=True, point_scale=point_scale,
                                   name='RPA', view='oct')
    self.model.sync_trait('transformed_mri_rpa',
                          self.mri_rpa_obj, 'points', mutual=False)

    # Digitizer Head Shape
    kwargs = dict(
        view='cloud', scene=self.scene, resolution=20,
        orient_to_surface=self._initial_kwargs['orient_to_surface'],
        scale_by_distance=self._initial_kwargs['scale_by_distance'],
        mark_inside=self._initial_kwargs['mark_inside'])
    self.hsp_obj = PointObject(
        color=defaults['extra_color'], name='Extra', has_norm=True,
        point_scale=defaults['extra_scale'], **kwargs)
    self.model.sync_trait('transformed_hsp_points',
                          self.hsp_obj, 'points', mutual=False)

    # Digitizer EEG
    self.eeg_obj = PointObject(
        color=defaults['eeg_color'], point_scale=defaults['eeg_scale'],
        name='EEG', projectable=True, has_norm=True,
        project_to_surface=self._initial_kwargs['project_eeg'], **kwargs)
    self.model.sync_trait('transformed_hsp_eeg_points',
                          self.eeg_obj, 'points', mutual=False)

    # Digitizer HPI
    self.hpi_obj = PointObject(
        color=defaults['hpi_color'], name='HPI', has_norm=True,
        point_scale=defaults['hpi_scale'], **kwargs)
    self.model.sync_trait('transformed_hsp_hpi',
                          self.hpi_obj, 'points', mutual=False)
    # Points marked as "inside the head" are drawn in the scalp color.
    for p in (self.hsp_obj, self.eeg_obj, self.hpi_obj):
        p.inside_color = self.mri_obj.color
        self.mri_obj.sync_trait('color', p, 'inside_color',
                                mutual=False)

    # Digitizer Fiducials
    point_scale = defaults['dig_fid_scale']
    opacity = defaults['dig_fid_opacity']
    self.hsp_lpa_obj = PointObject(
        scene=self.scene, color=lpa_color, opacity=opacity,
        has_norm=True, point_scale=point_scale, name='HSP-LPA')
    self.model.sync_trait('transformed_hsp_lpa',
                          self.hsp_lpa_obj, 'points', mutual=False)
    self.hsp_nasion_obj = PointObject(
        scene=self.scene, color=nasion_color, opacity=opacity,
        has_norm=True, point_scale=point_scale, name='HSP-Nasion')
    self.model.sync_trait('transformed_hsp_nasion',
                          self.hsp_nasion_obj, 'points', mutual=False)
    self.hsp_rpa_obj = PointObject(
        scene=self.scene, color=rpa_color, opacity=opacity,
        has_norm=True, point_scale=point_scale, name='HSP-RPA')
    self.model.sync_trait('transformed_hsp_rpa',
                          self.hsp_rpa_obj, 'points', mutual=False)

    # All points share these
    for p in (self.hsp_obj, self.eeg_obj, self.hpi_obj,
              self.hsp_lpa_obj, self.hsp_nasion_obj, self.hsp_rpa_obj):
        self.sync_trait('hsp_visible', p, 'visible', mutual=False)
        self.model.sync_trait('mri_trans_noscale', p, 'project_to_trans',
                              mutual=False)

    on_pick = self.scene.mayavi_scene.on_mouse_pick
    self.picker = on_pick(self.data_panel.fid_panel._on_pick, type='cell')

    # Coordinate frame axes
    self.mri_cf_obj = PointObject(
        scene=self.scene, color=self.mri_obj.color,
        opacity=self.mri_obj.opacity, label_scale=5e-3,
        point_scale=0.02, name='MRI', view='arrow')
    self.mri_obj.sync_trait('color', self.mri_cf_obj, mutual=False)
    self._update_mri_axes()
    self.hsp_cf_obj = PointObject(
        scene=self.scene, color=self.hsp_obj.color,
        opacity=self.mri_obj.opacity, label_scale=5e-3,
        point_scale=0.02, name='Head', view='arrow')
    # BUGFIX: keep the head axes colored like the head-shape points.
    # The original synced ``hsp_cf_obj`` with itself (a no-op); mirror
    # the ``mri_obj`` -> ``mri_cf_obj`` pairing above instead.
    self.hsp_obj.sync_trait('color', self.hsp_cf_obj, mutual=False)
    self._update_hsp_axes()

    self.sync_trait('bgcolor', self.scene, 'background')

    self._update_projection_surf()

    _toggle_mlab_render(self, True)
    self.scene.render()
    self.scene.camera.focal_point = (0., 0., 0.)
    self.data_panel.view_options_panel = ViewOptionsPanel(
        mri_obj=self.mri_obj, hsp_obj=self.hsp_obj,
        eeg_obj=self.eeg_obj, hpi_obj=self.hpi_obj,
        hsp_cf_obj=self.hsp_cf_obj, mri_cf_obj=self.mri_cf_obj,
        head_high_res=self.head_high_res, head_inside=self.head_inside,
        bgcolor=self.bgcolor, advanced_rendering=self.advanced_rendering)
    self.data_panel.headview.scale = self._initial_kwargs['scale']
    self.data_panel.headview.interaction = \
        self._initial_kwargs['interaction']
    self.data_panel.headview.left = True
    self.data_panel.view_options_panel.sync_trait(
        'coord_frame', self.model)
    for key in ('head_high_res', 'advanced_rendering', 'bgcolor',
                'head_inside'):
        self.data_panel.view_options_panel.sync_trait(key, self)
@on_trait_change('advanced_rendering')
def _on_advanced_rendering_change(self):
    """Toggle VTK depth peeling for order-independent transparency."""
    renderer = getattr(self.scene, 'renderer', None)
    if renderer is None:
        # Scene not instantiated yet -- nothing to configure.
        return
    if self.advanced_rendering:
        # Depth peeling requires multisampling off and alpha bit planes on.
        renderer.use_depth_peeling = 1
        renderer.occlusion_ratio = 0.1
        renderer.maximum_number_of_peels = 100
        renderer.vtk_window.multi_samples = 0
        renderer.vtk_window.alpha_bit_planes = 1
    else:
        renderer.use_depth_peeling = 0
        renderer.vtk_window.multi_samples = 8
        renderer.vtk_window.alpha_bit_planes = 0
    if hasattr(renderer, 'use_fxaa'):
        # FXAA is only present on newer VTK versions, hence the hasattr.
        self.scene.renderer.use_fxaa = _get_3d_option('antialias')
    self.scene.render()
@on_trait_change('lock_fiducials')
def _on_lock_change(self):
    """Swap head opacity / inside view when (un)locking fiducials.

    While fiducials are being edited the head is shown fully opaque
    with the inside view off; the previous settings are stashed in
    ``_locked_opacity`` / ``_locked_head_inside`` and restored when
    the fiducials are locked again.
    """
    if not self.lock_fiducials:
        if self.mri_obj is None:
            # Scene not built yet: make the initial plot come up opaque.
            self._initial_kwargs['head_opacity'] = 1.
        else:
            self._locked_opacity = self.mri_obj.opacity
            self.mri_obj.opacity = 1.
        self._locked_head_inside = self.head_inside
        self.head_inside = False
    else:
        if self.mri_obj is not None:
            self.mri_obj.opacity = self._locked_opacity
        self.head_inside = self._locked_head_inside
@on_trait_change('head_inside')
def _on_head_inside_change(self):
    """Show or hide the rear head surface when the inside view toggles."""
    if self.mri_obj is None:
        return
    # rear_opacity becomes 1. when the inside view is on, else 0.
    self.mri_obj.rear_opacity = float(self.head_inside)
@cached_property
def _get_hsp_visible(self):
    """Traits getter: show digitizer points when fiducials are locked
    or when the user forces them to always be visible."""
    return self.data_panel.hsp_always_visible or self.lock_fiducials
@on_trait_change('model:mri_trans')
def _update_mri_axes(self):
    """Refresh the MRI coordinate-frame arrows from the current trans."""
    obj = self.mri_cf_obj
    if obj is None:
        return
    trans = self.model.mri_trans
    # Axis directions: the rotated identity basis (no translation).
    obj.nn = apply_trans(trans, np.eye(3), move=False)
    # Axis origins: the transformed origin, one row per axis.
    obj.points = apply_trans(trans, np.zeros((3, 3)))
@on_trait_change('model:hsp_trans')
def _update_hsp_axes(self):
    """Refresh the head (HSP) coordinate-frame arrows from the trans."""
    obj = self.hsp_cf_obj
    if obj is None:
        return
    trans = self.model.hsp_trans
    # Axis directions: the rotated identity basis (no translation).
    obj.nn = apply_trans(trans, np.eye(3), move=False)
    # Axis origins: the transformed origin, one row per axis.
    obj.points = apply_trans(trans, np.zeros((3, 3)))
@on_trait_change('nearest_calc')
def _update_projection_surf(self):
    """Rebuild the scaled surface used for projection / inside checks."""
    if len(self.model.processed_low_res_mri_points) <= 1:
        # No usable surface yet (e.g. no subject loaded).
        return
    # Scale the low-res MRI points; parameters[6:9] are presumably the
    # per-axis scale factors -- TODO confirm against the model class.
    rr = (self.model.processed_low_res_mri_points *
          self.model.parameters[6:9])
    surf = dict(rr=rr, tris=self.model.mri.bem_low_res.surf.tris,
                nn=self.model.mri.bem_low_res.surf.nn)
    check_inside = _CheckInside(surf)
    nearest = _DistanceQuery(rr)
    # Hand the helpers to every digitizer point object that can project
    # onto the scalp surface.
    for p in (self.eeg_obj, self.hsp_obj, self.hpi_obj):
        if p is not None:
            p.check_inside = check_inside
            p.nearest = nearest
@on_trait_change('model:mri:bem_low_res:surf,head_high_res,'
                 'model:transformed_high_res_mri_points')
def _update_mri_obj(self):
    """Push the currently selected head surface into the scalp object."""
    if self.mri_obj is None:
        return
    # Resolve the resolution once and reuse it for both attribute lookups.
    res = 'high' if self.head_high_res else 'low'
    self.mri_obj.tris = getattr(
        self.model.mri, 'bem_%s_res' % (res,)).surf.tris
    self.mri_obj.points = getattr(
        self.model, 'transformed_%s_res_mri_points' % (res,))
# automatically lock fiducials if a good fiducials file is loaded
@on_trait_change('model:mri:fid_file')
def _on_fid_file_loaded(self):
    """Lock the fiducial panel whenever a fiducials file is set."""
    self.data_panel.fid_panel.locked = bool(self.model.mri.fid_file)
def save_config(self, home_dir=None, size=None):
    """Write configuration values.

    Parameters
    ----------
    home_dir : str | None
        Directory whose config file should be written (None uses the
        default location).
    size : tuple | None
        Optional (width, height) of the window to persist.
    """
    def s_c(key, value, lower=True):
        # Persist *value* as a string, lower-casing only when requested.
        # BUGFIX: the old code applied .lower() a second time inside the
        # set_config call, which mangled case-sensitive values such as
        # subjects_dir even when lower=False.
        value = str(value)
        if lower:
            value = value.lower()
        set_config(key, value, home_dir=home_dir, set_env=False)

    s_c('MNE_COREG_GUESS_MRI_SUBJECT', self.model.guess_mri_subject)
    s_c('MNE_COREG_ADVANCED_RENDERING', self.advanced_rendering)
    s_c('MNE_COREG_HEAD_HIGH_RES', self.head_high_res)
    # While fiducials are unlocked the live values are the temporary
    # "editing" ones; persist the stashed (locked) values instead.
    if self.lock_fiducials:
        opacity = self.mri_obj.opacity
        head_inside = self.head_inside
    else:
        opacity = self._locked_opacity
        head_inside = self._locked_head_inside
    s_c('MNE_COREG_HEAD_INSIDE', head_inside)
    s_c('MNE_COREG_HEAD_OPACITY', opacity)
    if size is not None:
        s_c('MNE_COREG_WINDOW_WIDTH', size[0])
        s_c('MNE_COREG_WINDOW_HEIGHT', size[1])
    s_c('MNE_COREG_SCENE_SCALE', self.data_panel.headview.scale)
    s_c('MNE_COREG_SCALE_LABELS', self.model.scale_labels)
    s_c('MNE_COREG_COPY_ANNOT', self.model.copy_annot)
    s_c('MNE_COREG_PREPARE_BEM', self.model.prepare_bem_model)
    if self.model.mri.subjects_dir:
        # Paths are case-sensitive: do not lower-case.
        s_c('MNE_COREG_SUBJECTS_DIR', self.model.mri.subjects_dir, False)
    s_c('MNE_COREG_PROJECT_EEG', self.project_to_surface)
    s_c('MNE_COREG_ORIENT_TO_SURFACE', self.orient_to_surface)
    s_c('MNE_COREG_SCALE_BY_DISTANCE', self.scale_by_distance)
    s_c('MNE_COREG_MARK_INSIDE', self.mark_inside)
    s_c('MNE_COREG_INTERACTION', self.data_panel.headview.interaction)
|
kambysese/mne-python
|
mne/gui/_coreg_gui.py
|
Python
|
bsd-3-clause
| 89,515
|
[
"Mayavi"
] |
f33b1f235b43b0ea0453472bb0dd9435a270841e09afb0cd227a71f7d97df807
|
#!/usr/bin/env python
########################################################################
# File : dirac-proxy-info.py
# Author : Adrian Casajus
########################################################################
"""
Print information about the current proxy.
Example:
$ dirac-proxy-info
subject : /O=GRID-FR/C=FR/O=CNRS/OU=CPPM/CN=Vanessa Hamar/CN=proxy/CN=proxy
issuer : /O=GRID-FR/C=FR/O=CNRS/OU=CPPM/CN=Vanessa Hamar/CN=proxy
identity : /O=GRID-FR/C=FR/O=CNRS/OU=CPPM/CN=Vanessa Hamar
timeleft : 23:53:55
DIRAC group : dirac_user
path : /tmp/x509up_u40885
username : vhamar
VOMS : True
VOMS fqan : ['/formation']
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__RCSID__ = "$Id$"
import sys
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
from DIRAC.Core.Utilities.ReturnValues import S_OK
class Params(object):
    """Holds the command-line switch state for dirac-proxy-info.

    Each switch registered in ``main`` maps to one callback below; the
    callbacks mutate these class-level defaults and return S_OK().
    """

    # Defaults; each callback below flips exactly one of these.
    proxyLoc = False
    vomsEnabled = True
    csEnabled = True
    steps = False
    checkValid = False
    checkClock = True
    uploadedInfo = False

    def showVersion(self, arg):
        """Print the DIRAC script version and exit immediately."""
        print("Version:")
        print(" ", __RCSID__)
        sys.exit(0)
        # NOTE(review): unreachable after sys.exit(); kept only for
        # signature consistency with the other switch callbacks.
        return S_OK()

    def setProxyLocation(self, arg):
        """Use *arg* as the proxy file location."""
        self.proxyLoc = arg
        return S_OK()

    def disableVOMS(self, arg):
        """Skip querying VOMS information."""
        self.vomsEnabled = False
        return S_OK()

    def disableCS(self, arg):
        """Do not contact the Configuration System."""
        self.csEnabled = False
        return S_OK()

    def showSteps(self, arg):
        """Also print extended per-step certificate chain info."""
        self.steps = True
        return S_OK()

    def validityCheck(self, arg):
        """Exit non-zero if the proxy is invalid."""
        self.checkValid = True
        return S_OK()

    def disableClockCheck(self, arg):
        """Skip the host clock deviation check."""
        self.checkClock = False
        return S_OK()

    def setManagerInfo(self, arg):
        """Also show proxies uploaded to the ProxyManager."""
        self.uploadedInfo = True
        return S_OK()
@DIRACScript()
def main():
    """Entry point: print information about the current proxy."""
    params = Params()
    from DIRAC.Core.Base import Script
    Script.registerSwitch("f:", "file=", "File to use as user key", params.setProxyLocation)
    Script.registerSwitch("i", "version", "Print version", params.showVersion)
    Script.registerSwitch("n", "novoms", "Disable VOMS", params.disableVOMS)
    Script.registerSwitch("v", "checkvalid", "Return error if the proxy is invalid", params.validityCheck)
    Script.registerSwitch("x", "nocs", "Disable CS", params.disableCS)
    Script.registerSwitch("e", "steps", "Show steps info", params.showSteps)
    Script.registerSwitch("j", "noclockcheck", "Disable checking if time is ok", params.disableClockCheck)
    Script.registerSwitch("m", "uploadedinfo", "Show uploaded proxies info", params.setManagerInfo)
    Script.disableCS()
    Script.parseCommandLine()

    # These imports are deferred until after parseCommandLine() so the
    # DIRAC framework is initialized before the modules load.
    from DIRAC.Core.Utilities.NTP import getClockDeviation
    from DIRAC import gLogger
    from DIRAC.Core.Security.ProxyInfo import getProxyInfo, getProxyStepsInfo
    from DIRAC.Core.Security.ProxyInfo import formatProxyInfoAsString, formatProxyStepsInfoAsString
    from DIRAC.Core.Security import VOMS
    from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
    from DIRAC.ConfigurationSystem.Client.Helpers import Registry

    if params.csEnabled:
        retVal = Script.enableCS()
        if not retVal['OK']:
            print("Cannot contact CS to get user list")

    # Warn (but do not fail) if the host clock deviates from NTP time.
    if params.checkClock:
        result = getClockDeviation()
        if result['OK']:
            deviation = result['Value']
            if deviation > 600:
                gLogger.error("Your host clock seems to be off by more than TEN MINUTES! Thats really bad.")
            elif deviation > 180:
                gLogger.error("Your host clock seems to be off by more than THREE minutes! Thats bad.")
            elif deviation > 60:
                gLogger.error("Your host clock seems to be off by more than a minute! Thats not good.")

    result = getProxyInfo(params.proxyLoc, not params.vomsEnabled)
    if not result['OK']:
        gLogger.error(result['Message'])
        sys.exit(1)
    infoDict = result['Value']
    gLogger.notice(formatProxyInfoAsString(infoDict))

    if not infoDict['isProxy']:
        gLogger.error('==============================\n!!! The proxy is not valid !!!')

    if params.steps:
        gLogger.notice("== Steps extended info ==")
        chain = infoDict['chain']
        stepInfo = getProxyStepsInfo(chain)['Value']
        gLogger.notice(formatProxyStepsInfoAsString(stepInfo))

    def invalidProxy(msg):
        # Report a fatal proxy problem and exit with a non-zero status.
        gLogger.error("Invalid proxy:", msg)
        sys.exit(1)

    if params.uploadedInfo:
        result = gProxyManager.getUserProxiesInfo()
        if not result['OK']:
            gLogger.error("Could not retrieve the uploaded proxies info", result['Message'])
        else:
            uploadedInfo = result['Value']
            if not uploadedInfo:
                gLogger.notice("== No proxies uploaded ==")
            if uploadedInfo:
                gLogger.notice("== Proxies uploaded ==")
                maxDNLen = 0
                maxGroupLen = 0
                # First pass: compute column widths for the DN/group table.
                for userDN in uploadedInfo:
                    maxDNLen = max(maxDNLen, len(userDN))
                    for group in uploadedInfo[userDN]:
                        maxGroupLen = max(maxGroupLen, len(group))
                gLogger.notice(" %s | %s | Until (GMT)" % ("DN".ljust(maxDNLen), "Group".ljust(maxGroupLen)))
                for userDN in uploadedInfo:
                    for group in uploadedInfo[userDN]:
                        gLogger.notice(" %s | %s | %s" % (userDN.ljust(maxDNLen),
                                                          group.ljust(maxGroupLen),
                                                          uploadedInfo[userDN][group].strftime("%Y/%m/%d %H:%M")))

    if params.checkValid:
        if infoDict['secondsLeft'] == 0:
            invalidProxy("Proxy is expired")
        if params.csEnabled and not infoDict['validGroup']:
            invalidProxy("Group %s is not valid" % infoDict['group'])
        if 'hasVOMS' in infoDict and infoDict['hasVOMS']:
            requiredVOMS = Registry.getVOMSAttributeForGroup(infoDict['group'])
            if 'VOMS' not in infoDict or not infoDict['VOMS']:
                invalidProxy("Unable to retrieve VOMS extension")
            if len(infoDict['VOMS']) > 1:
                invalidProxy("More than one voms attribute found")
            if requiredVOMS not in infoDict['VOMS']:
                invalidProxy("Unexpected VOMS extension %s. Extension expected for DIRAC group is %s" % (
                    infoDict['VOMS'][0],
                    requiredVOMS))
            result = VOMS.VOMS().getVOMSProxyInfo(infoDict['chain'], 'actimeleft')
            if not result['OK']:
                invalidProxy("Cannot determine life time of VOMS attributes: %s" % result['Message'])
            if int(result['Value'].strip()) == 0:
                invalidProxy("VOMS attributes are expired")

    sys.exit(0)
# Standard script entry point.
if __name__ == "__main__":
    main()
|
yujikato/DIRAC
|
src/DIRAC/FrameworkSystem/scripts/dirac_proxy_info.py
|
Python
|
gpl-3.0
| 6,434
|
[
"DIRAC"
] |
26f1a9e3550074fa0a3948fa6ddda6da14d1e6c3157432c150186dd630c0c18d
|
# -*- coding: utf-8 -*-
"""
Regression tests for the Test Client, especially the customized assertions.
"""
import os
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.core.urlresolvers import reverse
from django.template import (TemplateDoesNotExist, TemplateSyntaxError,
Context, loader)
from django.test import TestCase, Client
from django.test.client import encode_file
from django.test.utils import ContextList
class AssertContainsTests(TestCase):
def setUp(self):
self.old_templates = settings.TEMPLATE_DIRS
settings.TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'templates'),)
def tearDown(self):
settings.TEMPLATE_DIRS = self.old_templates
def test_contains(self):
"Responses can be inspected for content, including counting repeated substrings"
response = self.client.get('/test_client_regress/no_template_view/')
self.assertNotContains(response, 'never')
self.assertContains(response, 'never', 0)
self.assertContains(response, 'once')
self.assertContains(response, 'once', 1)
self.assertContains(response, 'twice')
self.assertContains(response, 'twice', 2)
try:
self.assertContains(response, 'text', status_code=999)
except AssertionError, e:
self.assertEquals(str(e), "Couldn't retrieve page: Response code was 200 (expected 999)")
try:
self.assertContains(response, 'text', status_code=999, msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Couldn't retrieve page: Response code was 200 (expected 999)")
try:
self.assertNotContains(response, 'text', status_code=999)
except AssertionError, e:
self.assertEquals(str(e), "Couldn't retrieve page: Response code was 200 (expected 999)")
try:
self.assertNotContains(response, 'text', status_code=999, msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Couldn't retrieve page: Response code was 200 (expected 999)")
try:
self.assertNotContains(response, 'once')
except AssertionError, e:
self.assertEquals(str(e), "Response should not contain 'once'")
try:
self.assertNotContains(response, 'once', msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Response should not contain 'once'")
try:
self.assertContains(response, 'never', 1)
except AssertionError, e:
self.assertEquals(str(e), "Found 0 instances of 'never' in response (expected 1)")
try:
self.assertContains(response, 'never', 1, msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Found 0 instances of 'never' in response (expected 1)")
try:
self.assertContains(response, 'once', 0)
except AssertionError, e:
self.assertEquals(str(e), "Found 1 instances of 'once' in response (expected 0)")
try:
self.assertContains(response, 'once', 0, msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Found 1 instances of 'once' in response (expected 0)")
try:
self.assertContains(response, 'once', 2)
except AssertionError, e:
self.assertEquals(str(e), "Found 1 instances of 'once' in response (expected 2)")
try:
self.assertContains(response, 'once', 2, msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Found 1 instances of 'once' in response (expected 2)")
try:
self.assertContains(response, 'twice', 1)
except AssertionError, e:
self.assertEquals(str(e), "Found 2 instances of 'twice' in response (expected 1)")
try:
self.assertContains(response, 'twice', 1, msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Found 2 instances of 'twice' in response (expected 1)")
try:
self.assertContains(response, 'thrice')
except AssertionError, e:
self.assertEquals(str(e), "Couldn't find 'thrice' in response")
try:
self.assertContains(response, 'thrice', msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Couldn't find 'thrice' in response")
try:
self.assertContains(response, 'thrice', 3)
except AssertionError, e:
self.assertEquals(str(e), "Found 0 instances of 'thrice' in response (expected 3)")
try:
self.assertContains(response, 'thrice', 3, msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Found 0 instances of 'thrice' in response (expected 3)")
def test_unicode_contains(self):
"Unicode characters can be found in template context"
#Regression test for #10183
r = self.client.get('/test_client_regress/check_unicode/')
self.assertContains(r, u'さかき')
self.assertContains(r, '\xe5\xb3\xa0'.decode('utf-8'))
def test_unicode_not_contains(self):
"Unicode characters can be searched for, and not found in template context"
#Regression test for #10183
r = self.client.get('/test_client_regress/check_unicode/')
self.assertNotContains(r, u'はたけ')
self.assertNotContains(r, '\xe3\x81\xaf\xe3\x81\x9f\xe3\x81\x91'.decode('utf-8'))
class AssertTemplateUsedTests(TestCase):
fixtures = ['testdata.json']
def test_no_context(self):
"Template usage assertions work then templates aren't in use"
response = self.client.get('/test_client_regress/no_template_view/')
# Check that the no template case doesn't mess with the template assertions
self.assertTemplateNotUsed(response, 'GET Template')
try:
self.assertTemplateUsed(response, 'GET Template')
except AssertionError, e:
self.assertEquals(str(e), "No templates used to render the response")
try:
self.assertTemplateUsed(response, 'GET Template', msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: No templates used to render the response")
def test_single_context(self):
"Template assertions work when there is a single context"
response = self.client.get('/test_client/post_view/', {})
try:
self.assertTemplateNotUsed(response, 'Empty GET Template')
except AssertionError, e:
self.assertEquals(str(e), "Template 'Empty GET Template' was used unexpectedly in rendering the response")
try:
self.assertTemplateNotUsed(response, 'Empty GET Template', msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Template 'Empty GET Template' was used unexpectedly in rendering the response")
try:
self.assertTemplateUsed(response, 'Empty POST Template')
except AssertionError, e:
self.assertEquals(str(e), "Template 'Empty POST Template' was not a template used to render the response. Actual template(s) used: Empty GET Template")
try:
self.assertTemplateUsed(response, 'Empty POST Template', msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Template 'Empty POST Template' was not a template used to render the response. Actual template(s) used: Empty GET Template")
def test_multiple_context(self):
"Template assertions work when there are multiple contexts"
post_data = {
'text': 'Hello World',
'email': 'foo@example.com',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view_with_template/', post_data)
self.assertContains(response, 'POST data OK')
try:
self.assertTemplateNotUsed(response, "form_view.html")
except AssertionError, e:
self.assertEquals(str(e), "Template 'form_view.html' was used unexpectedly in rendering the response")
try:
self.assertTemplateNotUsed(response, 'base.html')
except AssertionError, e:
self.assertEquals(str(e), "Template 'base.html' was used unexpectedly in rendering the response")
try:
self.assertTemplateUsed(response, "Valid POST Template")
except AssertionError, e:
self.assertEquals(str(e), "Template 'Valid POST Template' was not a template used to render the response. Actual template(s) used: form_view.html, base.html")
class AssertRedirectsTests(TestCase):
def test_redirect_page(self):
"An assertion is raised if the original page couldn't be retrieved as expected"
# This page will redirect with code 301, not 302
response = self.client.get('/test_client/permanent_redirect_view/')
try:
self.assertRedirects(response, '/test_client/get_view/')
except AssertionError, e:
self.assertEquals(str(e), "Response didn't redirect as expected: Response code was 301 (expected 302)")
try:
self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Response didn't redirect as expected: Response code was 301 (expected 302)")
def test_lost_query(self):
"An assertion is raised if the redirect location doesn't preserve GET parameters"
response = self.client.get('/test_client/redirect_view/', {'var': 'value'})
try:
self.assertRedirects(response, '/test_client/get_view/')
except AssertionError, e:
self.assertEquals(str(e), "Response redirected to 'http://testserver/test_client/get_view/?var=value', expected 'http://testserver/test_client/get_view/'")
try:
self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Response redirected to 'http://testserver/test_client/get_view/?var=value', expected 'http://testserver/test_client/get_view/'")
def test_incorrect_target(self):
"An assertion is raised if the response redirects to another target"
response = self.client.get('/test_client/permanent_redirect_view/')
try:
# Should redirect to get_view
self.assertRedirects(response, '/test_client/some_view/')
except AssertionError, e:
self.assertEquals(str(e), "Response didn't redirect as expected: Response code was 301 (expected 302)")
def test_target_page(self):
"An assertion is raised if the response redirect target cannot be retrieved as expected"
response = self.client.get('/test_client/double_redirect_view/')
try:
# The redirect target responds with a 301 code, not 200
self.assertRedirects(response, 'http://testserver/test_client/permanent_redirect_view/')
except AssertionError, e:
self.assertEquals(str(e), "Couldn't retrieve redirection page '/test_client/permanent_redirect_view/': response code was 301 (expected 200)")
try:
# The redirect target responds with a 301 code, not 200
self.assertRedirects(response, 'http://testserver/test_client/permanent_redirect_view/', msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Couldn't retrieve redirection page '/test_client/permanent_redirect_view/': response code was 301 (expected 200)")
def test_redirect_chain(self):
"You can follow a redirect chain of multiple redirects"
response = self.client.get('/test_client_regress/redirects/further/more/', {}, follow=True)
self.assertRedirects(response, '/test_client_regress/no_template_view/',
status_code=301, target_status_code=200)
self.assertEquals(len(response.redirect_chain), 1)
self.assertEquals(response.redirect_chain[0], ('http://testserver/test_client_regress/no_template_view/', 301))
def test_multiple_redirect_chain(self):
"You can follow a redirect chain of multiple redirects"
response = self.client.get('/test_client_regress/redirects/', {}, follow=True)
self.assertRedirects(response, '/test_client_regress/no_template_view/',
status_code=301, target_status_code=200)
self.assertEquals(len(response.redirect_chain), 3)
self.assertEquals(response.redirect_chain[0], ('http://testserver/test_client_regress/redirects/further/', 301))
self.assertEquals(response.redirect_chain[1], ('http://testserver/test_client_regress/redirects/further/more/', 301))
self.assertEquals(response.redirect_chain[2], ('http://testserver/test_client_regress/no_template_view/', 301))
def test_redirect_chain_to_non_existent(self):
"You can follow a chain to a non-existent view"
response = self.client.get('/test_client_regress/redirect_to_non_existent_view2/', {}, follow=True)
self.assertRedirects(response, '/test_client_regress/non_existent_view/',
status_code=301, target_status_code=404)
def test_redirect_chain_to_self(self):
"Redirections to self are caught and escaped"
response = self.client.get('/test_client_regress/redirect_to_self/', {}, follow=True)
# The chain of redirects stops once the cycle is detected.
self.assertRedirects(response, '/test_client_regress/redirect_to_self/',
status_code=301, target_status_code=301)
self.assertEquals(len(response.redirect_chain), 2)
def test_circular_redirect(self):
"Circular redirect chains are caught and escaped"
response = self.client.get('/test_client_regress/circular_redirect_1/', {}, follow=True)
# The chain of redirects will get back to the starting point, but stop there.
self.assertRedirects(response, '/test_client_regress/circular_redirect_2/',
status_code=301, target_status_code=301)
self.assertEquals(len(response.redirect_chain), 4)
def test_redirect_chain_post(self):
"A redirect chain will be followed from an initial POST post"
response = self.client.post('/test_client_regress/redirects/',
{'nothing': 'to_send'}, follow=True)
self.assertRedirects(response,
'/test_client_regress/no_template_view/', 301, 200)
self.assertEquals(len(response.redirect_chain), 3)
def test_redirect_chain_head(self):
"A redirect chain will be followed from an initial HEAD request"
response = self.client.head('/test_client_regress/redirects/',
{'nothing': 'to_send'}, follow=True)
self.assertRedirects(response,
'/test_client_regress/no_template_view/', 301, 200)
self.assertEquals(len(response.redirect_chain), 3)
def test_redirect_chain_options(self):
"A redirect chain will be followed from an initial OPTIONS request"
response = self.client.options('/test_client_regress/redirects/',
{'nothing': 'to_send'}, follow=True)
self.assertRedirects(response,
'/test_client_regress/no_template_view/', 301, 200)
self.assertEquals(len(response.redirect_chain), 3)
def test_redirect_chain_put(self):
"A redirect chain will be followed from an initial PUT request"
response = self.client.put('/test_client_regress/redirects/',
{'nothing': 'to_send'}, follow=True)
self.assertRedirects(response,
'/test_client_regress/no_template_view/', 301, 200)
self.assertEquals(len(response.redirect_chain), 3)
def test_redirect_chain_delete(self):
"A redirect chain will be followed from an initial DELETE request"
response = self.client.delete('/test_client_regress/redirects/',
{'nothing': 'to_send'}, follow=True)
self.assertRedirects(response,
'/test_client_regress/no_template_view/', 301, 200)
self.assertEquals(len(response.redirect_chain), 3)
def test_redirect_chain_on_non_redirect_page(self):
"An assertion is raised if the original page couldn't be retrieved as expected"
# This page will redirect with code 301, not 302
response = self.client.get('/test_client/get_view/', follow=True)
try:
self.assertRedirects(response, '/test_client/get_view/')
except AssertionError, e:
self.assertEquals(str(e), "Response didn't redirect as expected: Response code was 200 (expected 302)")
try:
self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Response didn't redirect as expected: Response code was 200 (expected 302)")
def test_redirect_on_non_redirect_page(self):
"An assertion is raised if the original page couldn't be retrieved as expected"
# This page will redirect with code 301, not 302
response = self.client.get('/test_client/get_view/')
try:
self.assertRedirects(response, '/test_client/get_view/')
except AssertionError, e:
self.assertEquals(str(e), "Response didn't redirect as expected: Response code was 200 (expected 302)")
try:
self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Response didn't redirect as expected: Response code was 200 (expected 302)")
class AssertFormErrorTests(TestCase):
def test_unknown_form(self):
    "An assertion is raised if the form name is unknown"
    post_data = {
        'text': 'Hello World',
        'email': 'not an email address',
        'value': 37,
        'single': 'b',
        'multi': ('b','c','e')
    }
    response = self.client.post('/test_client/form_view/', post_data)
    self.assertEqual(response.status_code, 200)
    self.assertTemplateUsed(response, "Invalid POST Template")
    # NOTE(review): this try/except idiom passes silently if the
    # assertion unexpectedly succeeds (no AssertionError raised).
    try:
        self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.')
    except AssertionError, e:
        self.assertEqual(str(e), "The form 'wrong_form' was not used to render the response")
    try:
        self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.', msg_prefix='abc')
    except AssertionError, e:
        self.assertEqual(str(e), "abc: The form 'wrong_form' was not used to render the response")
def test_unknown_field(self):
"An assertion is raised if the field name is unknown"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'some_field', 'Some error.')
except AssertionError, e:
self.assertEqual(str(e), "The form 'form' in context 0 does not contain the field 'some_field'")
try:
self.assertFormError(response, 'form', 'some_field', 'Some error.', msg_prefix='abc')
except AssertionError, e:
self.assertEqual(str(e), "abc: The form 'form' in context 0 does not contain the field 'some_field'")
def test_noerror_field(self):
"An assertion is raised if the field doesn't have any errors"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'value', 'Some error.')
except AssertionError, e:
self.assertEqual(str(e), "The field 'value' on form 'form' in context 0 contains no errors")
try:
self.assertFormError(response, 'form', 'value', 'Some error.', msg_prefix='abc')
except AssertionError, e:
self.assertEqual(str(e), "abc: The field 'value' on form 'form' in context 0 contains no errors")
def test_unknown_error(self):
"An assertion is raised if the field doesn't contain the provided error"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'email', 'Some error.')
except AssertionError, e:
self.assertEqual(str(e), "The field 'email' on form 'form' in context 0 does not contain the error 'Some error.' (actual errors: [u'Enter a valid e-mail address.'])")
try:
self.assertFormError(response, 'form', 'email', 'Some error.', msg_prefix='abc')
except AssertionError, e:
self.assertEqual(str(e), "abc: The field 'email' on form 'form' in context 0 does not contain the error 'Some error.' (actual errors: [u'Enter a valid e-mail address.'])")
def test_unknown_nonfield_error(self):
"""
Checks that an assertion is raised if the form's non field errors
doesn't contain the provided error.
"""
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', None, 'Some error.')
except AssertionError, e:
self.assertEqual(str(e), "The form 'form' in context 0 does not contain the non-field error 'Some error.' (actual errors: )")
try:
self.assertFormError(response, 'form', None, 'Some error.', msg_prefix='abc')
except AssertionError, e:
self.assertEqual(str(e), "abc: The form 'form' in context 0 does not contain the non-field error 'Some error.' (actual errors: )")
class LoginTests(TestCase):
    """Authentication state is tracked per Client instance."""
    # Provides the 'testclient' user used below.
    fixtures = ['testdata']

    def test_login_different_client(self):
        "Check that using a different test client doesn't violate authentication"

        # Create a second client, and log in.
        c = Client()
        login = c.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')

        # Get a redirection page with the second client.
        response = c.get("/test_client_regress/login_protected_redirect_view/")

        # At this point, the self.client isn't logged in.
        # Check that assertRedirects uses the original client, not the
        # default client.
        self.assertRedirects(response, "http://testserver/test_client_regress/get_view/")
class SessionEngineTests(TestCase):
    """Login works with a custom (key-modifying) SESSION_ENGINE."""
    fixtures = ['testdata']

    def setUp(self):
        # Swap in the test session engine; restored in tearDown.
        self.old_SESSION_ENGINE = settings.SESSION_ENGINE
        settings.SESSION_ENGINE = 'regressiontests.test_client_regress.session'

    def tearDown(self):
        settings.SESSION_ENGINE = self.old_SESSION_ENGINE

    def test_login(self):
        "A session engine that modifies the session key can be used to log in"
        login = self.client.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')

        # Try to access a login protected page.
        response = self.client.get("/test_client/login_protected_view/")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testclient')
class URLEscapingTests(TestCase):
    """URL arguments that need escaping survive reverse() + client requests."""

    def test_simple_argument_get(self):
        "Get a view that has a simple string argument"
        response = self.client.get(reverse('arg_view', args=['Slartibartfast']))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, 'Howdy, Slartibartfast')

    def test_argument_with_space_get(self):
        "Get a view that has a string argument that requires escaping"
        # 'Arthur Dent' contains a space, which must be URL-escaped.
        response = self.client.get(reverse('arg_view', args=['Arthur Dent']))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, 'Hi, Arthur')

    def test_simple_argument_post(self):
        "Post for a view that has a simple string argument"
        response = self.client.post(reverse('arg_view', args=['Slartibartfast']))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, 'Howdy, Slartibartfast')

    def test_argument_with_space_post(self):
        "Post for a view that has a string argument that requires escaping"
        response = self.client.post(reverse('arg_view', args=['Arthur Dent']))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, 'Hi, Arthur')
class ExceptionTests(TestCase):
    """The test client clears stale exceptions between requests (#5836)."""
    fixtures = ['testdata.json']

    def test_exception_cleared(self):
        "#5836 - A stale user exception isn't re-raised by the test client."
        login = self.client.login(username='testclient',password='password')
        self.assertTrue(login, 'Could not log in')
        # A regular user hitting the staff-only view must raise.
        try:
            response = self.client.get("/test_client_regress/staff_only/")
            self.fail("General users should not be able to visit this page")
        except SuspiciousOperation:
            pass

        # At this point, an exception has been raised, and should be cleared.

        # This next operation should be successful; if it isn't we have a problem.
        login = self.client.login(username='staff', password='password')
        self.assertTrue(login, 'Could not log in')
        try:
            self.client.get("/test_client_regress/staff_only/")
        except SuspiciousOperation:
            self.fail("Staff should be able to visit this page")
class TemplateExceptionTests(TestCase):
    """Template errors during 404 rendering are surfaced to the test client."""

    def setUp(self):
        # Reset the loaders so they don't try to render cached templates.
        if loader.template_source_loaders is not None:
            for template_loader in loader.template_source_loaders:
                if hasattr(template_loader, 'reset'):
                    template_loader.reset()
        # Empty TEMPLATE_DIRS so no 404 template can be found by default.
        self.old_templates = settings.TEMPLATE_DIRS
        settings.TEMPLATE_DIRS = ()

    def tearDown(self):
        settings.TEMPLATE_DIRS = self.old_templates

    def test_no_404_template(self):
        "Missing templates are correctly reported by test client"
        try:
            response = self.client.get("/no_such_view/")
            self.fail("Should get error about missing template")
        except TemplateDoesNotExist:
            pass

    def test_bad_404_template(self):
        "Errors found when rendering 404 error templates are re-raised"
        # Point at a directory that contains a syntactically broken template.
        settings.TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'bad_templates'),)
        try:
            response = self.client.get("/no_such_view/")
            self.fail("Should get error about syntax error in template")
        except TemplateSyntaxError:
            pass
# We need two different tests to check URLconf substitution - one to check
# it was changed, and another one (without self.urls) to check it was reverted on
# teardown. This pair of tests relies upon the alphabetical ordering of test execution.
class UrlconfSubstitutionTests(TestCase):
    """First half of the URLconf-substitution pair: the override is active."""
    # Per-test URLconf override honored by TestCase.
    urls = 'regressiontests.test_client_regress.urls'

    def test_urlconf_was_changed(self):
        "TestCase can enforce a custom URLconf on a per-test basis"
        url = reverse('arg_view', args=['somename'])
        self.assertEquals(url, '/arg_view/somename/')
# This test needs to run *after* UrlconfSubstitutionTests; the zz prefix in the
# name is to ensure alphabetical ordering.
class zzUrlconfSubstitutionTests(TestCase):
    """Second half of the pair: the override from the class above is gone."""

    def test_urlconf_was_reverted(self):
        "URLconf is reverted to original value after modification in a TestCase"
        url = reverse('arg_view', args=['somename'])
        self.assertEquals(url, '/test_client_regress/arg_view/somename/')
class ContextTests(TestCase):
    """Template context variables are reachable through response.context.

    Uses the Python 2.6+/3.x compatible "except ... as" syntax and the
    non-deprecated assertEqual spelling.
    """
    fixtures = ['testdata']

    def test_single_context(self):
        "Context variables can be retrieved from a single context"
        response = self.client.get("/test_client_regress/request_data/", data={'foo':'whiz'})
        self.assertEqual(response.context.__class__, Context)
        self.assertTrue('get-foo' in response.context)
        self.assertEqual(response.context['get-foo'], 'whiz')
        self.assertEqual(response.context['request-foo'], 'whiz')
        self.assertEqual(response.context['data'], 'sausage')

        # Unknown keys raise KeyError carrying the key as the first argument.
        try:
            response.context['does-not-exist']
            self.fail('Should not be able to retrieve non-existent key')
        except KeyError as e:
            self.assertEqual(e.args[0], 'does-not-exist')

    def test_inherited_context(self):
        "Context variables can be retrieved from a list of contexts"
        response = self.client.get("/test_client_regress/request_data_extended/", data={'foo':'whiz'})
        self.assertEqual(response.context.__class__, ContextList)
        self.assertEqual(len(response.context), 2)
        self.assertTrue('get-foo' in response.context)
        self.assertEqual(response.context['get-foo'], 'whiz')
        self.assertEqual(response.context['request-foo'], 'whiz')
        self.assertEqual(response.context['data'], 'bacon')

        try:
            response.context['does-not-exist']
            self.fail('Should not be able to retrieve non-existent key')
        except KeyError as e:
            self.assertEqual(e.args[0], 'does-not-exist')
class SessionTests(TestCase):
    """Session state persists across requests and across login/logout."""
    fixtures = ['testdata.json']

    def test_session(self):
        "The session isn't lost if a user logs in"
        # The session doesn't exist to start.
        response = self.client.get('/test_client_regress/check_session/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, 'NO')

        # This request sets a session variable.
        response = self.client.get('/test_client_regress/set_session/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, 'set_session')

        # Check that the session has been modified
        response = self.client.get('/test_client_regress/check_session/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, 'YES')

        # Log in
        login = self.client.login(username='testclient',password='password')
        self.assertTrue(login, 'Could not log in')

        # Session should still contain the modified value
        response = self.client.get('/test_client_regress/check_session/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, 'YES')

    def test_logout(self):
        """Logout should work whether the user is logged in or not (#9978)."""
        # Logging out with no active session must not raise.
        self.client.logout()
        login = self.client.login(username='testclient',password='password')
        self.assertTrue(login, 'Could not log in')
        self.client.logout()
        # A second, redundant logout must also be harmless.
        self.client.logout()
class RequestMethodTests(TestCase):
    """Each Client helper issues the corresponding HTTP method."""

    def test_get(self):
        "Request a view via request method GET"
        response = self.client.get('/test_client_regress/request_methods/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, 'request method: GET')

    def test_post(self):
        "Request a view via request method POST"
        response = self.client.post('/test_client_regress/request_methods/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, 'request method: POST')

    def test_head(self):
        "Request a view via request method HEAD"
        response = self.client.head('/test_client_regress/request_methods/')
        self.assertEqual(response.status_code, 200)
        # A HEAD request doesn't return any content.
        self.assertNotEqual(response.content, 'request method: HEAD')
        self.assertEqual(response.content, '')

    def test_options(self):
        "Request a view via request method OPTIONS"
        response = self.client.options('/test_client_regress/request_methods/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, 'request method: OPTIONS')

    def test_put(self):
        "Request a view via request method PUT"
        response = self.client.put('/test_client_regress/request_methods/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, 'request method: PUT')

    def test_delete(self):
        "Request a view via request method DELETE"
        response = self.client.delete('/test_client_regress/request_methods/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, 'request method: DELETE')
class RequestMethodStringDataTests(TestCase):
    """POST/PUT accept a raw string body with an explicit content type (#11371)."""

    def test_post(self):
        "Request a view with string data via request method POST"
        # Regression test for #11371
        data = u'{"test": "json"}'
        response = self.client.post('/test_client_regress/request_methods/', data=data, content_type='application/json')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, 'request method: POST')

    def test_put(self):
        "Request a view with string data via request method PUT"
        # Regression test for #11371
        data = u'{"test": "json"}'
        response = self.client.put('/test_client_regress/request_methods/', data=data, content_type='application/json')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, 'request method: PUT')
class QueryStringTests(TestCase):
    """Interaction of URL query strings with the data argument, per method."""

    def test_get_like_requests(self):
        # All of these methods carry their payload in the query string.
        for method_name in ('get','head','options','put','delete'):
            # A GET-like request can pass a query string as data
            method = getattr(self.client, method_name)
            response = method("/test_client_regress/request_data/", data={'foo':'whiz'})
            self.assertEqual(response.context['get-foo'], 'whiz')
            self.assertEqual(response.context['request-foo'], 'whiz')

            # A GET-like request can pass a query string as part of the URL
            response = method("/test_client_regress/request_data/?foo=whiz")
            self.assertEqual(response.context['get-foo'], 'whiz')
            self.assertEqual(response.context['request-foo'], 'whiz')

            # Data provided in the URL to a GET-like request is overridden by actual form data
            response = method("/test_client_regress/request_data/?foo=whiz", data={'foo':'bang'})
            self.assertEqual(response.context['get-foo'], 'bang')
            self.assertEqual(response.context['request-foo'], 'bang')

            # ...even when the data argument uses different keys entirely.
            response = method("/test_client_regress/request_data/?foo=whiz", data={'bar':'bang'})
            self.assertEqual(response.context['get-foo'], None)
            self.assertEqual(response.context['get-bar'], 'bang')
            self.assertEqual(response.context['request-foo'], None)
            self.assertEqual(response.context['request-bar'], 'bang')

    def test_post_like_requests(self):
        # A POST-like request can pass a query string as data
        response = self.client.post("/test_client_regress/request_data/", data={'foo':'whiz'})
        self.assertEqual(response.context['get-foo'], None)
        self.assertEqual(response.context['post-foo'], 'whiz')

        # A POST-like request can pass a query string as part of the URL
        response = self.client.post("/test_client_regress/request_data/?foo=whiz")
        self.assertEqual(response.context['get-foo'], 'whiz')
        self.assertEqual(response.context['post-foo'], None)
        self.assertEqual(response.context['request-foo'], 'whiz')

        # POST data provided in the URL augments actual form data
        response = self.client.post("/test_client_regress/request_data/?foo=whiz", data={'foo':'bang'})
        self.assertEqual(response.context['get-foo'], 'whiz')
        self.assertEqual(response.context['post-foo'], 'bang')
        self.assertEqual(response.context['request-foo'], 'bang')

        response = self.client.post("/test_client_regress/request_data/?foo=whiz", data={'bar':'bang'})
        self.assertEqual(response.context['get-foo'], 'whiz')
        self.assertEqual(response.context['get-bar'], None)
        self.assertEqual(response.context['post-foo'], None)
        self.assertEqual(response.context['post-bar'], 'bang')
        self.assertEqual(response.context['request-foo'], 'whiz')
        self.assertEqual(response.context['request-bar'], 'bang')
class UnicodePayloadTests(TestCase):
    """Unicode request bodies round-trip under various charsets (#10571)."""

    def test_simple_unicode_payload(self):
        "A simple ASCII-only unicode JSON document can be POSTed"
        # Regression test for #10571
        json = u'{"english": "mountain pass"}'
        response = self.client.post("/test_client_regress/parse_unicode_json/", json,
                                    content_type="application/json")
        self.assertEqual(response.content, json)

    def test_unicode_payload_utf8(self):
        "A non-ASCII unicode data encoded as UTF-8 can be POSTed"
        # Regression test for #10571
        json = u'{"dog": "собака"}'
        response = self.client.post("/test_client_regress/parse_unicode_json/", json,
                                    content_type="application/json; charset=utf-8")
        self.assertEqual(response.content, json.encode('utf-8'))

    def test_unicode_payload_utf16(self):
        "A non-ASCII unicode data encoded as UTF-16 can be POSTed"
        # Regression test for #10571
        json = u'{"dog": "собака"}'
        response = self.client.post("/test_client_regress/parse_unicode_json/", json,
                                    content_type="application/json; charset=utf-16")
        self.assertEqual(response.content, json.encode('utf-16'))

    def test_unicode_payload_non_utf(self):
        "A non-ASCII unicode data as a non-UTF based encoding can be POSTed"
        # Regression test for #10571
        json = u'{"dog": "собака"}'
        response = self.client.post("/test_client_regress/parse_unicode_json/", json,
                                    content_type="application/json; charset=koi8-r")
        self.assertEqual(response.content, json.encode('koi8-r'))
class DummyFile(object):
    """Minimal file-like stand-in used by the upload-encoding tests.

    Exposes only the two members the encoder touches: a ``name`` attribute
    and a ``read()`` method returning a fixed sentinel string.
    """

    def __init__(self, filename):
        # Only the name is recorded; the content is a constant.
        self.name = filename

    def read(self):
        # Fixed payload, so tests can assert on the encoded output exactly.
        return 'TEST_FILE_CONTENT'
class UploadedFileEncodingTest(TestCase):
    """encode_file produces correct multipart sections and content types."""

    def test_file_encoding(self):
        # encode_file returns the multipart section as a list of lines:
        # boundary first, disposition header next, file content last.
        encoded_file = encode_file('TEST_BOUNDARY', 'TEST_KEY', DummyFile('test_name.bin'))
        self.assertEqual('--TEST_BOUNDARY', encoded_file[0])
        self.assertEqual('Content-Disposition: form-data; name="TEST_KEY"; filename="test_name.bin"', encoded_file[1])
        self.assertEqual('TEST_FILE_CONTENT', encoded_file[-1])

    def test_guesses_content_type_on_file_encoding(self):
        # The Content-Type line (index 2) is guessed from the file extension;
        # unknown extensions fall back to application/octet-stream.
        self.assertEqual('Content-Type: application/octet-stream',
                         encode_file('IGNORE', 'IGNORE', DummyFile("file.bin"))[2])
        self.assertEqual('Content-Type: text/plain',
                         encode_file('IGNORE', 'IGNORE', DummyFile("file.txt"))[2])
        self.assertEqual('Content-Type: application/zip',
                         encode_file('IGNORE', 'IGNORE', DummyFile("file.zip"))[2])
        self.assertEqual('Content-Type: application/octet-stream',
                         encode_file('IGNORE', 'IGNORE', DummyFile("file.unknown"))[2])
class RequestHeadersTest(TestCase):
    """Extra WSGI environ headers reach the view and survive redirects."""

    def test_client_headers(self):
        "A test client can receive custom headers"
        response = self.client.get("/test_client_regress/check_headers/", HTTP_X_ARG_CHECK='Testing 123')
        self.assertEquals(response.content, "HTTP_X_ARG_CHECK: Testing 123")
        self.assertEquals(response.status_code, 200)

    def test_client_headers_redirect(self):
        "Test client headers are preserved through redirects"
        response = self.client.get("/test_client_regress/check_headers_redirect/", follow=True, HTTP_X_ARG_CHECK='Testing 123')
        self.assertEquals(response.content, "HTTP_X_ARG_CHECK: Testing 123")
        # The initial response is a permanent (301) redirect.
        self.assertRedirects(response, '/test_client_regress/check_headers/',
                             status_code=301, target_status_code=200)
|
350dotorg/Django
|
tests/regressiontests/test_client_regress/models.py
|
Python
|
bsd-3-clause
| 41,597
|
[
"VisIt"
] |
4346c977581195ed8735f71c2182c68fe61da639aea9713facdb9e7a8597ab7b
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Control flow graph (CFG) structure for Python AST representation.
The CFG is a digraph with edges representing valid control flow. Each
node is associated with exactly one AST node, but not all AST nodes may have
a corresponding CFG counterpart.
Once built, the CFG itself is immutable, but the values it holds need not be;
they are usually annotated with information extracted by walking the graph.
Tip: Use `Graph.as_dot` to visualize the CFG using any DOT viewer.
Note: the CFG tries to include all code paths that MAY be taken, with a single
notable exception:
* function calls do not generate edges corresponding to exceptions they may
raise (i.e. a function call in the middle of a block does not return or jump
to any except or finally block)
TODO(mdan): Consider adding the edges above. They'd only add ~O(n) edges.
"""
# TODO(mdan): The notion of 'statements' below is inaccurate.
# They should rather be called 'block statements', because they include
# statements that may have a body, e.g. if and while.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import weakref
from enum import Enum
import gast
import six
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
class Node(object):
  """A single node of the control flow graph.

  Fresh Node instances are mutable while the graph is being built; once the
  owning graph calls `freeze`, `next` becomes a frozenset and `prev` a
  WeakSet, and the node should be treated as immutable. The invariant
  "child in node.next" iff "node in child.prev" holds for all nodes.

  Attributes:
    next: FrozenSet[Node, ...], nodes that follow this one in control flow
      order
    prev: FrozenSet[Node, ...], nodes that precede this one, in reverse
      control flow order
    ast_node: ast.AST, the AST node this CFG node corresponds to
  """

  def __init__(self, next_, prev, ast_node):
    self.next = next_
    self.prev = prev
    self.ast_node = ast_node

  def freeze(self):
    self.next = frozenset(self.next)
    # Back-references are weak: all CFG nodes share the graph's lifetime, so
    # strong prev links would only create reference cycles.
    self.prev = weakref.WeakSet(self.prev)

  def __repr__(self):
    node = self.ast_node
    if isinstance(node, (gast.FunctionDef, gast.ClassDef)):
      keyword = 'def' if isinstance(node, gast.FunctionDef) else 'class'
      return '%s %s' % (keyword, node.name)
    if isinstance(node, gast.withitem):
      # Render only the context expression of a with item.
      node = node.context_expr
    return parser.unparse(node, include_encoding_marker=False).strip()
class Graph(
    collections.namedtuple(
        'Graph',
        ['entry', 'exit', 'error', 'index', 'stmt_prev', 'stmt_next'])):
  """An immutable control flow graph.

  The `index` maps each AST node to its CFG node and enumerates in top-down,
  depth-first order; double parent-child links allow walking forward or in
  reverse. Error nodes are deliberately not wired to their finally guards:
  the guards are shared, and wiring them would open a reverse path from
  normal control flow into the error nodes.

  Statement-level edges are kept as well: a node is a successor of a
  statement when an edge leads from a node lexically inside that statement
  to one outside it; predecessors are defined symmetrically.

  Attributes:
    entry: Node, the entry node
    exit: FrozenSet[Node, ...], the exit nodes
    error: FrozenSet[Node, ...], nodes that exit due to an explicitly raised
      error (errors propagated from function calls are not accounted)
    index: Dict[ast.Node, Node], mapping AST nodes to the respective CFG
      node
    stmt_prev: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST
      nodes to their predecessor CFG nodes
    stmt_next: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST
      nodes to their successor CFG nodes
  """

  def __repr__(self):
    return self.as_dot()

  def as_dot(self):
    """Renders the CFG in DOT format for any Graphviz viewer."""
    lines = ['digraph CFG {']
    # First all vertices (labelled with the node's repr)...
    for node in self.index.values():
      lines.append('  %s [label="%s"];' % (id(node), node))
    # ...then all edges, identified by object ids.
    for node in self.index.values():
      for next_ in node.next:
        lines.append('  %s -> %s;' % (id(node), id(next_)))
    return '\n'.join(lines) + '\n}'
class _WalkMode(Enum):
  # Traversal direction used by GraphVisitor._visit_internal:
  # FORWARD follows node.next from the entry; REVERSE follows node.prev
  # from the exit nodes.
  FORWARD = 1
  REVERSE = 2
# TODO(mdan): Rename to DataFlowAnalyzer.
# TODO(mdan): Consider specializations that use gen/kill/transfer abstractions.
class GraphVisitor(object):
  """Base class for CFG visitors; subclass to run dataflow analyses.

  Not thread safe. Subclasses control revisiting: `visit_node` may return
  True to schedule a node's neighbors again, which lets an analysis iterate
  until its state reaches a fixed point. Unlike the classic formulation
  (see e.g.
  https://www.seas.harvard.edu/courses/cs252/2011sp/slides/Lec02-Dataflow.pdf),
  every reachable node is visited at least once regardless of state changes.

  Attributes:
    graph: Graph
    in_: Dict[Node, Any], stores node-keyed state during a visit
    out: Dict[Node, Any], stores node-keyed state during a visit
  """

  def __init__(self, graph):
    self.graph = graph
    self.reset()

  def init_state(self, node):
    """Returns the initial state for a node. Subclasses must override.

    One in/out slot is created per graph node and seeded with this value.

    Args:
      node: Node
    """
    raise NotImplementedError('Subclasses must implement this.')

  def visit_node(self, node):
    """Processes one node. Subclasses must override.

    Args:
      node: Node

    Returns:
      bool, True to schedule this node's neighbors again; returning False
      unconditionally yields an exactly-once traversal
    """
    raise NotImplementedError('Subclasses must implement this.')

  def reset(self):
    # Seed both state maps; init_state is called once per node per map.
    nodes = self.graph.index.values()
    self.in_ = {node: self.init_state(node) for node in nodes}
    self.out = {node: self.init_state(node) for node in nodes}

  def can_ignore(self, node):
    """Returns True if the node can safely be assumed not to touch variables."""
    ast_node = node.ast_node
    if anno.hasanno(ast_node, anno.Basic.SKIP_PROCESSING):
      return True
    # Python 2 parses None/True/False as plain names.
    if (six.PY2 and isinstance(ast_node, gast.Name) and
        ast_node.id in ('None', 'True', 'False')):
      return True
    return isinstance(ast_node,
                      (gast.Break, gast.Continue, gast.Raise, gast.Pass))

  def _visit_internal(self, mode):
    """Visits the CFG, depth-first."""
    assert mode in (_WalkMode.FORWARD, _WalkMode.REVERSE)
    if mode == _WalkMode.FORWARD:
      frontier = collections.deque((self.graph.entry,))
    elif mode == _WalkMode.REVERSE:
      frontier = collections.deque(self.graph.exit)
    done = set()

    while frontier:
      node = frontier.popleft()
      done.add(node)

      revisit = self.visit_node(node)
      neighbors = node.next if mode == _WalkMode.FORWARD else node.prev
      for neighbor in neighbors:
        if revisit or neighbor not in done:
          frontier.append(neighbor)

  def visit_forward(self):
    self._visit_internal(_WalkMode.FORWARD)

  def visit_reverse(self):
    self._visit_internal(_WalkMode.REVERSE)
class GraphBuilder(object):
"""Builder that constructs a CFG from a given AST.
This GraphBuilder facilitates constructing the DAG that forms the CFG when
nodes
are supplied in lexical order (i.e., top-down, depth first). Under these
conditions, it supports building patterns found in typical structured
programs.
This builder ignores the flow generated by exceptions, which are assumed to
always be catastrophic and present purely for diagnostic purposes (e.g. to
print debug information). Statements like raise and try/catch sections are
allowed and will generate control flow edges, but ordinary statements are
assumed not to raise exceptions.
Finally sections are also correctly interleaved between break/continue/return
nodes and their subsequent statements.
Important concepts:
* nodes - nodes refer refer to CFG nodes; AST nodes are qualified explicitly
* leaf set - since the graph is constructed gradually, a leaf set maintains
the CFG nodes that will precede the node that the builder expects to
receive next; when an ordinary node is added, it is connected to the
existing leaves and it in turn becomes the new leaf
* jump nodes - nodes that should generate edges other than what
ordinary nodes would; these correspond to break, continue and return
statements
* sections - logical delimiters for subgraphs that require special
edges; there are various types of nodes, each admitting various
types of jump nodes; sections are identified by their corresponding AST
node
"""
# TODO(mdan): Perhaps detail this in a markdown doc.
# TODO(mdan): Add exception support.
def __init__(self, parent_ast_node):
  """Initializes empty builder state and records the owning AST node.

  Args:
    parent_ast_node: ast.AST, stored as `self.parent`; presumably the node
      whose body this CFG describes — TODO confirm against callers.
  """
  self.reset()
  self.parent = parent_ast_node
def reset(self):
  """Resets the state of this factory."""
  # First CFG node added; doubles as the graph entry.
  self.head = None
  self.errors = set()
  # Maps each AST node to its CFG node; also the "added twice" guard.
  self.node_index = {}

  # TODO(mdan): Too many primitives. Use classes.
  self.leaves = set()

  # Note: This mechanism requires that nodes are added in lexical order (top
  # to bottom, depth first).
  self.active_stmts = set()
  self.owners = {}  # type: Dict[Node, FrozenSet[Hashable]]
  self.forward_edges = set()  # type: Set[Tuple[Node, Node]]  # (from, to)

  # Maps a jump node to the tuple of finally sections guarding it.
  self.finally_sections = {}
  # Dict values represent (entry, exits)
  self.finally_section_subgraphs = {
  }  # type: Dict[ast.AST, Tuple[Node, Set[Node]]]
  # Whether the guard section can be reached from the statement that precedes
  # it.
  self.finally_section_has_direct_flow = {}
  # Finally sections that await their first node.
  self.pending_finally_sections = set()

  # Exit jumps keyed by the section they affect.
  self.exits = {}

  # The entry of loop sections, keyed by the section.
  self.section_entry = {}
  # Continue jumps keyed by the section they affect.
  self.continues = {}

  # Raise jumps keyed by the except section guarding them.
  self.raises = {}

  # The entry of conditional sections, keyed by the section.
  self.cond_entry = {}
  # Lists of leaf nodes corresponding to each branch in the section.
  self.cond_leaves = {}
def _connect_nodes(self, first, second):
  """Connects nodes to signify that control flows from first to second.

  Maintains the double links (`first.next`/`second.prev`) as well as the
  builder's flat `forward_edges` set.

  Args:
    first: Union[Set[Node, ...], Node]
    second: Node
  """
  if isinstance(first, Node):
    first.next.add(second)
    second.prev.add(first)
    self.forward_edges.add((first, second))
  else:
    # A collection of nodes: connect each one to `second` individually.
    for node in first:
      self._connect_nodes(node, second)
def _add_new_node(self, ast_node):
  """Grows the graph by adding a CFG node following the current leaves.

  Args:
    ast_node: ast.AST

  Returns:
    Node, the freshly created CFG node.

  Raises:
    ValueError: if a node for ast_node was already added to this graph.
  """
  # Membership test. The original `ast_node is self.node_index` compared the
  # AST node against the index dict itself, which is never true, so the
  # duplicate-add guard could never fire.
  if ast_node in self.node_index:
    raise ValueError('%s added twice' % ast_node)
  # Assumption: All CFG nodes have identical life spans, because the graph
  # owns them. Nodes should never be used outside the context of an existing
  # graph.
  node = Node(next_=set(), prev=weakref.WeakSet(), ast_node=ast_node)
  self.node_index[ast_node] = node
  self.owners[node] = frozenset(self.active_stmts)

  if self.head is None:
    self.head = node

  # Ordinary wiring: the new node follows every current leaf.
  for leaf in self.leaves:
    self._connect_nodes(leaf, node)

  # If any finally section awaits its first node, populate it.
  for section_id in self.pending_finally_sections:
    self.finally_section_subgraphs[section_id][0] = node
  self.pending_finally_sections = set()

  return node
def begin_statement(self, stmt):
  """Marks the beginning of a statement.

  Every node added while the statement is active records it in its owner
  set, which is later used to derive stmt_prev/stmt_next edges.

  Args:
    stmt: Hashable, a key by which the statement can be identified in
      the CFG's stmt_prev and stmt_next attributes
  """
  self.active_stmts.add(stmt)
def end_statement(self, stmt):
  """Marks the end of a statement.

  Args:
    stmt: Hashable, a key by which the statement can be identified in
      the CFG's stmt_prev and stmt_next attributes; must match a key
      previously passed to begin_statement.

  Raises:
    KeyError: if stmt was not previously passed to begin_statement.
  """
  self.active_stmts.remove(stmt)
def add_ordinary_node(self, ast_node):
    """Grows the graph by adding an ordinary CFG node.

    Ordinary nodes are followed by the next node in lexical order; the new
    node therefore becomes the sole member of the leaf set.

    Args:
        ast_node: ast.AST
    Returns:
        Node
    """
    new_node = self._add_new_node(ast_node)
    self.leaves = {new_node}
    return new_node
def _add_jump_node(self, ast_node, guards):
"""Grows the graph by adding a jump node.
Jump nodes are added to the current leaf set, and the leaf set becomes
empty. If the jump node is the last in a cond section, then it may be added
back to the leaf set by a separate mechanism.
Args:
ast_node: ast.AST
guards: Tuple[ast.AST, ...], the finally sections active for this node
Returns:
Node
"""
node = self._add_new_node(ast_node)
self.leaves = set()
# The guards themselves may not yet be complete, and will be wired later.
self.finally_sections[node] = guards
return node
def _connect_jump_to_finally_sections(self, node):
"""Connects a jump node to the finally sections protecting it."""
cursor = set((node,))
if node not in self.finally_sections:
return cursor
for guard_section_id in self.finally_sections[node]:
guard_begin, guard_ends = self.finally_section_subgraphs[guard_section_id]
self._connect_nodes(cursor, guard_begin)
cursor = guard_ends
del self.finally_sections[node]
# TODO(mdan): Should garbage-collect finally_section_subgraphs.
return cursor
def add_exit_node(self, ast_node, section_id, guards):
    """Grows the graph by adding an exit node.

    This node becomes an exit for the current section.

    Args:
        ast_node: ast.AST
        section_id: Hashable, the node for which ast_node should be considered
            to be an exit node
        guards: Tuple[ast.AST, ...], the finally sections that guard ast_node
    Returns:
        Node
    """
    exit_node = self._add_jump_node(ast_node, guards)
    self.exits[section_id].add(exit_node)
    return exit_node
def add_continue_node(self, ast_node, section_id, guards):
    """Grows the graph by adding a reentry node.

    This node causes control flow to go back to the loop section's entry.

    Args:
        ast_node: ast.AST
        section_id: Hashable, the loop section ast_node reenters
        guards: Tuple[ast.AST, ...], the finally sections that guard ast_node
    """
    continue_node = self._add_jump_node(ast_node, guards)
    self.continues[section_id].add(continue_node)
def connect_raise_node(self, node, except_guards):
    """Adds extra connection between a raise node and containing except guards.

    The node is a graph node, not an ast node.

    Args:
        node: Node
        except_guards: Tuple[ast.AST, ...], the except sections that guard node
    """
    for guard in except_guards:
        # Idiom: setdefault replaces the manual membership-check/insert dance.
        self.raises.setdefault(guard, []).append(node)
def enter_section(self, section_id):
    """Enters a regular section.

    Regular sections admit exit jumps, which end the section.

    Args:
        section_id: Hashable, the same node that will be used in calls to the
            ast_node arg passed to add_exit_node
    """
    assert section_id not in self.exits
    # Begin collecting this section's exit jump nodes.
    self.exits[section_id] = set()
def exit_section(self, section_id):
    """Exits a regular section, reinstating its exit jumps as leaves."""
    # Exits are jump nodes, which may be protected by finally sections.
    for jump in self.exits.pop(section_id):
        self.leaves |= self._connect_jump_to_finally_sections(jump)
def enter_loop_section(self, section_id, entry_node):
    """Enters a loop section.

    Loop sections define an entry node. The end of the section always flows
    back to the entry node. These admit continue jump nodes which also flow
    to the entry node.

    Args:
        section_id: Hashable, the same node that will be used in calls to the
            ast_node arg passed to add_continue_node
        entry_node: ast.AST, the entry node into the loop (e.g. the test node
            for while loops)
    """
    assert section_id not in self.section_entry
    assert section_id not in self.continues
    self.continues[section_id] = set()
    self.section_entry[section_id] = self.add_ordinary_node(entry_node)
def exit_loop_section(self, section_id):
    """Exits a loop section, wiring every reentry path to the loop entry."""
    entry = self.section_entry.pop(section_id)
    # The lexical end of the loop body flows back to the entry.
    self._connect_nodes(self.leaves, entry)
    # Continues are jump nodes, which may be protected.
    for reentry in self.continues.pop(section_id):
        self._connect_nodes(
            self._connect_jump_to_finally_sections(reentry), entry)
    # Loop nodes always loop back.
    self.leaves = {entry}
def enter_cond_section(self, section_id):
    """Enters a conditional section.

    Conditional sections define an entry node, and one or more branches.

    Args:
        section_id: Hashable, the same node that will be used in calls to the
            section_id arg passed to new_cond_branch
    """
    assert section_id not in self.cond_entry
    assert section_id not in self.cond_leaves
    # Each completed branch deposits its leaf set here when the next opens.
    self.cond_leaves[section_id] = []
def new_cond_branch(self, section_id):
    """Begins a new branch in a cond section."""
    assert section_id in self.cond_leaves
    if section_id not in self.cond_entry:
        # First split of this section: just remember the split point.
        self.cond_entry[section_id] = self.leaves
        return
    # Subsequent splits move back to the split point, and memorize the
    # current leaves.
    self.cond_leaves[section_id].append(self.leaves)
    self.leaves = self.cond_entry[section_id]
def exit_cond_section(self, section_id):
    """Exits a conditional section, merging the leaves of every branch."""
    for branch_leaves in self.cond_leaves.pop(section_id):
        self.leaves |= branch_leaves
    del self.cond_entry[section_id]
def enter_except_section(self, section_id):
    """Enters an except section, wiring guarded raise nodes into it."""
    # Raise nodes guarded by this handler flow directly into its body.
    self.leaves.update(self.raises.get(section_id, ()))
def enter_finally_section(self, section_id):
    """Enters a finally section."""
    # TODO(mdan): This, not the caller, should track the active sections.
    self.finally_section_subgraphs[section_id] = [None, None]
    # Record whether control can reach the guard lexically (as opposed to
    # only via a jump); this later decides whether the guard flows into the
    # statement that follows it.
    self.finally_section_has_direct_flow[section_id] = bool(self.leaves)
    self.pending_finally_sections.add(section_id)
def exit_finally_section(self, section_id):
    """Exits a finally section."""
    assert section_id not in self.pending_finally_sections, 'Empty finally?'
    self.finally_section_subgraphs[section_id][1] = self.leaves
    # If the guard can only be reached by a jump, then it will not flow
    # into the statement that follows it.
    if not self.finally_section_has_direct_flow.pop(section_id):
        self.leaves = set()
def build(self):
    """Returns the CFG accumulated so far and resets the builder.

    Returns:
        Graph
    """
    # Freeze the nodes.
    for node in self.node_index.values():
        node.freeze()
    # Build the statement edges. First, pre-create an (initially empty)
    # entry for every statement that owns at least one node...
    stmt_next = {}
    stmt_prev = {}
    for node in self.node_index.values():
        for stmt in self.owners[node]:
            stmt_prev.setdefault(stmt, set())
            stmt_next.setdefault(stmt, set())
    # ...then, for each CFG edge, record the endpoints of statements the
    # edge enters or exits.
    for src, dst in self.forward_edges:
        for stmt in self.owners[src] - self.owners[dst]:
            stmt_next[stmt].add(dst)
        for stmt in self.owners[dst] - self.owners[src]:
            stmt_prev[stmt].add(src)
    # Freeze the statement edge sets.
    stmt_next = {stmt: frozenset(nodes) for stmt, nodes in stmt_next.items()}
    stmt_prev = {stmt: frozenset(nodes) for stmt, nodes in stmt_prev.items()}
    # Construct the final graph object.
    result = Graph(
        entry=self.head,
        exit=self.leaves,
        error=self.errors,
        index=self.node_index,
        stmt_prev=stmt_prev,
        stmt_next=stmt_next)
    # Reset the state.
    self.reset()
    return result
class AstToCfg(gast.NodeVisitor):
    """Converts an AST to CFGs.

    A separate CFG will be constructed for each function.
    """

    def __init__(self):
        super(AstToCfg, self).__init__()
        # Stack of GraphBuilders for enclosing function definitions; the
        # builder for the function currently being processed is self.builder.
        self.builder_stack = []
        self.builder = None
        # Maps each function-definition AST node to its finished Graph.
        self.cfgs = {}
        # Stack of AST nodes that delimit lexical scopes (functions, loops,
        # try statements); used to resolve the targets of jump statements.
        self.lexical_scopes = []

    def _enter_lexical_scope(self, node):
        # Pushes node as the innermost enclosing lexical scope.
        self.lexical_scopes.append(node)

    def _exit_lexical_scope(self, node):
        # Pops the innermost scope; asserts enter/exit calls are balanced.
        leaving_node = self.lexical_scopes.pop()
        assert node == leaving_node

    def _get_enclosing_finally_scopes(self, stop_at):
        """Returns (innermost enclosing node of a stop_at type, finally guards).

        Walks the lexical scopes innermost-first, collecting every Try node
        with a finalbody encountered on the way, and stops at the first node
        whose type is in stop_at. Returns (None, guards) if no such node
        encloses the current location.
        """
        included = []
        for node in reversed(self.lexical_scopes):
            if isinstance(node, gast.Try) and node.finalbody:
                included.append(node)
            if isinstance(node, stop_at):
                return node, included
        return None, included

    def _get_enclosing_except_scopes(self, stop_at):
        """Returns the except handlers guarding the current location."""
        included = []
        for node in reversed(self.lexical_scopes):
            if isinstance(node, gast.Try) and node.handlers:
                included.extend(node.handlers)
            if isinstance(node, stop_at):
                break
        return included

    def _process_basic_statement(self, node):
        # Ordinary statement: visit children, then append a single CFG node.
        self.generic_visit(node)
        self.builder.add_ordinary_node(node)

    def _process_exit_statement(
            self, node, exits_nodes_of_type, may_exit_via_except=False):
        # Handles statements that jump out of a section (return, break, raise).
        self.generic_visit(node)
        # Note: this is safe because we process functions separately.
        try_node, guards = self._get_enclosing_finally_scopes(exits_nodes_of_type)
        assert try_node is not None, '{} that is not enclosed by any of {}'.format(
            node, exits_nodes_of_type)
        node = self.builder.add_exit_node(node, try_node, guards)
        if may_exit_via_except:
            except_guards = self._get_enclosing_except_scopes(exits_nodes_of_type)
            self.builder.connect_raise_node(node, except_guards)

    def _process_continue_statement(self, node, *loops_to_nodes_of_type):
        # Handles statements that reenter a loop (continue).
        # Note: this is safe because we process functions separately.
        try_node, guards = self._get_enclosing_finally_scopes(
            tuple(loops_to_nodes_of_type))
        if try_node is None:
            raise ValueError('%s that is not enclosed by any of %s' %
                             (node, loops_to_nodes_of_type))
        self.builder.add_continue_node(node, try_node, guards)

    def visit_ClassDef(self, node):
        # We also keep the ClassDef node in the CFG, since it technically is a
        # statement.
        # For example, this is legal and allows executing user code:
        #
        #   class Foo(bar()):
        #     pass
        #
        # It also has a scope:
        #
        #   class Bar(object):
        #     a = 1
        if self.builder is None:
            # Top-level class: nothing to record, just recurse into it.
            self.generic_visit(node)
            return
        self.builder.add_ordinary_node(node)
        # The class body gets its own builder, like a function body does.
        self.builder_stack.append(self.builder)
        self.builder = GraphBuilder(node)
        self._enter_lexical_scope(node)
        self._process_basic_statement(node)
        self._exit_lexical_scope(node)
        # TODO(mdan): Track the CFG local to the class definition as well?
        self.builder = self.builder_stack.pop()

    def _process_function_def(self, node, is_lambda):
        # The function body is stored in a separate graph, because function
        # definitions have effects very different from function calls.
        if self.builder is not None:
            self.builder.add_ordinary_node(node)
            self.builder_stack.append(self.builder)
        self.builder = GraphBuilder(node)
        self._enter_lexical_scope(node)
        self.builder.enter_section(node)
        self._process_basic_statement(node.args)
        if is_lambda:
            # A lambda's body is a single expression and doubles as its return.
            self._process_exit_statement(node.body, (gast.Lambda,))
        else:
            for stmt in node.body:
                self.visit(stmt)
        self.builder.exit_section(node)
        self._exit_lexical_scope(node)
        self.cfgs[node] = self.builder.build()
        self.builder = self.builder_stack.pop()

    def visit_FunctionDef(self, node):
        self._process_function_def(node, is_lambda=False)

    def visit_Lambda(self, node):
        self._process_function_def(node, is_lambda=True)

    def visit_Return(self, node):
        self._process_exit_statement(node, (gast.FunctionDef,))

    def visit_Import(self, node):
        self._process_basic_statement(node)

    def visit_ImportFrom(self, node):
        self._process_basic_statement(node)

    def visit_Expr(self, node):
        self._process_basic_statement(node)

    def visit_Assign(self, node):
        self._process_basic_statement(node)

    def visit_AnnAssign(self, node):
        self._process_basic_statement(node)

    def visit_AugAssign(self, node):
        self._process_basic_statement(node)

    def visit_Pass(self, node):
        self._process_basic_statement(node)

    def visit_Global(self, node):
        self._process_basic_statement(node)

    def visit_Nonlocal(self, node):
        self._process_basic_statement(node)

    def visit_Print(self, node):
        self._process_basic_statement(node)

    def visit_Raise(self, node):
        self._process_exit_statement(
            node, (gast.FunctionDef,), may_exit_via_except=True)
        self.builder.errors.add(node)

    def visit_Assert(self, node):
        # Ignoring the effect of exceptions.
        self._process_basic_statement(node)

    def visit_Delete(self, node):
        self._process_basic_statement(node)

    def visit_If(self, node):
        # No need to track ifs as lexical scopes, for now.
        # Lexical scopes are generally tracked in order to be able to resolve
        # the targets of jump statements like break/continue/etc. Since there
        # is no statement that can interrupt a conditional, we don't need to
        # track their lexical scope. That may change in the future.
        self.builder.begin_statement(node)
        self.builder.enter_cond_section(node)
        self._process_basic_statement(node.test)
        self.builder.new_cond_branch(node)
        for stmt in node.body:
            self.visit(stmt)
        self.builder.new_cond_branch(node)
        for stmt in node.orelse:
            self.visit(stmt)
        self.builder.exit_cond_section(node)
        self.builder.end_statement(node)

    def visit_While(self, node):
        self.builder.begin_statement(node)
        self._enter_lexical_scope(node)
        self.builder.enter_section(node)
        self.builder.enter_loop_section(node, node.test)
        for stmt in node.body:
            self.visit(stmt)
        self.builder.exit_loop_section(node)
        # Note: although the orelse is technically part of the loop node,
        # the statements inside it don't affect the loop itself. For example,
        # a break in the loop's orelse will not affect the loop itself.
        self._exit_lexical_scope(node)
        for stmt in node.orelse:
            self.visit(stmt)
        self.builder.exit_section(node)
        self.builder.end_statement(node)

    def visit_For(self, node):
        self.builder.begin_statement(node)
        self._enter_lexical_scope(node)
        self.builder.enter_section(node)
        # Note: Strictly speaking, this should be node.target + node.iter.
        # However, the activity analysis accounts for this inconsistency,
        # so dataflow analysis produces the correct values.
        self.builder.enter_loop_section(node, node.iter)
        # Also include the "extra loop test" annotation, to capture things
        # like the control variable for return and break in for loops.
        if anno.hasanno(node, anno.Basic.EXTRA_LOOP_TEST):
            self._process_basic_statement(
                anno.getanno(node, anno.Basic.EXTRA_LOOP_TEST))
        for stmt in node.body:
            self.visit(stmt)
        self.builder.exit_loop_section(node)
        # Note: although the orelse is technically part of the loop node,
        # they don't count as loop bodies. For example, a break in the loop's
        # orelse will affect the parent loop, not the current one.
        self._exit_lexical_scope(node)
        for stmt in node.orelse:
            self.visit(stmt)
        self.builder.exit_section(node)
        self.builder.end_statement(node)

    def visit_Break(self, node):
        self._process_exit_statement(node, (gast.While, gast.For,))

    def visit_Continue(self, node):
        self._process_continue_statement(node, (gast.While, gast.For,))

    def visit_ExceptHandler(self, node):
        self.builder.begin_statement(node)
        self.builder.enter_except_section(node)
        if node.type is not None:
            self.visit(node.type)
        if node.name is not None:
            self.visit(node.name)
        for stmt in node.body:
            self.visit(stmt)
        self.builder.end_statement(node)

    def visit_Try(self, node):
        self.builder.begin_statement(node)
        self._enter_lexical_scope(node)
        # Note: the current simplification is that the try block fully
        # executes regardless of whether an exception triggers or not. This
        # is consistent with blocks free of try/except, which also don't
        # account for the possibility of an exception being raised mid-block.
        for stmt in node.body:
            self.visit(stmt)
        # The orelse is an optional continuation of the body.
        if node.orelse:
            block_representative = node.orelse[0]
            self.builder.enter_cond_section(block_representative)
            self.builder.new_cond_branch(block_representative)
            for stmt in node.orelse:
                self.visit(stmt)
            self.builder.new_cond_branch(block_representative)
            self.builder.exit_cond_section(block_representative)
        self._exit_lexical_scope(node)
        if node.handlers:
            # Using node would be inconsistent. Using the first handler node
            # is also inconsistent, but less so.
            block_representative = node.handlers[0]
            self.builder.enter_cond_section(block_representative)
            for block in node.handlers:
                self.builder.new_cond_branch(block_representative)
                self.visit(block)
            self.builder.new_cond_branch(block_representative)
            self.builder.exit_cond_section(block_representative)
        if node.finalbody:
            self.builder.enter_finally_section(node)
            for stmt in node.finalbody:
                self.visit(stmt)
            self.builder.exit_finally_section(node)
        self.builder.end_statement(node)

    def visit_With(self, node):
        # TODO(mdan): Mark the context manager's exit call as exit guard.
        for item in node.items:
            self._process_basic_statement(item)
        for stmt in node.body:
            self.visit(stmt)
def build(node):
    """Builds a CFG for each function found under an AST node.

    Args:
        node: ast.AST, the root of the tree to process
    Returns:
        Dict[ast.AST, Graph], mapping each function definition node to its CFG
    """
    cfg_visitor = AstToCfg()
    cfg_visitor.visit(node)
    return cfg_visitor.cfgs
|
gunan/tensorflow
|
tensorflow/python/autograph/pyct/cfg.py
|
Python
|
apache-2.0
| 32,187
|
[
"VisIt"
] |
ad80f35d12dbbf5eb4f3222b5cb28208caaf65b39bdd565458a68b260c9e12d7
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-15 20:21
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Drops the raid-tracking columns from Gym and refreshes field metadata.

    Auto-generated by Django 1.11.4 on 2017-09-15; changes should normally be
    made to the models and re-generated rather than hand-edited here.
    """

    dependencies = [
        ('app', '0008_auto_20170906_2108'),
    ]

    operations = [
        # Raid tracking is being removed from the Gym model: drop its three
        # raid-related columns.
        migrations.RemoveField(
            model_name='gym',
            name='raid_end_date',
        ),
        migrations.RemoveField(
            model_name='gym',
            name='raid_level',
        ),
        migrations.RemoveField(
            model_name='gym',
            name='raid_pokemon',
        ),
        # The remaining AlterField operations only refresh field options
        # (help_text, null/blank, max_length) to match the current models.
        migrations.AlterField(
            model_name='gym',
            name='gym_hunter_id',
            field=models.CharField(
                blank=True,
                help_text='ID on Gymhuntr',
                max_length=32,
                null=True),
        ),
        migrations.AlterField(
            model_name='gym',
            name='image_url',
            field=models.CharField(
                blank=True,
                help_text='URL of image used for Gym',
                max_length=256,
                null=True
            ),
        ),
        migrations.AlterField(
            model_name='gym',
            name='location',
            field=models.CharField(
                help_text='Location in long,lat format',
                max_length=128
            ),
        ),
        migrations.AlterField(
            model_name='gym',
            name='name',
            field=models.CharField(
                help_text='Name of Gym',
                max_length=256
            ),
        ),
        migrations.AlterField(
            model_name='gymitem',
            name='gym',
            field=models.ForeignKey(
                help_text='Gym item is for',
                on_delete=django.db.models.deletion.CASCADE,
                to='app.Gym'
            ),
        ),
        migrations.AlterField(
            model_name='gymitem',
            name='hidden',
            field=models.BooleanField(
                default=False,
                help_text='Hide this gym?'
            ),
        ),
        migrations.AlterField(
            model_name='gymitem',
            name='last_visit_date',
            field=models.DateField(
                blank=True,
                help_text='Date of last visit',
                null=True
            ),
        ),
        migrations.AlterField(
            model_name='gymitem',
            name='profile',
            field=models.ForeignKey(
                help_text='Profile gym item is associated with',
                on_delete=django.db.models.deletion.CASCADE,
                to='app.Profile'
            ),
        ),
        migrations.AlterField(
            model_name='profile',
            name='pokemon_go_username',
            field=models.CharField(
                blank=True,
                help_text='Your name in Pokemon Go',
                max_length=128,
                null=True
            ),
        )
    ]
|
Gimpneek/exclusive-raid-gym-tracker
|
app/migrations/0009_auto_20170915_2021.py
|
Python
|
gpl-3.0
| 3,074
|
[
"VisIt"
] |
9c74c2a412f2445aca57c072a96456801ec2d7887b37de0b308f58f9e0460c00
|
"""
API operations on the library datasets.
"""
import glob
import os
import os.path
import string
import sys
import tempfile
import zipfile
from galaxy import exceptions
from galaxy import util
from galaxy import web
from galaxy.exceptions import ObjectNotFound
from galaxy.managers import folders, roles
from galaxy.tools.actions import upload_common
from galaxy.util.json import dumps
from galaxy.util.streamball import StreamBall
from galaxy.web import _future_expose_api as expose_api
from galaxy.web import _future_expose_api_anonymous as expose_api_anonymous
from galaxy.web.base.controller import BaseAPIController, UsesVisualizationMixin
from paste.httpexceptions import HTTPBadRequest, HTTPInternalServerError
from sqlalchemy.orm.exc import MultipleResultsFound
from sqlalchemy.orm.exc import NoResultFound
import logging
log = logging.getLogger( __name__ )
class LibraryDatasetsController( BaseAPIController, UsesVisualizationMixin ):
def __init__( self, app ):
    """Initializes the controller and its folder/role managers."""
    super( LibraryDatasetsController, self ).__init__( app )
    # Managers encapsulate folder and role database access and authorization.
    self.folder_manager = folders.FolderManager()
    self.role_manager = roles.RoleManager()
@expose_api_anonymous
def show( self, trans, id, **kwd ):
    """
    show( self, trans, id, **kwd )
    * GET /api/libraries/datasets/{encoded_dataset_id}:
        Displays information about the dataset identified by the encoded ID.

    :param  id:      the encoded id of the dataset to query
    :type   id:      an encoded id string

    :returns:   detailed dataset information from base controller
    :rtype:     dictionary

    .. seealso:: :attr:`galaxy.web.base.controller.UsesLibraryMixinItems.get_library_dataset`
    """
    try:
        library_dataset = self.get_library_dataset( trans, id=id, check_ownership=False, check_accessible=True )
    except Exception:
        raise exceptions.ObjectNotFound( 'Requested library_dataset was not found.' )
    current_user_roles = trans.get_current_user_roles()
    # Build the full path for breadcrumb purposes.
    full_path = self._build_path( trans, library_dataset.folder )
    dataset_item = ( trans.security.encode_id( library_dataset.id ), library_dataset.name )
    full_path.insert(0, dataset_item)
    # _build_path returns the path leaf-first; reverse it for display.
    full_path = full_path[ ::-1 ]
    # Find expired versions of the library dataset.
    expired_ldda_versions = []
    for expired_ldda in library_dataset.expired_datasets:
        expired_ldda_versions.append( ( trans.security.encode_id( expired_ldda.id ), expired_ldda.name ) )
    rval = trans.security.encode_all_ids( library_dataset.to_dict() )
    if len(expired_ldda_versions) > 0:
        rval[ 'has_versions' ] = True
        rval[ 'expired_versions' ] = expired_ldda_versions
    rval[ 'deleted' ] = library_dataset.deleted
    # Folder ids are exposed with an 'F' prefix to distinguish them from
    # dataset ids in the API.
    rval[ 'folder_id' ] = 'F' + rval[ 'folder_id' ]
    rval[ 'full_path' ] = full_path
    rval[ 'file_size' ] = util.nice_size( int( library_dataset.library_dataset_dataset_association.get_size() ) )
    rval[ 'date_uploaded' ] = library_dataset.library_dataset_dataset_association.create_time.strftime( "%Y-%m-%d %I:%M %p" )
    rval[ 'can_user_modify' ] = trans.app.security_agent.can_modify_library_item( current_user_roles, library_dataset) or trans.user_is_admin()
    rval[ 'is_unrestricted' ] = trans.app.security_agent.dataset_is_public( library_dataset.library_dataset_dataset_association.dataset )
    # Manage dataset permission is always attached to the dataset itself, not
    # the ld or ldda, to maintain consistency.
    rval[ 'can_user_manage' ] = trans.app.security_agent.can_manage_dataset( current_user_roles, library_dataset.library_dataset_dataset_association.dataset) or trans.user_is_admin()
    return rval
@expose_api_anonymous
def show_version( self, trans, encoded_dataset_id, encoded_ldda_id, **kwd ):
    """
    show_version( self, trans, encoded_dataset_id, encoded_ldda_id, **kwd ):
    * GET /api/libraries/datasets/:encoded_dataset_id/versions/:encoded_ldda_id
        Displays information about specific version of the library_dataset (i.e. ldda).

    :param  encoded_dataset_id:      the encoded id of the dataset to query
    :type   encoded_dataset_id:      an encoded id string

    :param  encoded_ldda_id:      the encoded id of the ldda to query
    :type   encoded_ldda_id:      an encoded id string

    :rtype:     dictionary
    :returns:   dict of ldda's details
    """
    try:
        library_dataset = self.get_library_dataset( trans, id=encoded_dataset_id, check_ownership=False, check_accessible=True )
    except Exception:
        raise exceptions.ObjectNotFound( 'Requested library_dataset was not found.' )
    try:
        ldda = self.get_library_dataset_dataset_association( trans, id=encoded_ldda_id, check_ownership=False, check_accessible=False )
    except Exception, e:
        raise exceptions.ObjectNotFound( 'Requested version of library dataset was not found.' + str(e) )
    # The requested ldda must actually be a historical version of the dataset;
    # otherwise an arbitrary accessible ldda could be disclosed here.
    if ldda not in library_dataset.expired_datasets:
        raise exceptions.ObjectNotFound( 'Given library dataset does not have the requested version.' )
    rval = trans.security.encode_all_ids( ldda.to_dict() )
    return rval
@expose_api
def show_roles( self, trans, encoded_dataset_id, **kwd ):
    """
    show_roles( self, trans, id, **kwd ):
    * GET /api/libraries/datasets/{encoded_dataset_id}/permissions
        Displays information about current or available roles
        for a given dataset permission.

    :param  encoded_dataset_id:      the encoded id of the dataset to query
    :type   encoded_dataset_id:      an encoded id string

    :param  scope:      either 'current' or 'available'
    :type   scope:      string

    :rtype:     dictionary
    :returns:   either dict of current roles for all permission types or
        dict of available roles to choose from (is the same for any permission type)
    """
    current_user_roles = trans.get_current_user_roles()
    try:
        library_dataset = self.get_library_dataset( trans, id=encoded_dataset_id, check_ownership=False, check_accessible=False )
    except Exception, e:
        raise exceptions.ObjectNotFound( 'Requested dataset was not found.' + str(e) )
    dataset = library_dataset.library_dataset_dataset_association.dataset
    # User has to have manage permissions permission in order to see the roles.
    can_manage = trans.app.security_agent.can_manage_dataset( current_user_roles, dataset ) or trans.user_is_admin()
    if not can_manage:
        raise exceptions.InsufficientPermissionsException( 'You do not have proper permission to access permissions.' )
    scope = kwd.get( 'scope', None )
    if scope == 'current' or scope is None:
        # Default scope: report the roles currently attached to the dataset.
        return self._get_current_roles( trans, library_dataset )
    # Return roles that are available to select.
    elif scope == 'available':
        # Available roles are paginated; defaults are page 1, 10 per page.
        page = kwd.get( 'page', None )
        if page is not None:
            page = int( page )
        else:
            page = 1
        page_limit = kwd.get( 'page_limit', None )
        if page_limit is not None:
            page_limit = int( page_limit )
        else:
            page_limit = 10
        # Optional free-text filter on role names.
        query = kwd.get( 'q', None )
        roles, total_roles = trans.app.security_agent.get_valid_roles( trans, dataset, query, page, page_limit )
        return_roles = []
        for role in roles:
            return_roles.append( dict( id=role.name, name=role.name, type=role.type ) )
        return dict( roles=return_roles, page=page, page_limit=page_limit, total=total_roles )
    else:
        raise exceptions.RequestParameterInvalidException( "The value of 'scope' parameter is invalid. Alllowed values: current, available" )
def _get_current_roles( self, trans, library_dataset):
"""
Find all roles currently connected to relevant permissions
on the library dataset and the underlying dataset.
:param library_dataset: the model object
:type library_dataset: LibraryDataset
:rtype: dictionary
:returns: dict of current roles for all available permission types
"""
dataset = library_dataset.library_dataset_dataset_association.dataset
# Omit duplicated roles by converting to set
access_roles = set( dataset.get_access_roles( trans ) )
modify_roles = set( trans.app.security_agent.get_roles_for_action( library_dataset, trans.app.security_agent.permitted_actions.LIBRARY_MODIFY ) )
manage_roles = set( dataset.get_manage_permissions_roles( trans ) )
access_dataset_role_list = [ access_role.name for access_role in access_roles ]
manage_dataset_role_list = [ manage_role.name for manage_role in manage_roles ]
modify_item_role_list = [ modify_role.name for modify_role in modify_roles ]
return dict( access_dataset_roles=access_dataset_role_list, modify_item_roles=modify_item_role_list, manage_dataset_roles=manage_dataset_role_list )
@expose_api
def update_permissions( self, trans, encoded_dataset_id, **kwd ):
    """
    def update( self, trans, encoded_dataset_id, **kwd ):
        *POST /api/libraries/datasets/{encoded_dataset_id}/permissions

    :param  encoded_dataset_id:      the encoded id of the dataset to update permissions of
    :type   encoded_dataset_id:      an encoded id string

    :param  action:     (required) describes what action should be performed
                        available actions: make_private, remove_restrictions, set_permissions
    :type   action:     string

    :param  access_ids[]:      list of Role.name defining roles that should have access permission on the dataset
    :type   access_ids[]:      string or list
    :param  manage_ids[]:      list of Role.name defining roles that should have manage permission on the dataset
    :type   manage_ids[]:      string or list
    :param  modify_ids[]:      list of Role.name defining roles that should have modify permission on the library dataset item
    :type   modify_ids[]:      string or list

    :rtype:     dictionary
    :returns:   dict of current roles for all available permission types
    :raises: RequestParameterInvalidException, ObjectNotFound, InsufficientPermissionsException, InternalServerError
        RequestParameterMissingException
    """
    try:
        library_dataset = self.get_library_dataset( trans, id=encoded_dataset_id, check_ownership=False, check_accessible=False )
    except Exception, e:
        raise exceptions.ObjectNotFound( 'Requested dataset was not found.' + str(e) )
    dataset = library_dataset.library_dataset_dataset_association.dataset
    # Only users who can manage the underlying dataset (or admins) may change
    # its permissions.
    current_user_roles = trans.get_current_user_roles()
    can_manage = trans.app.security_agent.can_manage_dataset( current_user_roles, dataset ) or trans.user_is_admin()
    if not can_manage:
        raise exceptions.InsufficientPermissionsException( 'You do not have proper permissions to manage permissions on this dataset.' )
    new_access_roles_ids = kwd.get( 'access_ids[]', None )
    new_manage_roles_ids = kwd.get( 'manage_ids[]', None )
    new_modify_roles_ids = kwd.get( 'modify_ids[]', None )
    action = kwd.get( 'action', None )
    if action is None:
        raise exceptions.RequestParameterMissingException( 'The mandatory parameter "action" is missing.' )
    elif action == 'remove_restrictions':
        trans.app.security_agent.make_dataset_public( dataset )
        if not trans.app.security_agent.dataset_is_public( dataset ):
            raise exceptions.InternalServerError( 'An error occured while making dataset public.' )
    elif action == 'make_private':
        # First clear existing access restrictions, then grant access to the
        # current user's private role only.
        trans.app.security_agent.make_dataset_public( dataset )
        private_role = trans.app.security_agent.get_private_user_role( trans.user )
        dp = trans.app.model.DatasetPermissions( trans.app.security_agent.permitted_actions.DATASET_ACCESS.action, dataset, private_role )
        trans.sa_session.add( dp )
        trans.sa_session.flush()
        if not trans.app.security_agent.dataset_is_private_to_user( trans, library_dataset ):
            raise exceptions.InternalServerError( 'An error occured while making dataset private.' )
    elif action == 'set_permissions':
        # ACCESS DATASET ROLES
        valid_access_roles = []
        invalid_access_roles_names = []
        if new_access_roles_ids is None:
            # No access roles supplied means the dataset becomes public.
            trans.app.security_agent.make_dataset_public( dataset )
        else:
            # Check whether we receive only one role, then it is not a list so we make it into one.
            if isinstance(new_access_roles_ids, basestring):
                new_access_roles_ids = [ new_access_roles_ids ]
            for role_id in new_access_roles_ids:
                role = self._load_role( trans, role_id )
                # Check whether role is in the set of allowed roles
                valid_roles, total_roles = trans.app.security_agent.get_valid_roles( trans, dataset )
                if role in valid_roles:
                    valid_access_roles.append( role )
                else:
                    invalid_access_roles_names.append( role_id )
            # Invalid roles are skipped (logged), not fatal.
            if len( invalid_access_roles_names ) > 0:
                log.warning( "The following roles could not be added to the dataset access permission: " + str( invalid_access_roles_names ) )
            access_permission = dict( access=valid_access_roles )
            trans.app.security_agent.set_dataset_permission( dataset, access_permission )
        # MANAGE DATASET ROLES
        valid_manage_roles = []
        invalid_manage_roles_names = []
        new_manage_roles_ids = util.listify( new_manage_roles_ids )
        # Load all access roles to check
        active_access_roles = dataset.get_access_roles( trans )
        for role_id in new_manage_roles_ids:
            role = self._load_role( trans, role_id )
            # Check whether role is in the set of access roles
            if role in active_access_roles:
                valid_manage_roles.append( role )
            else:
                invalid_manage_roles_names.append( role_id )
        if len( invalid_manage_roles_names ) > 0:
            log.warning( "The following roles could not be added to the dataset manage permission: " + str( invalid_manage_roles_names ) )
        manage_permission = { trans.app.security_agent.permitted_actions.DATASET_MANAGE_PERMISSIONS: valid_manage_roles }
        trans.app.security_agent.set_dataset_permission( dataset, manage_permission )
        # MODIFY LIBRARY ITEM ROLES
        valid_modify_roles = []
        invalid_modify_roles_names = []
        new_modify_roles_ids = util.listify( new_modify_roles_ids )
        # Load all access roles to check
        active_access_roles = dataset.get_access_roles( trans )
        for role_id in new_modify_roles_ids:
            role = self._load_role( trans, role_id )
            # Check whether role is in the set of access roles
            if role in active_access_roles:
                valid_modify_roles.append( role )
            else:
                invalid_modify_roles_names.append( role_id )
        if len( invalid_modify_roles_names ) > 0:
            log.warning( "The following roles could not be added to the dataset modify permission: " + str( invalid_modify_roles_names ) )
        modify_permission = { trans.app.security_agent.permitted_actions.LIBRARY_MODIFY: valid_modify_roles }
        trans.app.security_agent.set_library_item_permission( library_dataset, modify_permission )
    else:
        raise exceptions.RequestParameterInvalidException( 'The mandatory parameter "action" has an invalid value. '
                                                           'Allowed values are: "remove_restrictions", "make_private", "set_permissions"' )
    # Report the resulting state back to the caller.
    return self._get_current_roles( trans, library_dataset )
def _load_role( self, trans, role_name ):
"""
Method loads the role from the DB based on the given role name.
:param role_name: name of the role to load from the DB
:type role_name: string
:rtype: Role
:returns: the loaded Role object
:raises: InconsistentDatabase, RequestParameterInvalidException, InternalServerError
"""
try:
role = trans.sa_session.query( trans.app.model.Role ).filter( trans.model.Role.table.c.name == role_name ).one()
except MultipleResultsFound:
raise exceptions.InconsistentDatabase( 'Multiple roles found with the same name. Name: ' + str( role_name ) )
except NoResultFound:
raise exceptions.RequestParameterInvalidException( 'No role found with the name provided. Name: ' + str( role_name ) )
except Exception, e:
raise exceptions.InternalServerError( 'Error loading from the database.' + str(e))
return role
@expose_api
def delete( self, trans, encoded_dataset_id, **kwd ):
"""
delete( self, trans, encoded_dataset_id, **kwd ):
* DELETE /api/libraries/datasets/{encoded_dataset_id}
Marks the dataset deleted or undeleted based on the value
of the undelete flag.
If the flag is not present it is considered False and the
item is marked deleted.
:param encoded_dataset_id: the encoded id of the dataset to change
:type encoded_dataset_id: an encoded id string
:rtype: dictionary
:returns: dict containing information about the dataset
"""
undelete = util.string_as_bool( kwd.get( 'undelete', False ) )
try:
dataset = self.get_library_dataset( trans, id=encoded_dataset_id, check_ownership=False, check_accessible=False )
except Exception, e:
raise exceptions.ObjectNotFound( 'Requested dataset was not found.' + str(e) )
current_user_roles = trans.get_current_user_roles()
allowed = trans.app.security_agent.can_modify_library_item( current_user_roles, dataset )
if ( not allowed ) and ( not trans.user_is_admin() ):
raise exceptions.InsufficientPermissionsException( 'You do not have proper permissions to delete this dataset.')
if undelete:
dataset.deleted = False
else:
dataset.deleted = True
trans.sa_session.add( dataset )
trans.sa_session.flush()
rval = trans.security.encode_all_ids( dataset.to_dict() )
rval[ 'update_time' ] = dataset.update_time.strftime( "%Y-%m-%d %I:%M %p" )
rval[ 'deleted' ] = dataset.deleted
rval[ 'folder_id' ] = 'F' + rval[ 'folder_id' ]
return rval
@expose_api
def load( self, trans, **kwd ):
"""
Load dataset from the given source into the library.
:param encoded_folder_id: the encoded id of the folder to import dataset to
:type encoded_folder_id: an encoded id string
:param source: source of the dataset to be loaded
:type source: str
:param link_data: flag whether to link the dataset to data or copy it to Galaxy
:type link_data: bool
:param preserve_dirs: flag whether to preserver directory structure when importing dir
:type preserve_dirs: bool
"""
kwd[ 'space_to_tab' ] = 'False'
kwd[ 'to_posix_lines' ] = 'True'
kwd[ 'dbkey' ] = kwd.get( 'dbkey', '?' )
kwd[ 'file_type' ] = kwd.get( 'file_type', 'auto' )
kwd[' link_data_only' ] = 'link_to_files' if util.string_as_bool( kwd.get( 'link_data', False ) ) else 'copy_files'
encoded_folder_id = kwd.get( 'encoded_folder_id', None )
if encoded_folder_id is not None:
folder_id = self.folder_manager.cut_and_decode( trans, encoded_folder_id )
else:
raise exceptions.RequestParameterMissingException( 'The required atribute encoded_folder_id is missing.' )
path = kwd.get( 'path', None)
if path is None:
raise exceptions.RequestParameterMissingException( 'The required atribute path is missing.' )
folder = self.folder_manager.get( trans, folder_id )
source = kwd.get( 'source', None )
if source not in [ 'userdir_file', 'userdir_folder', 'admin_path' ]:
raise exceptions.RequestParameterMissingException( 'You have to specify "source" parameter. Possible values are "userdir_file", "userdir_folder" and "admin_path". ')
if source in [ 'userdir_file', 'userdir_folder' ]:
user_login = trans.user.email
user_base_dir = trans.app.config.user_library_import_dir
if user_base_dir is None:
raise exceptions.ConfigDoesNotAllowException( 'The configuration of this Galaxy instance does not allow upload from user directories.' )
full_dir = os.path.join( user_base_dir, user_login )
# path_to_root_import_folder = None
if not path.lower().startswith( full_dir.lower() ):
# path_to_root_import_folder = path
path = os.path.join( full_dir, path )
if not os.path.exists( path ):
raise exceptions.RequestParameterInvalidException( 'Given path does not exist on the host.' )
if not self.folder_manager.can_add_item( trans, folder ):
raise exceptions.InsufficientPermissionsException( 'You do not have proper permission to add items to the given folder.' )
if source == 'admin_path':
if not trans.app.config.allow_library_path_paste:
raise exceptions.ConfigDoesNotAllowException( 'The configuration of this Galaxy instance does not allow admins to import into library from path.' )
if not trans.user_is_admin:
raise exceptions.AdminRequiredException( 'Only admins can import from path.' )
# Set up the traditional tool state/params
tool_id = 'upload1'
tool = trans.app.toolbox.get_tool( tool_id )
state = tool.new_state( trans )
tool.update_state( trans, tool.inputs_by_page[ 0 ], state.inputs, kwd )
tool_params = state.inputs
dataset_upload_inputs = []
for input_name, input in tool.inputs.iteritems():
if input.type == "upload_dataset":
dataset_upload_inputs.append( input )
library_bunch = upload_common.handle_library_params( trans, {}, trans.security.encode_id( folder.id ) )
abspath_datasets = []
kwd[ 'filesystem_paths' ] = path
params = util.Params( kwd )
# user wants to import one file only
if source == "userdir_file":
file = os.path.abspath( path )
abspath_datasets.append( trans.webapp.controllers[ 'library_common' ].make_library_uploaded_dataset(
trans, 'api', params, os.path.basename( file ), file, 'server_dir', library_bunch ) )
# user wants to import whole folder
if source == "userdir_folder":
uploaded_datasets_bunch = trans.webapp.controllers[ 'library_common' ].get_path_paste_uploaded_datasets(
trans, 'api', params, library_bunch, 200, '' )
uploaded_datasets = uploaded_datasets_bunch[0]
if uploaded_datasets is None:
raise exceptions.ObjectNotFound( 'Given folder does not contain any datasets.' )
for ud in uploaded_datasets:
ud.path = os.path.abspath( ud.path )
abspath_datasets.append( ud )
# user wants to import from path (admins only)
if source == "admin_path":
# validate the path is within root
uploaded_datasets_bunch = trans.webapp.controllers[ 'library_common' ].get_path_paste_uploaded_datasets(
trans, 'api', params, library_bunch, 200, '' )
uploaded_datasets = uploaded_datasets_bunch[0]
if uploaded_datasets is None:
raise exceptions.ObjectNotFound( 'Given folder does not contain any datasets.' )
for ud in uploaded_datasets:
ud.path = os.path.abspath( ud.path )
abspath_datasets.append( ud )
json_file_path = upload_common.create_paramfile( trans, abspath_datasets )
data_list = [ ud.data for ud in abspath_datasets ]
job, output = upload_common.create_job( trans, tool_params, tool, json_file_path, data_list, folder=folder )
# HACK: Prevent outputs_to_working_directory from overwriting inputs when "linking"
job.add_parameter( 'link_data_only', dumps( kwd.get( 'link_data_only', 'copy_files' ) ) )
job.add_parameter( 'uuid', dumps( kwd.get( 'uuid', None ) ) )
trans.sa_session.add( job )
trans.sa_session.flush()
job_dict = job.to_dict()
job_dict[ 'id' ] = trans.security.encode_id( job_dict[ 'id' ] )
return job_dict
    @web.expose
    def download( self, trans, format, **kwd ):
        """
        download( self, trans, format, **kwd )
        * GET /api/libraries/datasets/download/{format}
        * POST /api/libraries/datasets/download/{format}
        Downloads requested datasets (identified by encoded IDs) in requested format.
        example: ``GET localhost:8080/api/libraries/datasets/download/tbz?ldda_ids%255B%255D=a0d84b45643a2678&ldda_ids%255B%255D=fe38c84dcd46c828``
        .. note:: supported format values are: 'zip', 'tgz', 'tbz', 'uncompressed'
        :param format: string representing requested archive format
        :type format: string
        :param lddas[]: an array of encoded ids
        :type lddas[]: an array
        :rtype: file
        :returns: either archive with the requested datasets packed inside or a single uncompressed dataset
        :raises: MessageException, ItemDeletionException, ItemAccessibilityException, HTTPBadRequest, OSError, IOError, ObjectNotFound
        """
        lddas = []
        # Accept both the url-encoded and the plain form of the 'ldda_ids[]' key.
        datasets_to_download = kwd.get( 'ldda_ids%5B%5D', None )
        if datasets_to_download is None:
            datasets_to_download = kwd.get( 'ldda_ids', None )
        if ( datasets_to_download is not None ):
            datasets_to_download = util.listify( datasets_to_download )
            # Resolve each requested id to an LDDA, translating controller
            # errors into API-level exceptions.
            for dataset_id in datasets_to_download:
                try:
                    ldda = self.get_hda_or_ldda( trans, hda_ldda='ldda', dataset_id=dataset_id )
                    lddas.append( ldda )
                except HTTPBadRequest, e:
                    raise exceptions.RequestParameterInvalidException( 'Bad Request. ' + str( e.err_msg ) )
                except HTTPInternalServerError, e:
                    raise exceptions.InternalServerError( 'Internal error. ' + str( e.err_msg ) )
                except Exception, e:
                    raise exceptions.InternalServerError( 'Unknown error. ' + str( e ) )
        else:
            raise exceptions.RequestParameterMissingException( 'Request has to contain a list of dataset ids to download.' )
        if format in [ 'zip', 'tgz', 'tbz' ]:
            # error = False
            # Translation table used to sanitize composite component file names:
            # every punctuation/whitespace character is mapped to '_'.
            killme = string.punctuation + string.whitespace
            trantab = string.maketrans( killme, '_'*len( killme ) )
            try:
                outext = 'zip'
                if format == 'zip':
                    # Can't use mkstemp - the file must not exist first
                    tmpd = tempfile.mkdtemp()
                    util.umask_fix_perms( tmpd, trans.app.config.umask, 0777, self.app.config.gid )
                    tmpf = os.path.join( tmpd, 'library_download.' + format )
                    if trans.app.config.upstream_gzip:
                        archive = zipfile.ZipFile( tmpf, 'w', zipfile.ZIP_STORED, True )
                    else:
                        archive = zipfile.ZipFile( tmpf, 'w', zipfile.ZIP_DEFLATED, True )
                    # Give ZipFile the same .add( path, name ) interface as
                    # StreamBall; entry names are encoded as CP437.
                    archive.add = lambda x, y: archive.write( x, y.encode( 'CP437' ) )
                elif format == 'tgz':
                    if trans.app.config.upstream_gzip:
                        # Proxy will gzip; stream an uncompressed tar.
                        archive = StreamBall( 'w|' )
                        outext = 'tar'
                    else:
                        archive = StreamBall( 'w|gz' )
                        outext = 'tgz'
                elif format == 'tbz':
                    archive = StreamBall( 'w|bz2' )
                    outext = 'tbz2'
            except ( OSError, zipfile.BadZipfile ):
                log.exception( "Unable to create archive for download" )
                raise exceptions.InternalServerError( "Unable to create archive for download." )
            except Exception:
                log.exception( "Unexpected error %s in create archive for download" % sys.exc_info()[ 0 ] )
                raise exceptions.InternalServerError( "Unable to create archive for download." )
            composite_extensions = trans.app.datatypes_registry.get_composite_extensions()
            seen = []
            for ldda in lddas:
                ext = ldda.extension
                is_composite = ext in composite_extensions
                # Rebuild the folder path of the dataset inside the archive by
                # walking up the folder tree.
                path = ""
                parent_folder = ldda.library_dataset.folder
                while parent_folder is not None:
                    # Exclude the now-hidden "root folder"
                    if parent_folder.parent is None:
                        path = os.path.join( parent_folder.library_root[ 0 ].name, path )
                        break
                    path = os.path.join( parent_folder.name, path )
                    parent_folder = parent_folder.parent
                path += ldda.name
                # Disambiguate duplicate archive paths by appending underscores.
                while path in seen:
                    path += '_'
                seen.append( path )
                zpath = os.path.split(path)[ -1 ] # comes as base_name/fname
                outfname, zpathext = os.path.splitext( zpath )
                if is_composite: # need to add all the components from the extra_files_path to the zip
                    if zpathext == '':
                        zpath = '%s.html' % zpath # fake the real nature of the html file
                    try:
                        if format == 'zip':
                            archive.add( ldda.dataset.file_name, zpath ) # add the primary of a composite set
                        else:
                            archive.add( ldda.dataset.file_name, zpath, check_file=True ) # add the primary of a composite set
                    except IOError:
                        log.exception( "Unable to add composite parent %s to temporary library download archive" % ldda.dataset.file_name )
                        raise exceptions.InternalServerError( "Unable to create archive for download." )
                    except ObjectNotFound:
                        log.exception( "Requested dataset %s does not exist on the host." % ldda.dataset.file_name )
                        raise exceptions.ObjectNotFound( "Requested dataset not found. " )
                    except Exception, e:
                        log.exception( "Unable to add composite parent %s to temporary library download archive" % ldda.dataset.file_name )
                        raise exceptions.InternalServerError( "Unable to add composite parent to temporary library download archive. " + str( e ) )
                    # Add every component file of the composite dataset.
                    flist = glob.glob(os.path.join(ldda.dataset.extra_files_path, '*.*')) # glob returns full paths
                    for fpath in flist:
                        efp, fname = os.path.split(fpath)
                        if fname > '':
                            fname = fname.translate(trantab)
                        try:
                            if format == 'zip':
                                archive.add( fpath, fname )
                            else:
                                archive.add( fpath, fname, check_file=True )
                        except IOError:
                            log.exception( "Unable to add %s to temporary library download archive %s" % ( fname, outfname) )
                            raise exceptions.InternalServerError( "Unable to create archive for download." )
                        except ObjectNotFound:
                            log.exception( "Requested dataset %s does not exist on the host." % fpath )
                            raise exceptions.ObjectNotFound( "Requested dataset not found." )
                        except Exception, e:
                            log.exception( "Unable to add %s to temporary library download archive %s" % ( fname, outfname ) )
                            raise exceptions.InternalServerError( "Unable to add dataset to temporary library download archive . " + str( e ) )
                else: # simple case
                    try:
                        if format == 'zip':
                            archive.add( ldda.dataset.file_name, path )
                        else:
                            archive.add( ldda.dataset.file_name, path, check_file=True )
                    except IOError:
                        log.exception( "Unable to write %s to temporary library download archive" % ldda.dataset.file_name )
                        raise exceptions.InternalServerError( "Unable to create archive for download" )
                    except ObjectNotFound:
                        log.exception( "Requested dataset %s does not exist on the host." % ldda.dataset.file_name )
                        raise exceptions.ObjectNotFound( "Requested dataset not found." )
                    except Exception, e:
                        log.exception( "Unable to add %s to temporary library download archive %s" % ( fname, outfname ) )
                        raise exceptions.InternalServerError( "Unknown error. " + str( e ) )
            # All datasets are packed; stream the archive back to the client.
            lname = 'selected_dataset'
            fname = lname.replace( ' ', '_' ) + '_files'
            if format == 'zip':
                archive.close()
                trans.response.set_content_type( "application/octet-stream" )
                trans.response.headers[ "Content-Disposition" ] = 'attachment; filename="%s.%s"' % ( fname, outext )
                archive = util.streamball.ZipBall( tmpf, tmpd )
                archive.wsgi_status = trans.response.wsgi_status()
                archive.wsgi_headeritems = trans.response.wsgi_headeritems()
                return archive.stream
            else:
                trans.response.set_content_type( "application/x-tar" )
                trans.response.headers[ "Content-Disposition" ] = 'attachment; filename="%s.%s"' % ( fname, outext )
                archive.wsgi_status = trans.response.wsgi_status()
                archive.wsgi_headeritems = trans.response.wsgi_headeritems()
                return archive.stream
        elif format == 'uncompressed':
            if len(lddas) != 1:
                raise exceptions.RequestParameterInvalidException( "You can download only one uncompressed file at once." )
            else:
                single_dataset = lddas[ 0 ]
                trans.response.set_content_type( single_dataset.get_mime() )
                # NOTE(review): the following lines read `ldda` (the last loop
                # variable) rather than `single_dataset`; equivalent here only
                # because exactly one dataset is allowed — confirm before reuse.
                fStat = os.stat( ldda.file_name )
                trans.response.headers[ 'Content-Length' ] = int( fStat.st_size )
                # Sanitize the download filename and cap its length.
                valid_chars = '.,^_-()[]0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
                fname = ldda.name
                fname = ''.join( c in valid_chars and c or '_' for c in fname )[ 0:150 ]
                trans.response.headers[ "Content-Disposition" ] = 'attachment; filename="%s"' % fname
                try:
                    return open( single_dataset.file_name )
                except:
                    raise exceptions.InternalServerError( "This dataset contains no content." )
        else:
            raise exceptions.RequestParameterInvalidException( "Wrong format parameter specified" )
def _build_path( self, trans, folder ):
"""
Search the path upwards recursively and load the whole route of
names and ids for breadcrumb building purposes.
:param folder: current folder for navigating up
:param type: Galaxy LibraryFolder
:returns: list consisting of full path to the library
:type: list
"""
path_to_root = []
# We are almost in root
if folder.parent_id is None:
path_to_root.append( ( 'F' + trans.security.encode_id( folder.id ), folder.name ) )
else:
# We add the current folder and traverse up one folder.
path_to_root.append( ( 'F' + trans.security.encode_id( folder.id ), folder.name ) )
upper_folder = trans.sa_session.query( trans.app.model.LibraryFolder ).get( folder.parent_id )
path_to_root.extend( self._build_path( trans, upper_folder ) )
return path_to_root
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/webapps/galaxy/api/lda_datasets.py
|
Python
|
gpl-3.0
| 38,213
|
[
"Galaxy"
] |
e6fb2f0aca517dfac2a217c23284b7b90c93da8a7b4aa665777cfb979c9509ac
|
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2014-2017 The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import array
import os
import struct
import subprocess
import sys
import threading
from shutil import copyfile
import bpy
from extensions_framework import util as efutil
from . import project_file_writer
from . import util
class RenderAppleseed(bpy.types.RenderEngine):
    """Blender render engine that delegates rendering to the appleseed.cli executable."""

    bl_idname = 'APPLESEED_RENDER'
    bl_label = 'appleseed'
    bl_use_preview = True

    # This lock allows to serialize renders.
    render_lock = threading.Lock()

    def __init__(self):
        pass

    def update(self, data, scene):
        pass

    def render(self, scene):
        """Entry point called by Blender; dispatches preview / animation / still renders."""
        with RenderAppleseed.render_lock:
            if self.is_preview:
                # Material previews are not rendered in background (headless) mode.
                if not bpy.app.background:
                    self.__render_material_preview(scene)
            elif self.is_animation:
                # Render every frame of the range sequentially.
                frame_current = scene.frame_start
                while frame_current <= scene.frame_end:
                    scene.frame_set(frame_current)
                    self.__render_scene(scene)
                    frame_current += scene.frame_step
            else:
                self.__render_scene(scene)

    def __render_scene(self, scene):
        """
        Export and render the scene.
        """
        # Write project file and export meshes.
        bpy.ops.appleseed.export()

        if scene.appleseed.project_path == '':
            self.report({'INFO'}, "Please first specify a project path in the appleseed render settings.")
            return

        # Make sure the project directory exists.
        project_dir = util.realpath(scene.appleseed.project_path)
        if not os.path.exists(project_dir):
            try:
                os.makedirs(project_dir)
            except os.error:
                self.report({"ERROR"}, "The project directory could not be created. Check directory permissions.")
                return

        # Build the file path for the appleseed project.
        project_filepath = scene.name
        if not project_filepath.endswith(".appleseed"):
            project_filepath += ".appleseed"
        project_filepath = os.path.join(project_dir, project_filepath)

        # Render the project.
        self.__render_project_file(scene, project_filepath)

    def __render_material_preview(self, scene):
        """
        Export and render the material preview scene.
        """
        # Don't render material thumbnails (previews smaller than 96px wide).
        (width, height) = util.get_render_resolution(scene)
        if width <= 96:
            return

        # Collect objects and their materials in a object -> [materials] dictionary.
        objects_materials = {}
        for obj in (obj for obj in scene.objects if obj.is_visible(scene) and not obj.hide_render):
            for mat in util.get_instance_materials(obj):
                if mat is not None:
                    if obj.name not in objects_materials.keys():
                        objects_materials[obj] = []
                    objects_materials[obj].append(mat)

        # Find objects that are likely to be the preview objects.
        preview_objects = [o for o in objects_materials.keys() if o.name.startswith('preview')]
        if not preview_objects:
            return

        # Find the materials attached to the likely preview object.
        likely_materials = objects_materials[preview_objects[0]]
        if not likely_materials:
            return

        # Build the path to the template preview project in the add-on directory.
        preview_template_dir = os.path.join(os.sep.join(util.realpath(__file__).split(os.sep)[:-1]), "mat_preview")

        # Build the path to the output preview project.
        preview_output_dir = os.path.join(efutil.temp_directory(), "mat_preview")
        preview_project_filepath = os.path.join(preview_output_dir, "mat_preview.appleseed")

        # Copy preview scene assets that are not already present.
        if not os.path.isdir(preview_output_dir):
            os.mkdir(preview_output_dir)
        existing_files = os.listdir(preview_output_dir)
        for item in os.listdir(preview_template_dir):
            if item not in existing_files:
                copyfile(os.path.join(preview_template_dir, item), os.path.join(preview_output_dir, item))

        prev_mat = likely_materials[0]
        prev_type = prev_mat.preview_render_type.lower()

        # Export the project.
        exporter = project_file_writer.Exporter()
        file_written = exporter.export_preview(scene,
                                               preview_project_filepath,
                                               prev_mat,
                                               prev_type,
                                               width,
                                               height)
        if not file_written:
            print('Error while exporting. Check the console for details.')
            return

        # Render the project.
        self.__render_project_file(scene, preview_project_filepath)

    def __render_project_file(self, scene, project_filepath):
        """Launch appleseed.cli on a project file and stream rendered tiles into Blender."""
        # Get the absolute path to the executable directory.
        as_bin_path = util.realpath(bpy.context.user_preferences.addons['blenderseed'].preferences.appleseed_bin_path)
        if as_bin_path == '':
            self.report({'ERROR'}, "The path to appleseed.cli executable has not been specified. Set the path in the add-on user preferences.")
            return
        appleseed_exe = os.path.join(as_bin_path, "appleseed.cli")

        # If running Linux/macOS, add the binary path to environment.
        if sys.platform != "win32":
            os.environ['LD_LIBRARY_PATH'] = as_bin_path

        # Compute render resolution.
        (width, height) = util.get_render_resolution(scene)

        # Compute render window (note: y is flipped between Blender and appleseed).
        x0, y0, x1, y1 = 0, 0, width, height
        if scene.render.use_border:
            x0 = int(scene.render.border_min_x * width)
            x1 = int(scene.render.border_max_x * width)
            y0 = height - int(scene.render.border_min_y * height)
            y1 = height - int(scene.render.border_max_y * height)

        # Launch appleseed.cli; tiles are streamed back over stdout.
        cmd = (appleseed_exe,
               project_filepath,
               '--to-stdout',
               '--threads', str(scene.appleseed.threads),
               '--message-verbosity', 'warning',
               '--resolution', str(width), str(height),
               '--window', str(x0), str(y0), str(x1), str(y1))
        process = subprocess.Popen(cmd, cwd=as_bin_path, env=os.environ.copy(), stdout=subprocess.PIPE)

        self.update_stats("", "appleseed: Rendering")

        # Update while rendering.
        while not self.test_break():
            # Wait for the next chunk header from the process's stdout.
            chunk_header_data = os.read(process.stdout.fileno(), 2 * 4)
            if not chunk_header_data:
                break

            # Decode chunk header (two unsigned 32-bit ints: type, size).
            chunk_header = struct.unpack("II", chunk_header_data)
            chunk_type = chunk_header[0]
            chunk_size = chunk_header[1]

            # Ignore unknown chunks.
            # Known chunk types:
            #   1 = tile, protocol version 1
            if chunk_type != 1:
                os.read(process.stdout.fileno(), chunk_size)
                continue

            # Read and decode tile header (x, y, width, height, channel count).
            tile_header = struct.unpack("IIIII", os.read(process.stdout.fileno(), 5 * 4))
            tile_x = tile_header[0]
            tile_y = tile_header[1]
            tile_w = tile_header[2]
            tile_h = tile_header[3]
            tile_c = tile_header[4]

            # Read tile data: 4 bytes (one float) per channel per pixel.
            tile_size = 4 * tile_w * tile_h * tile_c
            tile_data = bytes()
            while len(tile_data) < tile_size and not self.test_break():
                tile_data += os.read(process.stdout.fileno(), tile_size - len(tile_data))
            if self.test_break():
                break

            # Optional debug message.
            if False:
                print("Received tile: x={0} y={1} w={2} h={3} c={4}".format(tile_x, tile_y, tile_w, tile_h, tile_c))

            # Convert tile data to the format expected by Blender.
            floats = array.array('f')
            # FIX: array.fromstring() was deprecated since Python 3.2 and
            # removed in 3.9; frombytes() is the identical replacement.
            floats.frombytes(tile_data)
            pix = []
            # Flip rows vertically (appleseed is top-down, Blender bottom-up).
            for y in range(tile_h - 1, -1, -1):
                stride = tile_w * 4
                start_index = y * stride
                end_index = start_index + stride
                pix.extend(floats[i:i + 4] for i in range(start_index, end_index, 4))

            # Update image.
            result = self.begin_result(tile_x, height - tile_y - tile_h, tile_w, tile_h)
            layer = result.layers[0] if bpy.app.version < (2, 74, 4) else result.layers[0].passes[0]
            layer.rect = pix
            self.end_result(result)

        # Make sure the appleseed.cli process has terminated.
        process.kill()
|
jasperges/blenderseed
|
render.py
|
Python
|
mit
| 10,160
|
[
"VisIt"
] |
82177af98476ed518a452a483b491c801d6f23fe6d54d14e7c2244a0da78a4ad
|
# Copyright (C) 2014
# Pierre de Buyl
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*****************************************
espressopp.interaction.MirrorLennardJones
*****************************************
This class provides methods to compute forces and energies of
the Mirror Lennard-Jones potential.
.. math::
V(r) = V_{LJ}(r_m - |r-r_m|)
where :math:`V_{LJ}` is the 6-12 purely repulsive Lennard-Jones
potential. This potential is introduced in R.L.C. Akkermans, S. Toxvaerd
and & W. J. Briels. Molecular dynamics of polymer growth. The Journal of
Chemical Physics, 1998, 109, 2929-2940.
.. function:: espressopp.interaction.MirrorLennardJones(epsilon, sigma)
:param epsilon: (default: 1.0)
:param sigma: (default: 0.0)
:type epsilon: real
:type sigma: real
.. function:: espressopp.interaction.FixedPairListMirrorLennardJones(system, vl, potential)
:param system:
:param vl:
:param potential:
:type system:
:type vl:
:type potential:
.. function:: espressopp.interaction.FixedPairListMirrorLennardJones.getFixedPairList()
:rtype: A Python list of lists.
.. function:: espressopp.interaction.FixedPairListMirrorLennardJones.getPotential()
:rtype:
.. function:: espressopp.interaction.FixedPairListMirrorLennardJones.setFixedPairList(fixedpairlist)
:param fixedpairlist:
:type fixedpairlist:
.. function:: espressopp.interaction.FixedPairListMirrorLennardJones.setPotential(potential)
:param potential:
:type potential:
"""
from espressopp import pmi, infinity
from espressopp.esutil import *
from espressopp.interaction.Potential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_MirrorLennardJones, interaction_FixedPairListMirrorLennardJones
class MirrorLennardJonesLocal(PotentialLocal, interaction_MirrorLennardJones):
    """Worker-side wrapper around the C++ MirrorLennardJones potential."""

    def __init__(self, epsilon=1.0, sigma=0.0):
        # Construct the C++ object when no PMI communicator is active, or
        # when this MPI rank belongs to the active CPU group.
        comm_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not comm_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_MirrorLennardJones, epsilon, sigma)
class FixedPairListMirrorLennardJonesLocal(InteractionLocal, interaction_FixedPairListMirrorLennardJones):
    """Worker-side fixed-pair-list interaction using the MirrorLennardJones potential."""

    def _participates(self):
        # A rank participates when no PMI communicator is active, or when it
        # is part of the active CPU group (same guard as the other Locals).
        comm_active = pmi._PMIComm and pmi._PMIComm.isActive()
        return not comm_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup()

    def __init__(self, system, vl, potential):
        if self._participates():
            cxxinit(self, interaction_FixedPairListMirrorLennardJones, system, vl, potential)

    def setPotential(self, potential):
        if self._participates():
            self.cxxclass.setPotential(self, potential)

    def getPotential(self):
        if self._participates():
            return self.cxxclass.getPotential(self)

    def setFixedPairList(self, fixedpairlist):
        if self._participates():
            self.cxxclass.setFixedPairList(self, fixedpairlist)

    def getFixedPairList(self):
        if self._participates():
            return self.cxxclass.getFixedPairList(self)
if pmi.isController:
    # Controller-side proxy classes: property access and method calls are
    # forwarded via PMI to the *Local classes defined above on the workers.
    class MirrorLennardJones(Potential):
        'The MirrorLennardJones potential.'
        # Forwards the 'epsilon' and 'sigma' properties to the worker-side
        # MirrorLennardJonesLocal instances.
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.MirrorLennardJonesLocal',
            pmiproperty = ['epsilon', 'sigma']
        )

    class FixedPairListMirrorLennardJones(Interaction, metaclass=pmi.Proxy):
        # Forwards the listed calls to FixedPairListMirrorLennardJonesLocal.
        # NOTE(review): MirrorLennardJones above declares no explicit
        # metaclass; presumably Potential already carries pmi.Proxy — confirm.
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.FixedPairListMirrorLennardJonesLocal',
            pmicall = ['setPotential','getPotential','setFixedPairList', 'getFixedPairList']
        )
|
espressopp/espressopp
|
src/interaction/MirrorLennardJones.py
|
Python
|
gpl-3.0
| 4,881
|
[
"ESPResSo"
] |
01b20779b4ef5ddc2ff7db2d7e900a1fd82639912f8eb161803ae56199cbc0c4
|
# Copyright 2009 by Cymon J. Cox. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Command line wrapper for the multiple alignment program PRANK.
"""
from __future__ import print_function
import sys
# Add path to Bio
sys.path.append('../../..')
__docformat__ = "restructuredtext en" # Don't just use plain text in epydoc API pages!
from Bio.Application import _Option, _Switch, AbstractCommandline
class PrankCommandline(AbstractCommandline):
    """Command line wrapper for the multiple alignment program PRANK.
    http://www.ebi.ac.uk/goldman-srv/prank/prank/
    Example:
    --------
    To align a FASTA file (unaligned.fasta) with the output in aligned
    FASTA format with the output filename starting with "aligned" (you
    can't pick the filename explicitly), no tree output and no XML output,
    use:
    >>> from Bio.Align.Applications import PrankCommandline
    >>> prank_cline = PrankCommandline(d="unaligned.fasta",
    ...                                o="aligned", # prefix only!
    ...                                f=8, # FASTA output
    ...                                notree=True, noxml=True)
    >>> print(prank_cline)
    prank -d=unaligned.fasta -o=aligned -f=8 -noxml -notree
    You would typically run the command line with prank_cline() or via
    the Python subprocess module, as described in the Biopython tutorial.
    Citations:
    ----------
    Loytynoja, A. and Goldman, N. 2005. An algorithm for progressive
    multiple alignment of sequences with insertions. Proceedings of
    the National Academy of Sciences, 102: 10557--10562.
    Loytynoja, A. and Goldman, N. 2008. Phylogeny-aware gap placement
    prevents errors in sequence alignment and evolutionary analysis.
    Science, 320: 1632.
    Last checked against version: 081202
    """
    def __init__(self, cmd="prank", **kwargs):
        """Build the PRANK command-line parameter definitions.

        cmd is the name (or full path) of the PRANK executable; every
        other PRANK option is supplied as a keyword argument (see the
        class docstring for an example).
        """
        # Legal values for the -f output format option (1..17, see below).
        OUTPUT_FORMAT_VALUES = list(range(1, 18))
        self.parameters = [
            # ################# input/output parameters: ##################
            # -d=sequence_file
            _Option(["-d", "d"],
                    "Input filename",
                    filename=True,
                    is_required=True),
            # -t=tree_file [default: no tree, generate approximate NJ tree]
            _Option(["-t", "t"], "Input guide tree filename",
                    filename=True),
            # -tree="tree_string" [tree in newick format; in double quotes]
            _Option(["-tree", "tree"],
                    "Input guide tree as Newick string"),
            # -m=model_file [default: HKY2/WAG]
            _Option(["-m", "m"],
                    "User-defined alignment model filename. Default: "
                    "HKY2/WAG"),
            # -o=output_file [default: 'output']
            _Option(["-o", "o"],
                    "Output filenames prefix. Default: 'output'\n "
                    "Will write: output.?.fas (depending on requested "
                    "format), output.?.xml and output.?.dnd",
                    filename=True),
            # -f=output_format [default: 8]
            _Option(["-f", "f"],
                    "Output alignment format. Default: 8 FASTA\n"
                    "Option are:\n"
                    "1. IG/Stanford	8. Pearson/Fasta\n"
                    "2. GenBank/GB 	11. Phylip3.2\n"
                    "3. NBRF       	12. Phylip\n"
                    "4. EMBL       	14. PIR/CODATA\n"
                    "6. DNAStrider 	15. MSF\n"
                    "7. Fitch      	17. PAUP/NEXUS",
                    checker_function=lambda x: x in OUTPUT_FORMAT_VALUES),
            _Switch(["-noxml", "noxml"],
                    "Do not output XML files "
                    "(PRANK versions earlier than v.120626)"),
            _Switch(["-notree", "notree"],
                    "Do not output dnd tree files "
                    "(PRANK versions earlier than v.120626)"),
            _Switch(["-showxml", "showxml"],
                    "Output XML files (PRANK v.120626 and later)"),
            _Switch(["-showtree", "showtree"],
                    "Output dnd tree files (PRANK v.120626 and later)"),
            _Switch(["-shortnames", "shortnames"],
                    "Truncate names at first space"),
            _Switch(["-quiet", "quiet"],
                    "Reduce verbosity"),
            # ###################### model parameters: ######################
            # +F [force insertions to be always skipped]
            # -F [equivalent]
            _Switch(["-F", "+F", "F"],
                    "Force insertions to be always skipped: same as +F"),
            # -dots [show insertion gaps as dots]
            _Switch(["-dots", "dots"],
                    "Show insertion gaps as dots"),
            # -gaprate=# [gap opening rate; default: dna 0.025 / prot 0.0025]
            _Option(["-gaprate", "gaprate"],
                    "Gap opening rate. Default: dna 0.025 prot 0.0025",
                    checker_function=lambda x: isinstance(x, float)),
            # -gapext=# [gap extension probability; default: dna 0.5 / prot 0.5]
            _Option(["-gapext", "gapext"],
                    "Gap extension probability. Default: dna 0.5 "
                    "/ prot 0.5",
                    checker_function=lambda x: isinstance(x, float)),
            # -dnafreqs=#,#,#,# [ACGT; default: empirical]
            _Option(["-dnafreqs", "dnafreqs"],
                    "DNA frequencies - 'A,C,G,T'. eg '25,25,25,25' as a quote "
                    "surrounded string value. Default: empirical",
                    # Accept str as well as bytes: under Python 3 the documented
                    # quoted-string usage is a str, which the old bytes-only
                    # check rejected.
                    checker_function=lambda x: isinstance(x, (str, bytes))),
            # -kappa=# [ts/tv rate ratio; default:2]
            _Option(["-kappa", "kappa"],
                    "Transition/transversion ratio. Default: 2",
                    checker_function=lambda x: isinstance(x, int)),
            # -rho=# [pur/pyr rate ratio; default:1]
            _Option(["-rho", "rho"],
                    "Purine/pyrimidine ratio. Default: 1",
                    checker_function=lambda x: isinstance(x, int)),
            # -codon [for DNA: use empirical codon model]
            # Assuming this is an input file as in -m
            _Option(["-codon", "codon"],
                    "Codon model filename. Default: empirical codon model"),
            # -termgap [penalise terminal gaps normally]
            _Switch(["-termgap", "termgap"],
                    "Penalise terminal gaps normally"),
            # ############### other parameters: ################################
            # -nopost [do not compute posterior support; default: compute]
            _Switch(["-nopost", "nopost"],
                    "Do not compute posterior support. Default: compute"),
            # -pwdist=# [expected pairwise distance for computing guidetree;
            # default: dna 0.25 / prot 0.5]
            _Option(["-pwdist", "pwdist"],
                    "Expected pairwise distance for computing guidetree. "
                    "Default: dna 0.25 / prot 0.5",
                    checker_function=lambda x: isinstance(x, float)),
            _Switch(["-once", "once"],
                    "Run only once. Default: twice if no guidetree given"),
            _Switch(["-twice", "twice"],
                    "Always run twice"),
            _Switch(["-skipins", "skipins"],
                    "Skip insertions in posterior support"),
            _Switch(["-uselogs", "uselogs"],
                    "Slower but should work for a greater number of sequences"),
            _Switch(["-writeanc", "writeanc"],
                    "Output ancestral sequences"),
            _Switch(["-printnodes", "printnodes"],
                    "Output each node; mostly for debugging"),
            # -matresize=# [matrix resizing multiplier]
            # Doesnt specify type but Float and Int work
            _Option(["-matresize", "matresize"],
                    "Matrix resizing multiplier",
                    checker_function=lambda x: isinstance(x, float) or
                    isinstance(x, int)),
            # -matinitsize=# [matrix initial size multiplier]
            # Doesnt specify type but Float and Int work
            _Option(["-matinitsize", "matinitsize"],
                    "Matrix initial size multiplier",
                    checker_function=lambda x: isinstance(x, float) or
                    isinstance(x, int)),
            _Switch(["-longseq", "longseq"],
                    "Save space in pairwise alignments"),
            _Switch(["-pwgenomic", "pwgenomic"],
                    "Do pairwise alignment, no guidetree"),
            # -pwgenomicdist=# [distance for pairwise alignment; default: 0.3]
            _Option(["-pwgenomicdist", "pwgenomicdist"],
                    "Distance for pairwise alignment. Default: 0.3",
                    checker_function=lambda x: isinstance(x, float)),
            # -scalebranches=# [scale branch lengths; default: dna 1 / prot 2]
            _Option(["-scalebranches", "scalebranches"],
                    "Scale branch lengths. Default: dna 1 / prot 2",
                    checker_function=lambda x: isinstance(x, int)),
            # -fixedbranches=# [use fixed branch lengths]
            # Assume looking for a float
            _Option(["-fixedbranches", "fixedbranches"],
                    "Use fixed branch lengths of input value",
                    checker_function=lambda x: isinstance(x, float)),
            # -maxbranches=# [set maximum branch length]
            # Assume looking for a float
            _Option(["-maxbranches", "maxbranches"],
                    "Use maximum branch lengths of input value",
                    checker_function=lambda x: isinstance(x, float)),
            # -realbranches [disable branch length truncation]
            _Switch(["-realbranches", "realbranches"],
                    "Disable branch length truncation"),
            _Switch(["-translate", "translate"],
                    "Translate to protein"),
            _Switch(["-mttranslate", "mttranslate"],
                    "Translate to protein using mt table"),
            # ##################### other: ####################
            _Switch(["-convert", "convert"],
                    "Convert input alignment to new format. Do "
                    "not perform alignment")
        ]
        AbstractCommandline.__init__(self, cmd, **kwargs)
def _test():
"""Run the module's doctests (PRIVATE)."""
print("Running modules doctests...")
import doctest
doctest.testmod()
print("Done")
if __name__ == "__main__":
    # Running this wrapper module directly exercises its doctests.
    _test()
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/Align/Applications/_Prank.py
|
Python
|
gpl-2.0
| 10,813
|
[
"Biopython"
] |
37217374bb3bc85502a1dae3f0386fa74d65faf932170f4de3fa7fbf27e3f3ea
|
#!/usr/bin/env python
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
__author__ = "Ole Christian Weidner"
__copyright__ = "Copyright 2012, Ole Christian Weidner"
__license__ = "MIT"
import bliss.saga
from bliss.saga.Object import Object
from bliss.saga.Attributes import AttributeInterface
from bliss.utils.jobid import JobID
class Job(Object, AttributeInterface):
    '''Loosely represents a SAGA job as defined in GFD.90
    A 'Job' represents a running application instance, which may consist of one
    or more processes. Jobs are created by submitting a Job description to
    a Job submission system -- usually a queuing system, or some other service
    which spawns jobs on the user's behalf.
    Jobs have a unique ID (see get_job_id()), and are stateful entities -- their
    'state' attribute changes according to a well defined state model:
    A job as returned by job.Service.create(jd) is in 'New' state -- it is not
    yet submitted to the job submission backend. Once it was submitted, via
    run(), it will enter the 'Pending' state, where it waits to get actually
    executed by the backend (e.g. waiting in a queue etc). Once the job is
    actually executed, it enters the 'Running' state -- only in that state is
    the job actually consuming resources (CPU, memory, ...).
    Jobs can leave the 'Running' state in three different ways: they finish
    successfully on their own ('Done'), they finish unsuccessfully on their own,
    or get canceled by the job management backend ('Failed'), or they get
    actively canceled by the user or the application ('Canceled').
    The methods defined on the Job object serve two purposes: inspecting the
    job's state, and initiating job state transitions.
    '''

    # State constants, exposed as class attributes per the SAGA state model.
    New = "saga.job.Job.New"
    '''Indicates that the job hasn't been started yet'''
    Pending = "saga.job.Job.Pending"
    '''Indicates that the job is waiting to be executed (NOT IN GFD.90)'''
    Running = "saga.job.Job.Running"
    '''Indicates that the job is executing
    Note that Bliss does not expose the 'Suspended' state -- Suspended jobs will
    be reported as 'Running'.
    '''
    Done = "saga.job.Job.Done"
    '''Indicates that the job has successfully executed'''
    Failed = "saga.job.Job.Failed"
    '''Indicates that the execution of the job has failed'''
    Canceled = "saga.job.Job.Canceled"
    '''Indicates that the job has been canceled either by the user or the system'''
    Unknown = "saga.job.Job.Unknown"
    '''Indicates that the job is in an unexpected state'''

    ######################################################################
    ##
    def __init__(self):
        '''PRIVATE Constructor (don't call explicitly!)'''
        Object.__init__(self)
        self._apitype = 'saga.job'
        AttributeInterface.__init__(self)
        # Register read-only properties with the attribute interface so they
        # can be queried generically (e.g. via get_attribute("Exitcode")).
        self._register_ro_attribute(name="Exitcode",
                                    accessor=self.__class__.exitcode)
        self._register_ro_attribute(name="JobID",
                                    accessor=self.__class__.jobid)
        self._register_ro_attribute(name="ServiceURL",
                                    accessor=self.__class__.serviceurl)

    ######################################################################
    ##
    def __init_from_service(self, service_obj, job_desc):
        '''Constructor'''
        self._service = service_obj
        self._url = service_obj._url
        self._job_description = job_desc
        self._plugin = Object._get_plugin(self)  # throws 'NoSuccess' on error
        self._logger.info("Bound to plugin %s" % (repr(self._plugin)))

    ######################################################################
    ##
    def __del__(self):
        '''Delete the job in a civilised fashion.'''
        # _plugin is only assigned in __init_from_service(), so a job that was
        # constructed but never bound (or whose construction failed part-way)
        # must not raise AttributeError during garbage collection.
        if getattr(self, '_plugin', None) is not None:
            self._plugin.unregister_job_object(self)

    ######################################################################
    ##
    #def get_stderr(self):
    #    '''B{Not Implemented:} Bliss does not support I/O streaming. Please use
    #       file staging to retrieve stdout and stderr files.
    #    '''
    #    raise bliss.saga.Exception(bliss.saga.Error.NotImplemented,
    #      "Bliss doesn't suppport get_stderr()")

    ######################################################################
    ##
    #def get_stdout(self):
    #    '''B{Not Implemented:} Bliss does not support I/O streaming.
    #    '''
    #    raise bliss.saga.Exception(bliss.saga.Error.NotImplemented,
    #      "Bliss doesn't suppport get_stdout()")

    ######################################################################
    ##
    def get_description(self):
        '''Return the job description this job was created from.
        The returned description can be used to inspect job properties
        (executable name, arguments, etc.). It can also be used to start
        identical job instances.
        The returned job description will in general reflect the actual state of
        the running job, and is not necessarily a simple copy of the job
        description which was used to create the job instance. For example, the
        environment variables in the returned job description may reflect the
        actual environment of the running job instance.
        B{Example}::
          js = saga.job.Service("fork://localhost")
          jd = saga.job.Description ()
          jd.executable = '/bin/date'
          j1 = js.create_job(jd)
          j1.run()
          j2 = js.create_job(j1.get_description())
          j2.run()
        '''
        if self._plugin is not None:
            # Deep-copy so callers cannot mutate this job's internal state.
            jd = bliss.saga.job.Description._deep_copy(self._job_description)
            return jd
        else:
            raise bliss.saga.Exception(bliss.saga.Error.NoSuccess,
                                       "Object not bound to a plugin")

    ######################################################################
    ##
    def get_state(self, obj=None, key='State'):
        '''Return the current state of the job.
        B{Example}::
          js = saga.job.Service("fork://localhost")
          jd = saga.job.Description ()
          jd.executable = '/bin/date'
          j = js.create_job(jd)
          if j.get_state() == saga.job.Job.New :
              print "new"
          else :
              print "oops!"
          j.run()
          if j.get_state() == saga.job.Job.Pending :
              print "pending"
          elif j.get_state() == saga.job.Job.Running :
              print "running"
          else :
              print "oops!"
        '''
        # NOTE(review): obj/key appear to exist only for the attribute
        # interface's accessor calling convention -- they are unused here.
        if self._plugin is not None:
            return self._plugin.job_get_state(self)
        else:
            raise bliss.saga.Exception(bliss.saga.Error.NoSuccess,
                                       "Object not bound to a plugin")

    ######################################################################
    ##
    def get_job_id(self, obj=None, key="JobID"):
        '''Return the identifier for the job.'''
        if self._plugin is None:
            raise bliss.saga.Exception(bliss.saga.Error.NoSuccess,
                                       "Object not bound to a plugin")
        # This is a fix for https://github.com/saga-project/bliss/issues/38.
        # If we see a JobID object that looks like [service]-[None], we just
        # return 'None'. That's much easier than messing with every single plug-in
        # This fix also deprecates JobID on API level and moves it into the util
        # namespace where it can still be used within plug-in context.
        jobid = self._plugin.job_get_job_id(self)
        if isinstance(jobid, bliss.utils.jobid.JobID) \
          and jobid.native_id is None:
            return None
        return jobid

    ######################################################################
    ##
    def run(self):
        '''Execute the job via the associated job service.
        Request that the job is being executed by the backend. If the backend
        is accepting this run request, the job will move to the 'Pending' or
        'Running' state -- otherwise this method will raise an error, and the
        job will be moved to 'Failed'.
        B{Example}::
          js = saga.job.Service("fork://localhost")
          jd = saga.job.Description ()
          jd.executable = '/bin/date'
          j = js.create_job(jd)
          if j.get_state() == saga.job.Job.New :
              print "new"
          else :
              print "oops!"
          j.run()
          if j.get_state() == saga.job.Job.Pending :
              print "pending"
          elif j.get_state() == saga.job.Job.Running :
              print "running"
          else :
              print "oops!"
        '''
        if self._plugin is not None:
            return self._plugin.job_run(self)
        else:
            raise bliss.saga.Exception(bliss.saga.Error.NoSuccess,
                                       "Object not bound to a plugin")

    ######################################################################
    ##
    def cancel(self):
        '''Cancel the execution of the job.
        B{Example}::
          js = saga.job.Service("fork://localhost")
          jd = saga.job.Description ()
          jd.executable = '/bin/date'
          j = js.create_job(jd)
          if j.get_state() == saga.job.Job.New :
              print "new"
          else :
              print "oops!"
          j.run()
          if j.get_state() == saga.job.Job.Pending :
              print "pending"
          elif j.get_state() == saga.job.Job.Running :
              print "running"
          else :
              print "oops!"
          j.cancel()
          if j.get_state() == saga.job.Job.Canceled :
              print "canceled"
          else :
              print "oops!"
        '''
        if self._plugin is not None:
            return self._plugin.job_cancel(self)
        else:
            raise bliss.saga.Exception(bliss.saga.Error.NoSuccess,
                                       "Object not bound to a plugin")

    ######################################################################
    ##
    def wait(self, timeout=-1):
        '''Wait for a running job to finish execution.
        @param timeout: Timeout in seconds.
        The optional timeout parameter specifies the time to wait, and accepts
        the following values::
          timeout <  0 : wait forever (block)
          timeout == 0 : wait not at all (non-blocking test)
          timeout >  0 : wait for 'timeout' seconds
        On a non-negative timeout, the call can thus return even if the job is
        not in final state, and the application should check the actual job
        state. The default timeout value is '-1.0' (blocking).
        B{Example}::
          js = saga.job.Service("fork://localhost")
          jd = saga.job.Description ()
          jd.executable = '/bin/date'
          j = js.create_job(jd)
          if j.get_state() == saga.job.Job.New :
              print "new"
          else :
              print "oops!"
          j.run()
          if j.get_state() == saga.job.Job.Pending :
              print "pending"
          elif j.get_state() == saga.job.Job.Running :
              print "running"
          else :
              print "oops!"
          j.wait(-1.0)
          if j.get_state() == saga.job.Job.Done :
              print "done"
          elif j.get_state() == saga.job.Job.Failed :
              print "failed"
          else :
              print "oops!"
        '''
        if self._plugin is not None:
            return self._plugin.job_wait(self, timeout)
        else:
            raise bliss.saga.Exception(bliss.saga.Error.NoSuccess,
                                       "Object not bound to a plugin")

    ######################################################################
    ## Property:
    def exitcode():
        '''The job's exitcode.
        this attribute is only meaningful if the job is in 'Done' or 'Final'
        state - for all other job states, this attribute value is undefined.
        B{Example}::
          js = saga.job.Service("fork://localhost")
          jd = saga.job.Description ()
          jd.executable = '/bin/date'
          j = js.create_job(jd)
          j.run()
          j.wait()
          if j.get_state() == saga.job.Job.Failed :
              if j.exitcode == "42" :
                  print "Ah, galaxy bypass error!"
              else :
                  print "oops!"
        '''
        # NOTE(review): unlike the methods above, the properties return None
        # (implicitly) rather than raising when the job is unbound; kept as-is
        # since callers may rely on that.
        def fget(self):
            if self._plugin is not None:
                return self._plugin.job_get_exitcode(self)
        return locals()
    exitcode = property(**exitcode())

    ######################################################################
    ## Property:
    def jobid():
        '''The job's identifier.
        This attribute is equivalent to the value returned by job.get_job_id()
        '''
        def fget(self):
            if self._plugin is not None:
                return self._plugin.job_get_job_id(self)
        return locals()
    jobid = property(**jobid())

    ######################################################################
    ## Property:
    def serviceurl():
        '''The job's management URL.
        This attribute is represents the URL under where the job management
        service can be contacted which owns the job. The value is equivalent to
        the service part of the job_id.
        B{Example}::
          js = saga.job.Service("fork://localhost")
          jd = saga.job.Description ()
          jd.executable = '/bin/date'
          j = js.create_job(jd)
          if j.serviceurl == "fork://localhost" :
              print "yes!"
          else :
              print "oops!"
        '''
        doc = "The URL of the L{Service} instance managing this job."
        def fget(self):
            if self._plugin is not None:
                return str(self._url)
        return locals()
    serviceurl = property(**serviceurl())
|
saga-project/bliss
|
bliss/saga/job/Job.py
|
Python
|
mit
| 14,254
|
[
"Galaxy"
] |
ae648edcbcb036b6092702cc1fd467e3152d26b37630acb8a101039a2cc44c44
|
# Written by Lauri Lehtovaara 2008
from gpaw.utilities.blas import axpy
from gpaw.utilities.blas import dotc
class MultiBlas:
    """Convenience wrappers applying BLAS-style operations to stacks of vectors."""

    def __init__(self, gd):
        # Grid descriptor; its communicator sums distributed dot products.
        self.gd = gd

    def multi_zaxpy(self, a, x, y, nvec):
        """y[k] <- a * x[k] + y[k], with a either a scalar or per-vector array."""
        scalar = isinstance(a, (float, complex))
        for k in range(nvec):
            coeff = a * (1 + 0J) if scalar else a[k] * (1.0 + 0.0J)
            axpy(coeff, x[k], y[k])

    def multi_zdotc(self, s, x, y, nvec):
        """s[k] <- x[k]^H . y[k], summed over the domain communicator."""
        for k in range(nvec):
            s[k] = dotc(x[k], y[k])
        self.gd.comm.sum(s)
        return s

    def multi_scale(self, a, x, nvec):
        """Scale in place: x <- a * x (scalar a) or x[k] <- a[k] * x[k]."""
        if isinstance(a, (float, complex)):
            x *= a
        else:
            for k in range(nvec):
                x[k] *= a[k]
# -------------------------------------------------------------------
import numpy as np
class BandPropertyMonitor:
    """Snapshots a named per-k-point attribute (e.g. eigenvalues) at intervals."""

    def __init__(self, wfs, name, interval=1):
        self.niter = 0
        self.interval = interval
        self.wfs = wfs
        self.name = name

    def __call__(self):
        self.update(self.wfs)
        self.niter += self.interval

    def update(self, wfs):
        # Strictly serial operation (no parallel gather).
        snapshot = [getattr(kpt, self.name) for kpt in wfs.kpt_u]
        self.write(np.array(snapshot))

    def write(self, data):
        # Hook for subclasses; the base class discards the data.
        pass
class BandPropertyWriter(BandPropertyMonitor):
    """Band-property monitor that appends each snapshot to a raw binary file."""

    def __init__(self, filename, wfs, name, interval=1):
        BandPropertyMonitor.__init__(self, wfs, name, interval)
        # Binary mode: ndarray bytes cannot be written to a text-mode file
        # under Python 3.
        self.fileobj = open(filename, 'wb')

    def write(self, data):
        # tobytes() replaces ndarray.tostring(), which was deprecated and has
        # been removed in NumPy 2.0; the produced bytes are identical.
        self.fileobj.write(data.tobytes())
        self.fileobj.flush()

    def __del__(self):
        # Guard: if open() failed in __init__, fileobj was never assigned.
        if getattr(self, 'fileobj', None) is not None:
            self.fileobj.close()
# -------------------------------------------------------------------
class StaticOverlapMonitor:
    # Accumulates, per call, the overlap of each k-point's orbitals with a
    # fixed reference wavefunction (wf_u) including PAW corrections (P_aui).
    def __init__(self, wfs, wf_u, P_aui, interval=1):
        """Monitor overlaps between current orbitals and a static reference.

        wfs      -- wavefunction object; update() reads wfs.kpt_u, wfs.gd.dv,
                    wfs.nbands and wfs.setups
        wf_u     -- reference wavefunction per k-point/spin index u
        P_aui    -- reference projector coefficients, indexed [atom][u][i]
        interval -- amount added to self.niter per __call__ (a step counter)
        """
        self.niter = 0
        self.interval = interval
        self.wfs = wfs
        self.wf_u = wf_u
        self.P_aui = P_aui
    def __call__(self):
        # Take one snapshot and advance the step counter.
        self.update(self.wfs)
        self.niter += self.interval
    def update(self, wfs, calculate_P_ani=False):
        """Compute <psit_n|wf_u> per k-point (plus PAW terms) and write().

        Raises NotImplementedError if calculate_P_ani is True; the projector
        coefficients must already be present on each kpt.
        """
        #strictly serial XXX!
        Porb_un = []
        for u, kpt in enumerate(wfs.kpt_u):
            # Flatten the reference and the pseudo wavefunctions so the grid
            # inner product becomes a single matrix-vector dot.
            swf = self.wf_u[u].ravel()
            psit_n = kpt.psit_nG.reshape((len(kpt.f_n),-1))
            # Smooth-part overlap; wfs.gd.dv is the grid volume element.
            Porb_n = np.dot(psit_n.conj(), swf) * wfs.gd.dv
            P_ani = kpt.P_ani
            if calculate_P_ani:
                #wfs.pt.integrate(psit_nG, P_ani, kpt.q)
                raise NotImplementedError('In case you were wondering, TODO XXX')
            # PAW correction: add sum_ij P_ni^* dO_ii P_i for every atom.
            for a, P_ni in P_ani.items():
                sP_i = self.P_aui[a][u]
                for n in range(wfs.nbands):
                    for i in range(len(P_ni[0])):
                        for j in range(len(P_ni[0])):
                            Porb_n[n] += (P_ni[n][i].conj() *
                                          wfs.setups[a].dO_ii[i][j] *
                                          sP_i[j])
            Porb_un.append(Porb_n)
        self.write(np.array(Porb_un))
    def write(self, data):
        # Hook for subclasses; the base class discards the data.
        pass
class StaticOverlapWriter(StaticOverlapMonitor):
    """Static-overlap monitor that appends each snapshot to a raw binary file."""

    def __init__(self, filename, wfs, overlap, interval=1):
        # NOTE(review): StaticOverlapMonitor.__init__ expects
        # (wfs, wf_u, P_aui, interval); here 'overlap' lands in wf_u and
        # 'interval' in P_aui while the monitor interval stays at its default.
        # Signature and call kept for compatibility -- confirm the intended
        # argument mapping with the callers.
        StaticOverlapMonitor.__init__(self, wfs, overlap, interval)
        # Binary mode: ndarray bytes cannot be written to a text-mode file
        # under Python 3.
        self.fileobj = open(filename, 'wb')

    def write(self, data):
        # tobytes() replaces ndarray.tostring(), removed in NumPy 2.0.
        self.fileobj.write(data.tobytes())
        self.fileobj.flush()

    def __del__(self):
        # Guard: if open() failed in __init__, fileobj was never assigned.
        if getattr(self, 'fileobj', None) is not None:
            self.fileobj.close()
# -------------------------------------------------------------------
class DynamicOverlapMonitor:
    # Computes, per call, the full band-band overlap matrix S_nn for every
    # k-point using the overlap operator's matrix-element machinery.
    def __init__(self, wfs, overlap, interval=1):
        """Monitor the orbital overlap matrix of the current wavefunctions.

        wfs      -- wavefunction object; update() reads wfs.kpt_u
        overlap  -- overlap object providing .setups and .operator
        interval -- amount added to self.niter per __call__ (a step counter)
        """
        self.niter = 0
        self.interval = interval
        self.setups = overlap.setups
        self.operator = overlap.operator
        self.wfs = wfs
    def __call__(self):
        # Take one snapshot and advance the step counter.
        self.update(self.wfs)
        self.niter += self.interval
    def update(self, wfs, calculate_P_ani=False):
        """Build S_nn per k-point and pass the stacked result to write().

        Raises NotImplementedError if calculate_P_ani is True; the projector
        coefficients must already be present on each kpt.
        """
        #strictly serial XXX!
        S_unn = []
        for kpt in wfs.kpt_u:
            psit_nG = kpt.psit_nG
            P_ani = kpt.P_ani
            if calculate_P_ani:
                #wfs.pt.integrate(psit_nG, P_ani, kpt.q)
                raise NotImplementedError('In case you were wondering, TODO XXX')
            # Construct the overlap matrix:
            # Identity for the smooth part; PAW corrections come via dS_aii.
            S = lambda x: x
            dS_aii = dict([(a, self.setups[a].dO_ii) for a in P_ani])
            S_nn = self.operator.calculate_matrix_elements(psit_nG, P_ani,
                                                           S, dS_aii)
            S_unn.append(S_nn)
        self.write(np.array(S_unn))
    def write(self, data):
        # Hook for subclasses; the base class discards the data.
        pass
class DynamicOverlapWriter(DynamicOverlapMonitor):
    """Dynamic-overlap monitor that appends each snapshot to a raw binary file."""

    def __init__(self, filename, wfs, overlap, interval=1):
        DynamicOverlapMonitor.__init__(self, wfs, overlap, interval)
        # Binary mode: ndarray bytes cannot be written to a text-mode file
        # under Python 3.
        self.fileobj = open(filename, 'wb')

    def write(self, data):
        # tobytes() replaces ndarray.tostring(), removed in NumPy 2.0.
        self.fileobj.write(data.tobytes())
        self.fileobj.flush()

    def __del__(self):
        # Guard: if open() failed in __init__, fileobj was never assigned.
        if getattr(self, 'fileobj', None) is not None:
            self.fileobj.close()
|
qsnake/gpaw
|
gpaw/tddft/utils.py
|
Python
|
gpl-3.0
| 5,250
|
[
"GPAW"
] |
66954687947ed918a02d7d2c564edb86e5a225be3c7c4f60d5e4148ee474a2ff
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
# pylint: disable=no-init,invalid-name,too-many-public-methods,broad-except
from __future__ import (absolute_import, division, print_function)
import unittest
from mantid.geometry import CrystalStructure
class CrystalStructureTest(unittest.TestCase):
    """Tests for the Python binding of mantid.geometry.CrystalStructure."""

    def createCrystalStructureOrRaise(self, unitCell, spaceGroup, atomStrings):
        """Return True if the arguments yield a valid CrystalStructure."""
        try:
            CrystalStructure(unitCell, spaceGroup, atomStrings)
        except Exception:
            return False
        return True

    def test_creation(self):
        # Constructions that are expected to succeed.
        self.assertTrue(self.createCrystalStructureOrRaise("5.43 5.43 5.43", "F d -3 m", "Al 1/3 0.454 1/12 1.0 0.01"))
        self.assertTrue(self.createCrystalStructureOrRaise("5.43 5.43 5.43", "C m m m", "Al 1/3 0.454 1/12 1.0 0.01;\n"
                                                                                        "Si 2/3 0.121 1/8"))
        self.assertTrue(
            self.createCrystalStructureOrRaise("5.43 5.43 5.43 90 90 120", "R -3 c", "Al 1/3 0.454 1/12 1.0 0.01;\n"
                                                                                     "Si 2/3 0.121 1/8"))
        # Broken unit cell string (four values).
        self.assertFalse(
            self.createCrystalStructureOrRaise("5.43 5.43 5.43 90.0", "C m m m", "Al 1/3 0.454 1/12 1.0 0.01"))
        # Unknown space group symbol.
        self.assertFalse(
            self.createCrystalStructureOrRaise("5.43 5.43 5.43", "INVALID", "Al 1/3 0.454 1/12 1.0 0.01"))
        # Incomplete atom definition.
        self.assertFalse(
            self.createCrystalStructureOrRaise("5.43 5.43 5.43", "C m c e", "Al 1/3 0"))

    def test_UnitCell(self):
        cs = CrystalStructure("5.43 5.42 5.41", "F d -3 m", "Al 1/3 0.454 1/12 1.0 0.01")
        uc = cs.getUnitCell()
        # Three distinct lengths verify a/b/c are not mixed up.
        self.assertEqual(uc.a(), 5.43)
        self.assertEqual(uc.b(), 5.42)
        self.assertEqual(uc.c(), 5.41)

    def test_SpaceGroup(self):
        cs = CrystalStructure("5.43 5.42 5.41", "F d -3 m", "Al 1/3 0.454 1/12 1.0 0.01")
        self.assertEqual(cs.getSpaceGroup().getHMSymbol(), "F d -3 m")

    def test_scatterers(self):
        atom_spec = "Al 1/3 0.454 1/12 1 0.01;Si 0.1 0.2 0.3 0.99 0.1"
        cs = CrystalStructure("5.43 5.42 5.41", "F d -3 m", atom_spec)
        # Round trip: joining the parsed scatterers reproduces the input.
        self.assertEqual(';'.join(cs.getScatterers()), atom_spec)

    def test_to_string(self):
        atom_spec = "Al 1/3 0.454 1/12 1 0.01;Si 0.1 0.2 0.3 0.99 0.1"
        cs = CrystalStructure("5.43 5.42 5.41", "F d -3 m", atom_spec)
        want_str = "Crystal structure with:\nUnit cell: a = 5.43 b = 5.42 "\
                   "c = 5.41 alpha = 90 beta = 90 gamma = 90\n"\
                   "Centering: All-face centred\nSpace Group: F d -3 m\n"\
                   "Scatterers: Al 1/3 0.454 1/12 1 0.01, "\
                   "Si 0.1 0.2 0.3 0.99 0.1"
        want_repr = "CrystalStructure(\"5.43 5.42 5.41 90 90 90\", "\
                    "\"F d -3 m\", \"Al 1/3 0.454 1/12 1 0.01; "\
                    "Si 0.1 0.2 0.3 0.99 0.1\")"
        self.assertEqual(want_str, str(cs))
        self.assertEqual(want_repr, repr(cs))
        # repr must be evaluable back into an equivalent structure.
        clone = eval(repr(cs))
        self.assertEqual(cs.getUnitCell().a(), clone.getUnitCell().a())
if __name__ == '__main__':
    # Standard unittest entry point when the test file is run directly.
    unittest.main()
|
mganeva/mantid
|
Framework/PythonInterface/test/python/mantid/geometry/CrystalStructureTest.py
|
Python
|
gpl-3.0
| 3,785
|
[
"CRYSTAL"
] |
8f9dd6e07fd2725ee7accd92b626bc5840a166eedea3f6fad4c95aba8c473643
|
import zmq
from .back_queue import BackQueue
from .front_queue import FrontQueue
from multiprocessing import Process
import logging
from mosquito.messages import DataList
class Coordinator(object):
    """
    Coordinator used to keep track of all the URLs seen and what URLs to crawl
    next.

    Spawns a FrontQueue and a BackQueue process and then brokers between them
    over ZeroMQ: the BackQueue requests URL batches, which the Coordinator
    forwards to the FrontQueue, filters via check_urls(), and sends back.

    NOTE(review): __init__ enters an infinite request loop and never returns;
    the two join() calls after the loop are unreachable as written.
    """
    def __init__(self, roots, inbound_port, outbound_port, front_port,
                 back_port):
        # File-based logger dedicated to coordinator traffic.
        logger = logging.getLogger('coordinator')
        hdlr = logging.FileHandler('coordinator.log')
        formatter = logging.Formatter(
            '%(asctime)-15s %(levelname)s : %(message)s')
        hdlr.setFormatter(formatter)
        logger.addHandler(hdlr)
        logger.setLevel(logging.INFO)
        # Initialize FrontQueue used to prioritize what urls to visit next
        front_queue = Process(target=FrontQueue,
                              args=(inbound_port, front_port, roots))
        front_queue.start()
        # Initialize BackQueue used to be kind to all domains by limiting the
        # amount of request sent to each domain at a time
        batch_size = 1000
        back_queue = Process(target=BackQueue,
                             args=(outbound_port, back_port, batch_size))
        back_queue.start()
        context = zmq.Context()
        # Socket used to receive requests for new URLs from the BackQueue
        self.back_server = context.socket(zmq.REP)
        self.back_server.bind(back_port)
        logger.info("Connected BackQueue to: {}".format(back_port))
        # Socket used to request a new batch of URLs from the FrontQueue
        self.front_client = context.socket(zmq.REQ)
        self.front_client.connect(front_port)
        logger.info("Connected FrontQueue to: {}".format(front_port))
        while True:
            # Wait for a request for urls from the BackQueue
            request = self.back_server.recv_string()
            # Check the request is proper
            # NOTE(review): ignoring a bad request on a REP socket without
            # replying leaves the REQ/REP pair out of step -- confirm intended.
            if not request or request != "REQUEST":
                continue
            # Forward the request to the FrontQueue to retrieve the URLs
            self.front_client.send_string(request)
            # Wait for the FrontQueue to respond
            url_list = DataList(instance=self.front_client.recv())
            # Check all URLs in the response and send them to the BackQueue
            urls = self.check_urls(url_list)
            if urls:
                self.back_server.send(urls.encode())
        # Unreachable: the loop above never breaks.
        front_queue.join()
        back_queue.join()
    def check_urls(self, urls):
        # Filtering hook; currently passes every URL through unchanged.
        return urls
|
MVilstrup/mosquito_1
|
mosquito/coordinator/__init__.py
|
Python
|
apache-2.0
| 2,557
|
[
"VisIt"
] |
692afda166860bb7b111fb7c9797377eedcfe4566805fffbffb0170b72a2a759
|
"""
set precomputation parameters
-----------------------------
Parameters to set the precomputations in order to facilitate the computation
model tasks
"""
# Interpolation cutoff radii used below as both the retrieval radius
# ('info_ret') and the kernel support ('max_r') of the two pst setups.
max_r0 = 20.
max_r1 = 50.
# Gaussian kernel scale parameters, paired with max_r0 / max_r1 respectively
# (used as 'S' in 'pars_w' below). Presumably precomputed from the radii --
# TODO confirm their derivation.
S0 = 3.4105868102821946
S1 = 8.770781287761192
#### Parameters lists
# Quality-value computations: one date-based and one finance-based setup.
pars_qvals = [{'codename': 'porportionaldates',
               'pars': {'method': 'dates',
                        'pars': {'method': 'proportional', 'params': {}}}},
              {'codename': 'tono_mag_diff',
               'pars': {'method': 'financial',
                        'pars': {'method': 'diff_magnitude',
                                 'params': {'methodname': 'raw_finance'}}}}
              ]
# Point-feature extraction setups (raw finance, firm type at CNAE level 2,
# and financial magnitude).
pars_pfeatures = [{'codename': 'raw_finance',
                   'pars': {'method': 'raw_finance', 'pars': {}}},
                  {'codename': 'raw_type_firms_cnae2',
                   'pars': {'method': 'raw_type_firms', 'pars': {'lvl': 2}}},
                  {'codename': 'tono_mag',
                   'pars': {'method': 'financial_magnitude', 'pars': {}}}
                  ]
# Population density setups: Gaussian-weighted averages at the two radii.
pars_pop = [{'codename': 'pst_gaus20',
             'pars': {'method': 'pst',
                      'pars': {'ret': {'info_ret': max_r0},
                               'interpolation': {'f_weight': 'gaussian',
                                                 'pars_w': {'max_r': max_r0,
                                                            'S': S0},
                                                 'f_dens': 'weighted_avg',
                                                 'pars_d': {}}}}},
            {'codename': 'pst_gaus50',
             'pars': {'method': 'pst',
                      'pars': {'ret': {'info_ret': max_r1},
                               'interpolation': {'f_weight': 'gaussian',
                                                 'pars_w': {'max_r': max_r1,
                                                            'S': S1},
                                                 'f_dens': 'weighted_avg',
                                                 'pars_d': {}}}}}
            ]
# Location projection (ellipsoidal, coordinates given in degrees).
pars_locs = [{'codename': 'ellipsoidal_proj',
              'pars': {'method': 'ellipsoidal', 'radians': False}}]
# Region definition: region label taken from column 0.
pars_regs = [{'codename': 'regions', 'pars': {'columns': [0]}}]
|
tgquintela/Mscthesis
|
set_precomputationparameters.py
|
Python
|
mit
| 2,240
|
[
"Gaussian"
] |
fb710b481e37660f7f4433c3fd0d2f363293671d6e8c6319c313d336103827ff
|
#! /usr/bin/env python
"""Write the per-frame RMSD of chain A (residues 1-176) to rmsd_mda.dat."""
from MDAnalysis import *
from MDAnalysis.analysis.align import rotation_matrix
import numpy
import math

# Trajectory universe (topology init.pdb, frames sampled.pos.pdb) and a
# static reference universe holding only the initial structure.
u = Universe("init.pdb", "sampled.pos.pdb")
v = Universe("init.pdb")
mob1 = u.selectAtoms("segid A and resid 1:176")
ref1 = v.selectAtoms("segid A and resid 1:176")
# Context manager guarantees the output file is closed even if a frame fails.
with open('rmsd_mda.dat', 'w') as f:
    for ts in u.trajectory:
        # rotation_matrix returns the optimal rotation and the RMSD after
        # superposition; only the RMSD is recorded here.
        R, rmsd1 = rotation_matrix(mob1.coordinates(), ref1.coordinates())
        f.write('%7.3f\n' % rmsd1)
|
demharters/git_scripts
|
rmsd_1mi5.py
|
Python
|
apache-2.0
| 472
|
[
"MDAnalysis"
] |
d10e9acc42d39c0db92bc6c485664af6563f8d1716925579f1b98bfc0b2fc222
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
#
# Copyright 2016-2021 Blaise Frederick
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $Author: frederic $
# $Date: 2016/07/12 13:50:29 $
# $Id: tide_funcs.py,v 1.4 2016/07/12 13:50:29 frederic Exp $
#
import sys
import time
import numpy as np
import pyfftw
import pylab as pl
import scipy as sp
from numba import jit
from scipy import fftpack, signal
import rapidtide.filter as tide_filt
import rapidtide.fit as tide_fit
import rapidtide.util as tide_util
fftpack = pyfftw.interfaces.scipy_fftpack
pyfftw.interfaces.cache.enable()
# this is here until numpy deals with their fft issue
import warnings
warnings.simplefilter(action="ignore", category=RuntimeWarning)
# ---------------------------------------- Global constants -------------------------------------------
donotusenumba = False
donotbeaggressive = True
# ----------------------------------------- Conditional imports ---------------------------------------
def conditionaljit():
    """Decorator factory: numba-jit the function unless numba is disabled."""
    def decorate(func):
        # Pass the function through untouched when jitting is turned off.
        return func if donotusenumba else jit(func, nopython=False)
    return decorate
def conditionaljit2():
    """Decorator factory: jit only when numba is enabled AND aggressive mode is on."""
    def decorate(func):
        skip = donotusenumba or donotbeaggressive
        return func if skip else jit(func, nopython=False)
    return decorate
def disablenumba():
    """Globally disable numba JIT compilation for the conditional decorators."""
    global donotusenumba
    donotusenumba = True
# --------------------------- Resampling and time shifting functions -------------------------------------------
# NOTE(review): the triple-quoted block below is abandoned, commented-out code
# (it contains an unbalanced parenthesis and an undefined 'newtimeaxis') kept
# as a bare string literal so it never executes. Left untouched here.
"""
class ConvolutionGridder:
    def __init__(self, timeaxis, width, method='gauss', circular=True, upsampleratio=100, doplot=False, debug=False):
        self.upsampleratio = upsampleratio
        self.initstep = timeaxis[1] - timeaxis[0]
        self.initstart = timeaxis[0]
        self.initend = timeaxis[-1]
        self.hiresstep = self.initstep / np.float64(self.upsampleratio)
        if method == 'gauss':
            fullwidth = 2.355 * width
            fullwidthpts = int(np.round(fullwidth / self.hiresstep, 0))
            fullwidthpts += ((fullwidthpts % 2) - 1)
            self.hires_x = np.linspace(-fullwidth / 2.0, fullwidth / 2.0, numpts = fullwidthpts, endpoint=True)
        if method == 'gauss':
            self.hires_y = tide_fit.gauss_eval(self.hires_x, np.array([1.0, 0.0, width])
        if debug:
            print(self.hires_x)
        if doplot:
            fig = pl.figure()
            ax = fig.add_subplot(111)
            ax.set_title('congrid convolution function')
            pl.plot(self.hires_x, self.hires_y)
            pl.legend(('input', 'hires'))
            pl.show()
    def gridded(xvals, yvals):
        if len(xvals) != len(yvals):
            print('x and y vectors do not match - aborting')
            return None
        for i in range(len(xvals)):
            outindices = ((newtimeaxis - self.hiresstart) // self.hiresstep).astype(int)
"""
# Module-level cache of precomputed gridding kernel values used by congrid(),
# keyed by sample offset plus the currently active kernel settings below.
congridyvals = {}
congridyvals["kernel"] = "kaiser"
congridyvals["width"] = 3.0
def congrid(xaxis, loc, val, width, kernel="kaiser", cyclic=True, debug=False):
    """
    Perform a convolution gridding operation with a Kaiser-Bessel or Gaussian kernel of width 'width'. Grid
    parameters are cached for performance.
    Parameters
    ----------
    xaxis: array-like
        The target axis for resampling
    loc: float
        The location, in x-axis units, of the sample to be gridded
    val: float
        The value to be gridded
    width: float
        The width of the gridding kernel in target bins
    kernel: {'old', 'gauss', 'kaiser'}, optional
        The type of convolution gridding kernel. Default is 'kaiser'.
    cyclic: bool, optional
        When True, gridding wraps around the endpoints of xaxis. Default is True.
    debug: bool, optional
        When True, output additional information about the gridding process
    Returns
    -------
    vals: array-like
        The input value, convolved with the gridding kernel, projected on to x axis points
    weights: array-like
        The values of convolution kernel, projected on to x axis points (used for normalization)
    indices: array-like
        The indices along the x axis where the vals and weights fall.
    Notes
    -----
    See IEEE TRANSACTIONS ON MEDICAL IMAGING. VOL. IO.NO. 3, SEPTEMBER 1991
    """
    global congridyvals
    # flush the cached kernel evaluations whenever the kernel type or width changes
    if (congridyvals["kernel"] != kernel) or (congridyvals["width"] != width):
        if congridyvals["kernel"] != kernel:
            if debug:
                print(congridyvals["kernel"], "!=", kernel)
        if congridyvals["width"] != width:
            if debug:
                print(congridyvals["width"], "!=", width)
        if debug:
            print("(re)initializing congridyvals")
        congridyvals = {}
        congridyvals["kernel"] = kernel
        congridyvals["width"] = width * 1.0
    # per-width optimal kernel parameters, indexed by (width - 1.5) / 0.5
    # (values presumably from the gridding literature cited above — TODO confirm)
    optsigma = np.array([0.4241, 0.4927, 0.4839, 0.5063, 0.5516, 0.5695, 0.5682, 0.5974])
    optbeta = np.array([1.9980, 2.3934, 3.3800, 4.2054, 4.9107, 5.7567, 6.6291, 7.4302])
    xstep = xaxis[1] - xaxis[0]
    # warn (but do not abort) when the target location falls outside a non-cyclic axis
    if (loc < xaxis[0] - xstep / 2.0 or loc > xaxis[-1] + xstep / 2.0) and not cyclic:
        print("loc", loc, "not in range", xaxis[0], xaxis[-1])
    # choose the smoothing kernel based on the width
    if kernel != "old":
        if not (1.5 <= width <= 5.0) or (np.fmod(width, 0.5) > 0.0):
            print("congrid: width is", width)
            print("congrid: width must be a half-integral value between 1.5 and 5.0 inclusive")
            sys.exit()
        else:
            kernelindex = int((width - 1.5) // 0.5)
    # find the closest grid point to the target location, calculate relative offsets from this point
    center = tide_util.valtoindex(xaxis, loc)
    offset = np.fmod(np.round((loc - xaxis[center]) / xstep, 3), 1.0) # will vary from -0.5 to 0.5
    if cyclic:
        # wrap the center index across the axis ends when the offset crosses a bin boundary
        if center == len(xaxis) - 1 and offset > 0.5:
            center = 0
            offset -= 1.0
        if center == 0 and offset < -0.5:
            center = len(xaxis) - 1
            offset += 1.0
    if not (-0.5 <= offset <= 0.5):
        print("(loc, xstep, center, offset):", loc, xstep, center, offset)
        print("xaxis:", xaxis)
        sys.exit()
    # cached kernel evaluations are keyed on the (rounded) fractional offset
    offsetkey = str(offset)
    if kernel == "old":
        if debug:
            print("gridding with old kernel")
        # kernel footprint in points; forced to be odd
        widthinpts = int(np.round(width * 4.6 / xstep))
        widthinpts -= widthinpts % 2 - 1
        try:
            yvals = congridyvals[offsetkey]
        except KeyError:
            if debug:
                print("new key:", offsetkey)
            xvals = (
                np.linspace(
                    -xstep * (widthinpts // 2),
                    xstep * (widthinpts // 2),
                    num=widthinpts,
                    endpoint=True,
                )
                + offset
            )
            congridyvals[offsetkey] = tide_fit.gauss_eval(xvals, np.array([1.0, 0.0, width]))
            yvals = congridyvals[offsetkey]
        startpt = int(center - widthinpts // 2)
        # wrap the destination indices around the axis ends
        indices = range(startpt, startpt + widthinpts)
        indices = np.remainder(indices, len(xaxis))
        if debug:
            print("center, offset, indices, yvals", center, offset, indices, yvals)
        return val * yvals, yvals, indices
    else:
        # footprint spans 'width' target bins centered on the fractional location
        offsetinpts = center + offset
        startpt = int(np.ceil(offsetinpts - width / 2.0))
        endpt = int(np.floor(offsetinpts + width / 2.0))
        indices = np.remainder(range(startpt, endpt + 1), len(xaxis))
        try:
            yvals = congridyvals[offsetkey]
        except KeyError:
            if debug:
                print("new key:", offsetkey)
            xvals = indices - center + offset
            if kernel == "gauss":
                sigma = optsigma[kernelindex]
                congridyvals[offsetkey] = tide_fit.gauss_eval(xvals, np.array([1.0, 0.0, sigma]))
            elif kernel == "kaiser":
                beta = optbeta[kernelindex]
                congridyvals[offsetkey] = tide_fit.kaiserbessel_eval(
                    xvals, np.array([beta, width / 2.0])
                )
            else:
                print("illegal kernel value in congrid - exiting")
                sys.exit()
            yvals = congridyvals[offsetkey]
            if debug:
                print("xvals, yvals", xvals, yvals)
        if debug:
            print("center, offset, indices, yvals", center, offset, indices, yvals)
        return val * yvals, yvals, indices
class FastResampler:
    """
    Precompute a heavily upsampled, padded copy of a timecourse so that
    subsequent resamplings onto arbitrary time axes reduce to integer indexing.
    """
    def __init__(
        self,
        timeaxis,
        timecourse,
        padtime=30.0,
        upsampleratio=100,
        doplot=False,
        debug=False,
        method="univariate",
    ):
        """
        Parameters
        ----------
        timeaxis: array-like
            The evenly spaced time axis of the input timecourse
        timecourse: array-like
            The data values on timeaxis
        padtime: float, optional
            Time to pad on each end of the axis (same units as timeaxis)
        upsampleratio: int, optional
            The upsampling factor for the cached high resolution copy
        doplot: bool, optional
            When True, plot the input and high resolution timecourses
        debug: bool, optional
            When True, print diagnostic information
        method: {'univariate', 'cubic', 'quadratic', 'poly', 'fourier'}, optional
            The interpolation method used to build the high resolution copy
        """
        self.upsampleratio = upsampleratio
        self.padtime = padtime
        self.initstep = timeaxis[1] - timeaxis[0]
        self.initstart = timeaxis[0]
        self.initend = timeaxis[-1]
        self.hiresstep = self.initstep / np.float64(self.upsampleratio)
        self.hires_x = np.arange(
            timeaxis[0] - self.padtime,
            self.initstep * len(timeaxis) + self.padtime,
            self.hiresstep,
        )
        self.hiresstart = self.hires_x[0]
        self.hiresend = self.hires_x[-1]
        if method == "poly":
            self.hires_y = 0.0 * self.hires_x
            # BUGFIX: np.int was removed from numpy (>=1.24) - use the builtin int
            self.hires_y[
                int(self.padtime // self.hiresstep)
                + 1 : -(int(self.padtime // self.hiresstep) + 1)
            ] = signal.resample_poly(timecourse, int(self.upsampleratio * 10), 10)
        elif method == "fourier":
            self.hires_y = 0.0 * self.hires_x
            self.hires_y[
                int(self.padtime // self.hiresstep)
                + 1 : -(int(self.padtime // self.hiresstep) + 1)
            ] = signal.resample(timecourse, self.upsampleratio * len(timeaxis))
        else:
            self.hires_y = doresample(timeaxis, timecourse, self.hires_x, method=method)
        # hold the boundary values constant across the pad regions to avoid edge artifacts
        self.hires_y[: int(self.padtime // self.hiresstep)] = self.hires_y[
            int(self.padtime // self.hiresstep)
        ]
        self.hires_y[-int(self.padtime // self.hiresstep) :] = self.hires_y[
            -int(self.padtime // self.hiresstep)
        ]
        if debug:
            print("FastResampler __init__:")
            print(" padtime:, ", self.padtime)
            print(" initstep, hiresstep:", self.initstep, self.hiresstep)
            print(" initial axis limits:", self.initstart, self.initend)
            print(" hires axis limits:", self.hiresstart, self.hiresend)
        # self.hires_y[:int(self.padtime // self.hiresstep)] = 0.0
        # self.hires_y[-int(self.padtime // self.hiresstep):] = 0.0
        if doplot:
            # BUGFIX: module name was misspelled 'matplolib', raising ImportError
            # whenever doplot was True
            import matplotlib.pyplot as pl
            fig = pl.figure()
            ax = fig.add_subplot(111)
            ax.set_title("FastResampler initial timecourses")
            pl.plot(timeaxis, timecourse, self.hires_x, self.hires_y)
            pl.legend(("input", "hires"))
            pl.show()
    def yfromx(self, newtimeaxis, doplot=False, debug=False):
        """
        Evaluate the cached timecourse at the requested time points.
        Parameters
        ----------
        newtimeaxis: array-like
            The time points at which to evaluate the cached timecourse
        doplot: bool, optional
            When True, plot the cached and output timecourses
        debug: bool, optional
            When True, print diagnostic information
        Returns
        -------
        out_y: ndarray
            The cached timecourse evaluated at newtimeaxis
        """
        if debug:
            print("FastResampler: yfromx called with following parameters")
            print(" padtime:, ", self.padtime)
            print(" initstep, hiresstep:", self.initstep, self.hiresstep)
            print(" initial axis limits:", self.initstart, self.initend)
            print(" hires axis limits:", self.hiresstart, self.hiresend)
            print(" requested axis limits:", newtimeaxis[0], newtimeaxis[-1])
        # nearest-neighbor lookup into the high resolution cache
        outindices = ((newtimeaxis - self.hiresstart) // self.hiresstep).astype(int)
        if debug:
            print("len(self.hires_y):", len(self.hires_y))
        try:
            out_y = self.hires_y[outindices]
        except IndexError:
            print("")
            print("indexing out of bounds in FastResampler")
            print(" padtime:, ", self.padtime)
            print(" initstep, hiresstep:", self.initstep, self.hiresstep)
            print(" initial axis limits:", self.initstart, self.initend)
            print(" hires axis limits:", self.hiresstart, self.hiresend)
            print(" requested axis limits:", newtimeaxis[0], newtimeaxis[-1])
            sys.exit()
        if doplot:
            fig = pl.figure()
            ax = fig.add_subplot(111)
            ax.set_title("FastResampler timecourses")
            pl.plot(self.hires_x, self.hires_y, newtimeaxis, out_y)
            pl.legend(("hires", "output"))
            pl.show()
        return out_y
def doresample(orig_x, orig_y, new_x, method="cubic", padlen=0, antialias=False, debug=False):
    """
    Resample data from one spacing to another. By default, does not apply any antialiasing filter.
    Parameters
    ----------
    orig_x: array-like
        The original (evenly spaced) x axis
    orig_y: array-like
        The data values on orig_x
    new_x: array-like
        The x axis to resample onto
    method: {'cubic', 'quadratic', 'univariate'}, optional
        The interpolation method. Default is 'cubic'.
    padlen: int, optional
        Number of reflection-padding points added to each end before interpolating.
    antialias: bool, optional
        When True and downsampling, lowpass filter below the output Nyquist frequency first.
    Returns
    -------
    ndarray or None
        The resampled data, or None if 'method' is not recognized.
    """
    tstep = orig_x[1] - orig_x[0]
    if padlen > 0:
        # extend the x axis with extrapolated points; pad the y data by reflection
        rawxpad = np.linspace(0.0, padlen * tstep, num=padlen, endpoint=False)
        frontpad = rawxpad + orig_x[0] - padlen * tstep
        backpad = rawxpad + orig_x[-1] + tstep
        pad_x = np.concatenate((frontpad, orig_x, backpad))
        pad_y = tide_filt.padvec(orig_y, padlen=padlen)
    else:
        pad_x = orig_x
        pad_y = orig_y
    if debug:
        print("padlen=", padlen)
        print("tstep=", tstep)
        print("lens:", len(pad_x), len(pad_y))
        print(pad_x)
        print(pad_y)
        fig = pl.figure()
        ax = fig.add_subplot(111)
        ax.set_title("Original and padded vector")
        pl.plot(orig_x, orig_y + 1.0, pad_x, pad_y)
        pl.show()
    # antialias and ringstop filter
    init_freq = len(pad_x) / (pad_x[-1] - pad_x[0])
    final_freq = len(new_x) / (new_x[-1] - new_x[0])
    if antialias and (init_freq > final_freq):
        # downsampling - suppress content above the output Nyquist frequency
        aafilterfreq = final_freq / 2.0
        aafilter = tide_filt.NoncausalFilter(filtertype="arb", transferfunc="trapezoidal")
        aafilter.setfreqs(0.0, 0.0, 0.95 * aafilterfreq, aafilterfreq)
        pad_y = aafilter.apply(init_freq, pad_y)
    if method == "cubic":
        cj = signal.cspline1d(pad_y)
        # return tide_filt.unpadvec(
        # np.float64(signal.cspline1d_eval(cj, new_x, dx=(orig_x[1] - orig_x[0]), x0=orig_x[0])), padlen=padlen)
        return signal.cspline1d_eval(cj, new_x, dx=(orig_x[1] - orig_x[0]), x0=orig_x[0])
    elif method == "quadratic":
        qj = signal.qspline1d(pad_y)
        # return tide_filt.unpadvec(
        # np.float64(signal.qspline1d_eval(qj, new_x, dx=(orig_x[1] - orig_x[0]), x0=orig_x[0])), padlen=padlen)
        return signal.qspline1d_eval(qj, new_x, dx=(orig_x[1] - orig_x[0]), x0=orig_x[0])
    elif method == "univariate":
        interpolator = sp.interpolate.UnivariateSpline(pad_x, pad_y, k=3, s=0) # s=0 interpolates
        # return tide_filt.unpadvec(np.float64(interpolator(new_x)), padlen=padlen)
        return np.float64(interpolator(new_x))
    else:
        print("invalid interpolation method")
        return None
def arbresample(
    inputdata,
    init_freq,
    final_freq,
    intermed_freq=0.0,
    method="univariate",
    antialias=True,
    decimate=False,
    debug=False,
):
    """
    Resample a uniformly sampled timecourse from one sampling frequency to another.
    Parameters
    ----------
    inputdata: array-like
        The data to resample
    init_freq: float
        The sampling frequency of the input data, in Hz
    final_freq: float
        The target sampling frequency, in Hz
    intermed_freq: float, optional
        The intermediate upsampling frequency; chosen automatically when <= 0.0
    method: str, optional
        The interpolation method passed to the resampling routines
    antialias: bool, optional
        When True, lowpass filter before reducing the sampling frequency
    decimate: bool, optional
        When True, downsample by integer decimation rather than two-step resampling
    debug: bool, optional
        When True, print diagnostic information
    Returns
    -------
    ndarray
        The resampled data
    """
    if debug:
        print("arbresample - initial points:", len(inputdata))
    if decimate:
        if final_freq > init_freq:
            # upsample only
            upsampled = upsample(inputdata, init_freq, final_freq, method=method, debug=debug)
            if debug:
                print("arbresample - upsampled points:", len(upsampled))
            return upsampled
        elif final_freq < init_freq:
            # downsampling, so upsample by an amount that allows integer decimation
            intermed_freq = final_freq * np.ceil(init_freq / final_freq)
            q = int(intermed_freq // final_freq)
            if debug:
                print(
                    "going from",
                    init_freq,
                    "to",
                    final_freq,
                    ": upsampling to",
                    intermed_freq,
                    "Hz, then decimating by,",
                    q,
                )
            if intermed_freq == init_freq:
                upsampled = inputdata
            else:
                upsampled = upsample(
                    inputdata, init_freq, intermed_freq, method=method, debug=debug
                )
            if debug:
                print("arbresample - upsampled points:", len(upsampled))
            if antialias:
                # scipy's decimate applies its own antialiasing filter
                downsampled = signal.decimate(upsampled, q)
                if debug:
                    print("arbresample - downsampled points:", len(downsampled))
                return downsampled
            else:
                # no antialiasing - sample every qth point via linear interpolation
                initaxis = np.linspace(0, len(upsampled), len(upsampled), endpoint=False)
                print(len(initaxis), len(upsampled))
                f = sp.interpolate.interp1d(initaxis, upsampled)
                downsampled = f(
                    q // 2
                    + q * np.linspace(0, len(upsampled) // q, len(upsampled) // q, endpoint=False)
                )
                return downsampled
        else:
            # frequencies already match - nothing to do
            if debug:
                print("arbresample - final points:", len(inputdata))
            return inputdata
    else:
        # two-step resample: upsample to intermed_freq, filter, then downsample
        if intermed_freq <= 0.0:
            intermed_freq = np.max([2.0 * init_freq, 2.0 * final_freq])
        orig_x = (1.0 / init_freq) * np.linspace(
            0.0, 1.0 * len(inputdata), len(inputdata), endpoint=False
        )
        resampled = dotwostepresample(
            orig_x,
            inputdata,
            intermed_freq,
            final_freq,
            method=method,
            antialias=antialias,
            debug=debug,
        )
        if debug:
            print("arbresample - resampled points:", len(resampled))
        return resampled
def upsample(inputdata, Fs_init, Fs_higher, method="univariate", intfac=False, debug=False):
    """
    Upsample a uniformly sampled timecourse to a higher sampling frequency.

    Parameters
    ----------
    inputdata: array-like
        The data to be upsampled
    Fs_init: float
        The sampling frequency of the input data, in Hz
    Fs_higher: float
        The target sampling frequency, in Hz; must exceed Fs_init
    method: str, optional
        The interpolation method passed to doresample
    intfac: bool, optional
        When True, force the output length to an integer multiple of the input length
    debug: bool, optional
        When True, print timing information

    Returns
    -------
    upsampled_y: ndarray
        The upsampled, lowpass-filtered timecourse
    """
    starttime = time.time()
    if Fs_higher <= Fs_init:
        print("upsample: target frequency must be higher than initial frequency")
        sys.exit()
    # upsample
    orig_x = np.linspace(0.0, (1.0 / Fs_init) * len(inputdata), num=len(inputdata), endpoint=False)
    endpoint = orig_x[-1] - orig_x[0]
    ts_higher = 1.0 / Fs_higher
    # CLEANUP: removed a dead numresamppts assignment that was immediately
    # overwritten by this if/else
    if intfac:
        numresamppts = int(Fs_higher // Fs_init) * len(inputdata)
    else:
        numresamppts = int(endpoint // ts_higher + 1)
    upsampled_x = np.arange(0.0, ts_higher * numresamppts, ts_higher)
    upsampled_y = doresample(orig_x, inputdata, upsampled_x, method=method)
    # lowpass near the original Nyquist frequency to suppress interpolation artifacts
    initfilter = tide_filt.NoncausalFilter(
        filtertype="arb", transferfunc="trapezoidal", debug=debug
    )
    stopfreq = np.min([1.1 * Fs_init / 2.0, Fs_higher / 2.0])
    initfilter.setfreqs(0.0, 0.0, Fs_init / 2.0, stopfreq)
    upsampled_y = initfilter.apply(Fs_higher, upsampled_y)
    if debug:
        print("upsampling took", time.time() - starttime, "seconds")
    return upsampled_y
def dotwostepresample(
    orig_x,
    orig_y,
    intermed_freq,
    final_freq,
    method="univariate",
    antialias=True,
    debug=False,
):
    """
    Resample a timecourse by upsampling to a high intermediate frequency,
    antialias filtering, then downsampling to the final frequency.
    Parameters
    ----------
    orig_x: array-like
        The original x axis
    orig_y: array-like
        The data values on orig_x
    intermed_freq: float
        The intermediate sampling frequency, in Hz; must exceed final_freq
    final_freq: float
        The target sampling frequency, in Hz
    method: str, optional
        The interpolation method passed to doresample
    antialias: bool, optional
        When True, lowpass filter before the final downsampling step
    debug: bool, optional
        When True, print timing information
    Returns
    -------
    resampled_y: ndarray
        The resampled data
    """
    if intermed_freq <= final_freq:
        print("intermediate frequency must be higher than final frequency")
        sys.exit()
    # upsample
    starttime = time.time()
    endpoint = orig_x[-1] - orig_x[0]
    init_freq = len(orig_x) / endpoint
    intermed_ts = 1.0 / intermed_freq
    numresamppts = int(endpoint // intermed_ts + 1)
    intermed_x = intermed_ts * np.linspace(0.0, 1.0 * numresamppts, numresamppts, endpoint=False)
    intermed_y = doresample(orig_x, orig_y, intermed_x, method=method)
    if debug:
        print(
            "init_freq, intermed_freq, final_freq:",
            init_freq,
            intermed_freq,
            final_freq,
        )
        print("intermed_ts, numresamppts:", intermed_ts, numresamppts)
        print("upsampling took", time.time() - starttime, "seconds")
    # antialias and ringstop filter
    if antialias:
        starttime = time.time()
        # the passband must stay below both the input and output Nyquist frequencies
        aafilterfreq = np.min([final_freq, init_freq]) / 2.0
        aafilter = tide_filt.NoncausalFilter(
            filtertype="arb", transferfunc="trapezoidal", debug=debug
        )
        aafilter.setfreqs(0.0, 0.0, 0.95 * aafilterfreq, aafilterfreq)
        antialias_y = aafilter.apply(intermed_freq, intermed_y)
        if debug:
            print("antialiasing took", time.time() - starttime, "seconds")
    else:
        antialias_y = intermed_y
    # downsample
    starttime = time.time()
    final_ts = 1.0 / final_freq
    numresamppts = int(np.ceil(endpoint / final_ts))
    # final_x = np.arange(0.0, final_ts * numresamppts, final_ts)
    final_x = final_ts * np.linspace(0.0, 1.0 * numresamppts, numresamppts, endpoint=False)
    resampled_y = doresample(intermed_x, antialias_y, final_x, method=method)
    if debug:
        print("downsampling took", time.time() - starttime, "seconds")
    return resampled_y
def calcsliceoffset(sotype, slicenum, numslices, tr, multiband=1):
    """
    Calculate the acquisition time offset of a slice within a TR for a given slice ordering.

    Parameters
    ----------
    sotype: int
        The slice acquisition order code (see the table below)
    slicenum: int
        The zero-based index of the slice
    numslices: int
        The total number of slices per volume
    tr: float
        The repetition time
    multiband: int, optional
        The multiband acceleration factor (only used when sotype is 7)

    Returns
    -------
    slicetime: float
        The acquisition time of the slice relative to the start of the TR
    """
    # Slice timing correction
    # 0 : None
    # 1 : Regular up (0, 1, 2, 3, ...)
    # 2 : Regular down
    # 3 : Use slice order file
    # 4 : Use slice timings file
    # 5 : Standard Interleaved (0, 2, 4 ... 1, 3, 5 ... )
    # 6 : Siemens Interleaved (0, 2, 4 ... 1, 3, 5 ... for odd number of slices)
    # (1, 3, 5 ... 0, 2, 4 ... for even number of slices)
    # 7 : Siemens Multiband Interleaved
    # default value of zero
    slicetime = 0.0
    # None
    if sotype == 0:
        slicetime = 0.0
    # Regular up
    # BUGFIX: this previously tested "type == 1" (the builtin), so regular-up
    # ordering always returned 0.0
    if sotype == 1:
        slicetime = slicenum * (tr / numslices)
    # Regular down
    if sotype == 2:
        slicetime = (numslices - slicenum - 1) * (tr / numslices)
    # Slice order file not supported - do nothing
    if sotype == 3:
        slicetime = 0.0
    # Slice timing file not supported - do nothing
    if sotype == 4:
        slicetime = 0.0
    # Standard interleave
    if sotype == 5:
        if (slicenum % 2) == 0:
            # even slice number
            slicetime = (tr / numslices) * (slicenum / 2)
        else:
            # odd slice number
            slicetime = (tr / numslices) * ((numslices + 1) / 2 + (slicenum - 1) / 2)
    # Siemens interleave format
    if sotype == 6:
        if (numslices % 2) == 0:
            # even number of slices - slices go 1,3,5,...,0,2,4,...
            if (slicenum % 2) == 0:
                # even slice number
                slicetime = (tr / numslices) * (numslices / 2 + slicenum / 2)
            else:
                # odd slice number
                slicetime = (tr / numslices) * ((slicenum - 1) / 2)
        else:
            # odd number of slices - slices go 0,2,4,...,1,3,5,...
            if (slicenum % 2) == 0:
                # even slice number
                slicetime = (tr / numslices) * (slicenum / 2)
            else:
                # odd slice number
                slicetime = (tr / numslices) * ((numslices + 1) / 2 + (slicenum - 1) / 2)
    # Siemens multiband interleave format
    if sotype == 7:
        # simultaneously excited slices share a timing, so work per shot
        numberofshots = numslices / multiband
        modslicenum = slicenum % numberofshots
        if (numberofshots % 2) == 0:
            # even number of shots - slices go 1,3,5,...,0,2,4,...
            if (modslicenum % 2) == 0:
                # even slice number
                slicetime = (tr / numberofshots) * (numberofshots / 2 + modslicenum / 2)
            else:
                # odd slice number
                slicetime = (tr / numberofshots) * ((modslicenum - 1) / 2)
        else:
            # odd number of slices - slices go 0,2,4,...,1,3,5,...
            if (modslicenum % 2) == 0:
                # even slice number
                slicetime = (tr / numberofshots) * (modslicenum / 2)
            else:
                # odd slice number
                slicetime = (tr / numberofshots) * (
                    (numberofshots + 1) / 2 + (modslicenum - 1) / 2
                )
    return slicetime
# NB: a positive value of shifttrs delays the signal, a negative value advances it
# timeshift using fourier phase multiplication
def timeshift(inputtc, shifttrs, padtrs, doplot=False, debug=False):
    """
    Shift a timecourse by a (possibly fractional) number of points using
    Fourier-domain phase multiplication.
    Parameters
    ----------
    inputtc: array-like
        The timecourse to shift
    shifttrs: float
        The number of points to shift; positive delays the signal, negative advances it
    padtrs: int
        The number of reflection-padding points added to each end before the FFT
    doplot: bool, optional
        When True, plot the original and shifted vectors
    Returns
    -------
    list
        [shifted timecourse (unpadded), shifted weights (unpadded),
         shifted timecourse (padded), shifted weights (padded)]
    """
    # set up useful parameters
    thelen = np.shape(inputtc)[0]
    thepaddedlen = thelen + 2 * padtrs
    if debug:
        print("timesshift: thelen, padtrs, thepaddedlen=", thelen, padtrs, thepaddedlen)
    imag = 1.0j
    # initialize variables
    preshifted_y = np.zeros(
        thepaddedlen, dtype="float"
    ) # initialize the working buffer (with pad)
    weights = np.zeros(thepaddedlen, dtype="float") # initialize the weight buffer (with pad)
    # now do the math
    preshifted_y[padtrs : padtrs + thelen] = inputtc[:] # copy initial data into shift buffer
    weights[padtrs : padtrs + thelen] = 1.0 # put in the weight vector
    revtc = inputtc[::-1] # reflect data around ends to
    preshifted_y[0:padtrs] = revtc[-padtrs:] # eliminate discontinuities
    preshifted_y[padtrs + thelen :] = revtc[0:padtrs]
    # finish initializations
    fftlen = np.shape(preshifted_y)[0]
    # create the phase modulation timecourse
    initargvec = np.arange(0.0, 2.0 * np.pi, 2.0 * np.pi / float(fftlen)) - np.pi
    if len(initargvec) > fftlen:
        # guard against floating point roundoff producing one extra sample
        initargvec = initargvec[:fftlen]
    # roll so the phase ramp lines up with the FFT frequency ordering
    argvec = np.roll(initargvec * shifttrs, -int(fftlen // 2))
    modvec = np.cos(argvec) - imag * np.sin(argvec)
    # process the data (fft->modulate->ifft->filter)
    fftdata = fftpack.fft(preshifted_y) # do the actual shifting
    shifted_y = fftpack.ifft(modvec * fftdata).real
    # process the weights
    w_fftdata = fftpack.fft(weights) # do the actual shifting
    shifted_weights = fftpack.ifft(modvec * w_fftdata).real
    if doplot:
        xvec = range(0, thepaddedlen) # make a ramp vector (with pad)
        print("shifttrs:", shifttrs)
        print("offset:", padtrs)
        print("thelen:", thelen)
        print("thepaddedlen:", thepaddedlen)
        fig = pl.figure()
        ax = fig.add_subplot(111)
        ax.set_title("Initial vector")
        pl.plot(xvec, preshifted_y)
        fig = pl.figure()
        ax = fig.add_subplot(111)
        ax.set_title("Initial and shifted vector")
        pl.plot(xvec, preshifted_y, xvec, shifted_y)
        pl.show()
    return [
        shifted_y[padtrs : padtrs + thelen],
        shifted_weights[padtrs : padtrs + thelen],
        shifted_y,
        shifted_weights,
    ]
def timewarp(orig_x, orig_y, timeoffset, demean=True, method="univariate", debug=False):
    """
    Resample a timecourse onto a time axis warped by a per-sample delay.

    Parameters
    ----------
    orig_x: array-like
        The uniformly spaced time axis of the input data
    orig_y: array-like
        The data values on orig_x
    timeoffset: array-like
        The time offset to apply at each sample, in the same units as orig_x
    demean: bool, optional
        When True, remove the mean delay before resampling
    method: str, optional
        The interpolation method passed to doresample
    debug: bool, optional
        When True, print diagnostic information

    Returns
    -------
    ndarray
        orig_y resampled at the warped time points
    """
    if demean:
        demeanedoffset = timeoffset - np.mean(timeoffset)
        if debug:
            print("mean delay of ", np.mean(timeoffset), "seconds removed prior to resampling")
    else:
        demeanedoffset = timeoffset
    sampletime = orig_x[1] - orig_x[0]
    # BUGFIX: this was a Python tuple, and dividing a tuple by a scalar on the
    # next line raised TypeError unconditionally - use an ndarray instead
    maxdevs = np.array([np.min(demeanedoffset), np.max(demeanedoffset)])
    maxsamps = maxdevs / sampletime
    padlen = np.min([int(len(orig_x) // 2), int(30.0 / sampletime)])
    if debug:
        print("maximum deviation in samples:", maxsamps)
        print("padlen in samples:", padlen)
    return doresample(orig_x, orig_y, orig_x + demeanedoffset, method=method, padlen=padlen)
|
bbfrederick/rapidtide
|
rapidtide/resample.py
|
Python
|
apache-2.0
| 28,351
|
[
"Gaussian"
] |
507201c93c079d3d8ed4d63a263d5f047df5a57c4b39cb12b72e8ff65e1c69f6
|
# -*- coding: utf-8 -*-
"""
Author
------
Bo Zhang
Email
-----
bozhang@nao.cas.cn
Created on
----------
- Fri Nov 25 12:53:24 2016
Modifications
-------------
-
Aims
----
- utils for apertures
"""
import numpy as np
from joblib import Parallel, delayed
from scipy.interpolate import interp1d
from scipy.signal import medfilt2d
from skimage.filters import gaussian, median
from skimage.morphology import disk
from . import ccdproc_mod as ccdproc
import matplotlib.pyplot as plt
from copy import copy
from .normalization import normalize_spectra_block
from scipy.interpolate import interp1d
from scipy.signal import medfilt2d
# ################################# #
# find starting points
# ################################# #
def find_local_maximas(pixels, n_adj=2, n_smooth=1, n_sep=1):
    """ locate all local maxima in a 1-D pixel slice

    Parameters
    ----------
    pixels:
        a slice of CCD pixels
    n_adj:
        number of adjacent pixels a maximum must dominate on each side
    n_smooth:
        half-width of the running-mean smoothing window
    n_sep:
        minimum separation (in pixels) from the previously accepted maximum

    Returns
    -------
    boolean ndarray flagging the maxima positions
    """
    values = np.asarray(pixels).flatten()
    npts = len(values)
    flags = np.zeros(npts, dtype=bool)
    # causal running-mean smoothing: earlier smoothed values feed later windows
    for k in range(n_smooth, npts - n_smooth):
        values[k] = np.mean(values[k - n_smooth:k + 1 + n_smooth])
    # a pixel is a maximum when it tops its local window and is far enough
    # from the last accepted maximum
    for k in range(n_adj, npts - n_adj):
        window = values[k - n_adj:k + 1 + n_adj]
        is_peak = np.argmax(window) == n_adj
        far_enough = np.sum(flags[k - n_sep:k]) < 1
        flags[k] = is_peak and far_enough
    return flags
def find_local_minimas(pixels, n_adj=2, n_smooth=1, n_sep=10):
    """ locate all local minima in a 1-D pixel slice

    Parameters
    ----------
    pixels:
        a slice of CCD pixels
    n_adj:
        number of adjacent pixels a minimum must undercut on each side
    n_smooth:
        half-width of the running-mean smoothing window
    n_sep:
        minimum separation (in pixels) from the previously accepted minimum

    Returns
    -------
    boolean ndarray flagging the minima positions
    """
    values = np.asarray(pixels).flatten()
    npts = len(values)
    flags = np.zeros(npts, dtype=bool)
    # causal running-mean smoothing: earlier smoothed values feed later windows
    for k in range(n_smooth, npts - n_smooth):
        values[k] = np.mean(values[k - n_smooth:k + 1 + n_smooth])
    # a pixel is a minimum when it bottoms its local window and is far enough
    # from the last accepted minimum
    for k in range(n_adj, npts - n_adj):
        window = values[k - n_adj:k + 1 + n_adj]
        is_valley = np.argmin(window) == n_adj
        far_enough = np.sum(flags[k - n_sep:k]) < 1
        flags[k] = is_valley and far_enough
    return flags
def find_mmax_mmin(im, start_col=2100, n_adj=7, n_smooth=3, n_sep=5,
                   verbose=False):
    """ find reasonable maximas and minimas (cross-kick)
    Parameters
    ----------
    im:
        the 2D CCD image
    start_col:
        the column used to locate candidate apertures
    n_adj, n_smooth, n_sep:
        passed through to find_local_maximas / find_local_minimas
    verbose:
        if True, report the number of candidate apertures found
    Returns
    -------
    smmax, smmin:
        row indices of the surviving maxima / minima at start_col
    Raises
    ------
    ValueError
        if any pixel is flagged as both a maximum and a minimum
    """
    start_col_slice = np.sum(im[:, start_col][:, None], axis=1)
    imax = find_local_maximas(start_col_slice, n_adj=n_adj, n_smooth=n_smooth,
                              n_sep=n_sep)
    imin = find_local_minimas(start_col_slice, n_adj=n_adj, n_smooth=n_smooth,
                              n_sep=n_sep)
    # no pixel could be both max&min
    # BUGFIX: this was "assert" inside a bare try/except - the assert vanishes
    # under "python -O" and the bare except swallowed unrelated errors; test
    # the condition explicitly and raise directly instead.
    if np.sum(imax * imin) != 0:
        raise ValueError("@TWODSPEC: imax*imin != 0")
    # cross kick: keep exactly one minimum between consecutive maxima, then
    # exactly one maximum between consecutive minima
    smax = np.where(imax)[0]
    smin = np.where(imin)[0]
    smmax = []
    smmin = []
    for i in range(len(smax) - 1):
        imin_this = imin[smax[i]:smax[i + 1]]  # ind array for local mins
        smin_this = np.where(imin_this)[0]  # sub array for local mins
        if len(smin_this) == 1:
            smmin.append(smin_this[0] + smax[i])
        elif len(smin_this) > 1:
            # more than 1 local minimas, find the minimal minima
            smmin.append(smin_this[np.argmin(
                start_col_slice[smax[i]:smax[i + 1]][smin_this])] + smax[i])
    # smin = smmin
    for i in range(len(smmin) - 1):
        imax_this = imax[smmin[i]:smmin[i + 1]]  # ind array for local maxs
        smax_this = np.where(imax_this)[0]  # sub array for local maxs
        if len(smax_this) == 1:
            smmax.append(smax_this[0] + smmin[i])
        elif len(smax_this) > 1:
            # more than 1 local maximas, find the maximal maxima
            smmax.append(smax_this[np.argmax(
                start_col_slice[smmin[i]:smmin[i + 1]][smax_this])] + smmin[i])
    if verbose:
        print("@TWODSPEC: %s possible apertures found!" % len(smmax))
    return smmax, smmin
# ################################# #
# find apertures
# ################################# #
def find_apertures(im, start_col=2100, max_drift=5, max_apwidth=10,
                   n_pix_goodap=1500, n_adj=7, n_smooth=3, n_sep=5, c=3,
                   verbose=False):
    """ find apertures from image
    Parameters
    ----------
    im:
        image
    start_col:
        start column
    max_drift:
        max_drift in finding an aperture
    max_apwidth:
        local comparison width
    n_pix_goodap:
        a good aperture should be more than this number of pixels
    n_adj, n_smooth, n_sep:
        passed through to find_mmax_mmin
    c:
        sigma of the column-direction gaussian pre-smoothing (skipped if c <= 0)
    verbose:
        if True, report tracing progress
    Returns
    -------
    ymmax_goodap: ndarray
        the y-pixel values for good apertures
    """
    # gaussian smooth if c>0
    if c > 0:
        im = ccdproc.CCDData(gaussian(im, sigma=(c, 0)), unit=im.unit)
    # find max & min
    smmax, smmin = find_mmax_mmin(im, start_col=start_col, n_adj=n_adj,
                                  n_smooth=n_smooth, n_sep=n_sep)
    # initialize results
    ymmax = np.zeros((len(smmax), im.shape[1]))
    ymmax[:, start_col] = smmax
    ymmin = np.zeros((len(smmin), im.shape[1]))
    ymmin[:, start_col] = smmin
    # tracing apertures: follow the product of adjacent columns (a local
    # cross-correlation) outward from start_col in both directions
    for i_ap in range(ymmax.shape[0]):
        # trace towards larger column numbers
        for i_col in np.arange(start_col + 1, im.shape[1]):
            y0 = ymmax[i_ap, i_col - 1]
            # BUGFIX: np.int was removed from numpy (>=1.24) - use builtin int
            ylo = int(np.max((0, y0 - max_apwidth)))
            yhi = int(np.min((im.shape[0], y0 + 1 + max_apwidth)))
            y1 = np.argmax(im[ylo:yhi, int(i_col)].data *
                           im[ylo:yhi, int(i_col - 1)].data) + y0 - max_apwidth
            if np.abs(y1 - y0) < max_drift:
                # good ap, continue
                ymmax[i_ap, i_col] = y1
            else:
                break
        # trace towards smaller column numbers
        for i_col in np.arange(start_col - 1, 0, -1):
            y0 = ymmax[i_ap, i_col + 1]
            # BUGFIX: slice bounds must be ints - y0 is a float here, and float
            # slicing raises TypeError on modern numpy
            ylo = int(np.max((0, y0 - max_apwidth)))
            yhi = int(np.min((im.shape[0], y0 + 1 + max_apwidth)))
            y1 = np.argmax(im[ylo:yhi, i_col].data *
                           im[ylo:yhi, i_col + 1].data) + y0 - max_apwidth
            if np.abs(y1 - y0) < max_drift:
                # good ap, continue
                ymmax[i_ap, i_col] = y1
            else:
                break
        if verbose:
            print("@TWODSPEC: tracing aperture [%s] " % i_ap)
    # keep only apertures successfully traced over more than n_pix_goodap columns
    ind_goodap = np.sum(ymmax > 0, axis=1) > n_pix_goodap
    ymmax_goodap = ymmax[ind_goodap, :]
    print("@TWODSPEC: number of good aps (max) = %s " % np.sum(ind_goodap))
    return ymmax_goodap
# ################################# #
# combine & group apertures
# ################################# #
def combine_apertures(imlist, n_jobs=10, find_aps_param_dict=None,
                      verbose=False):
    """ combine apertures found from different FLAT images
    Parameters
    ----------
    imlist: list
        list of FLAT
    n_jobs:
        n_jobs for finding apertures in parallel
    find_aps_param_dict:
        the Parameters used in *find_apertures*
    verbose: bool
        if True, report the number of apertures found in each image
    Returns
    -------
    ap_combine:
        the y-coordinates of good apertures found from FLAT list
    """
    # default find_apertures parameters, used when none are supplied
    if find_aps_param_dict is None:
        find_aps_param_dict = dict(start_col=2100, max_drift=9, max_apwidth=13,
                                   n_pix_goodap=1000, n_adj=7, n_smooth=3,
                                   n_sep=5, c=3)
    # trace apertures in every FLAT image in parallel
    ap_list = Parallel(n_jobs=n_jobs, verbose=2)(
        delayed(find_apertures)(im, verbose=verbose, **find_aps_param_dict)
        for im in imlist)
    if verbose:
        print("@TWODSPEC: the numbers of apertures found are: ",
              [ap_.shape[0] for ap_ in ap_list])
    # stack the per-image traces into a single (n_traces, n_cols) array
    ap_combine = np.vstack(ap_list)
    return ap_combine
def group_apertures(ap_comb, start_col=2100, order_dist=10, verbose=False):
    """ merge aperture traces from several images into unique orders
    Parameters
    ----------
    ap_comb:
        stacked aperture traces, one row per trace (zeros where untraced)
    start_col:
        the column at which traces are compared
    order_dist:
        traces closer than this at start_col belong to the same order
    Returns
    -------
    cheb_coefs: list
        the chebyshev polynomial coefs, one entry per unique order
    ap_uorder_interp: ndarray
        the interpolated y-coordinates of each unique order
    """
    n_traces, n_cols = ap_comb.shape
    col_grid = np.arange(n_cols)
    # evaluate a rough degree-2 chebyshev fit of every trace at the reference
    # column, weighting out the untraced (zero) pixels
    ref_pos = np.zeros(n_traces)
    for k in range(n_traces):
        trace = ap_comb[k]
        rough_fit = np.polynomial.chebyshev.chebfit(col_grid, trace, 2, w=trace > 0)
        ref_pos[k] = np.polynomial.chebyshev.chebval(col_grid[start_col], rough_fit)
    pos_sorted = np.sort(ref_pos)
    arg_sorted = np.argsort(ref_pos)
    # walk the sorted reference positions, clustering traces within order_dist
    order_arg_list = []
    consumed = 0
    while consumed < len(pos_sorted) and len(order_arg_list) < n_traces:
        anchor = pos_sorted[consumed]
        members = arg_sorted[np.abs(pos_sorted - anchor) < order_dist]
        order_arg_list.append(members)
        consumed += len(members)
    # refit each unique order using the pooled samples of all member traces
    n_uorder = len(order_arg_list)
    cheb_coefs = []
    ap_uorder_interp = np.zeros((n_uorder, n_cols))
    for k in range(n_uorder):
        members = order_arg_list[k]
        x_data = np.repeat(col_grid[:, None].T, len(members), axis=0).flatten()
        y_data = ap_comb[members, :].flatten()
        pooled_fit = np.polynomial.chebyshev.chebfit(x_data, y_data, 2, w=y_data > 0)
        cheb_coefs.append(pooled_fit)
        ap_uorder_interp[k] = np.polynomial.chebyshev.chebval(col_grid, pooled_fit)
    if verbose:
        print("@TWODSPEC: %s unique orders found!" % n_uorder)
    return cheb_coefs, ap_uorder_interp
# ################################# #
# extract 1d spectra
# ################################# #
def extract_1dspec(im, ap_uorder_interp, ap_width=7, verbose=False):
    """ extract 1-D spectra by summing a fixed-width window around each order

    Parameters
    ----------
    im:
        the 2D CCD image
    ap_uorder_interp:
        y-coordinates of each unique order (one row per order)
    ap_width:
        half-width of the extraction window, in pixels
    verbose:
        if True, report extraction progress

    Returns
    -------
    spec: ndarray
        summed counts per order and column
    sat_mask: ndarray of bool
        True where any pixel in the window exceeds 60000 counts
    """
    n_orders, n_cols = ap_uorder_interp.shape
    centers = ap_uorder_interp.astype(int)
    spec = np.zeros((n_orders, n_cols), dtype=float)
    sat_mask = np.zeros((n_orders, n_cols), dtype=bool)
    for order in range(n_orders):
        for col in range(n_cols):
            row_c = centers[order, col]
            window = im[row_c - ap_width:row_c + ap_width + 1, col]
            spec[order, col] = np.sum(window)
            # flag columns containing (near-)saturated pixels
            sat_mask[order, col] = np.any(window > 60000)
        if verbose:
            print("@TWODSPEC: extracting 1d spec for order [{0}]".format(order))
    return spec, sat_mask
# ################################# #
# find ind for all orders
# ################################# #
def find_ind_order(ap_uorder_interp, ccd_shape, edge_len=(10, 20)):
    """ label every CCD pixel with the order it belongs to

    Pixels strictly between two consecutive aperture boundaries get that
    order's index; pixels below the first boundary get -1 and pixels above
    the last boundary get -2.
    """
    # aperture boundaries: lower edge, midpoints between ridges, upper edge
    mid = np.diff(ap_uorder_interp, axis=0) * .5 + ap_uorder_interp[:-1]
    bounds = np.vstack((ap_uorder_interp[0] - edge_len[0],
                        mid,
                        ap_uorder_interp[-1] + edge_len[1]))
    ind_order = np.zeros(ccd_shape, dtype=int)
    # row-coordinate grid
    my = np.meshgrid(np.arange(ccd_shape[1]), np.arange(ccd_shape[0]))[1]
    # label each order band
    for k in range(bounds.shape[0] - 1):
        lo = bounds[k].reshape(1, -1)
        hi = bounds[k + 1].reshape(1, -1)
        ind_order[np.logical_and(my > lo, my < hi)] = k
        print ("@TWODSPEC: marking pixels of order [%s]" % k)
    # label the edge regions below the first / above the last boundary
    below = np.logical_and(my > -1, my < bounds[0].reshape(1, -1))
    ind_order[below] = -1
    above = np.logical_and(my > bounds[-1].reshape(1, -1), my < ccd_shape[0])
    ind_order[above] = -2
    return ind_order
# ############################################### #
# combine flat according to max counts
# ############################################### #
def combine_flat(flat_list, ap_uorder_interp, sat_count=50000, p=95):
    """ combine FLAT frames by choosing, per order, the exposure with the
    largest counts still below *sat_count*

    Parameters
    ----------
    flat_list : list of 2D arrays
        candidate FLAT exposures
    ap_uorder_interp : ndarray (n_order, npix)
        aperture centers for each unique order
    sat_count : float
        saturation threshold
    p : float
        percentile used as the per-order robust maximum

    Returns
    -------
    flat_final : combined FLAT image
    flat_origin : per-pixel index of the source exposure
    """
    ind_order = find_ind_order(ap_uorder_interp, flat_list[0].shape)
    uorder = np.unique(ind_order)
    uorder_valid = uorder[uorder >= 0]
    # robust per-image, per-order maximum (one row per exposure)
    im_max = np.vstack(
        [find_each_order_max(frame, ind_order, p) for frame in flat_list])
    flat_final = np.zeros_like(flat_list[0])
    flat_origin = np.zeros_like(flat_list[0], dtype=int)
    for this_uorder in uorder:
        pix_this = ind_order == this_uorder
        # NOTE(review): for edge labels (-1/-2) the negative column index
        # wraps to the last orders of im_max -- presumably intended; confirm
        chosen = find_max_under_saturation(im_max[:, this_uorder],
                                           sat_count=sat_count)
        flat_final = np.where(pix_this, flat_list[chosen], flat_final)
        flat_origin[pix_this] = chosen
        print("@TWODSPEC: filling data: image [%s] --> uorder [%s]" %
              (chosen, this_uorder))
    return flat_final, flat_origin
def find_max_under_saturation(max_vals, sat_count=45000):
    """ find the image with maximum value under saturation count number

    Parameters
    ----------
    max_vals : array-like
        per-image maximum counts
    sat_count : float
        saturation threshold

    Returns
    -------
    int
        index of the largest value below sat_count; if every value
        saturates, the index of the smallest (least saturated) value
    """
    max_vals = np.asarray(max_vals)
    if not np.any(max_vals < sat_count):
        # everything saturates: pick the least saturated frame
        return np.argmin(max_vals)
    asort = np.argsort(max_vals)
    # walk up the sorted values; stop just before the first saturated one
    for i in range(1, len(asort)):
        if max_vals[asort[i]] >= sat_count:
            return asort[i - 1]
    # nothing saturates at all; the largest value wins.
    # (BUGFIX: the old fall-through returned the loop variable, which was
    # unbound for a single-element input and raised a NameError)
    return asort[-1]
# This is actually not used.
# I've implemented *find_each_order_max* for extracting info
# for combining FLAT
def extract_ap_ridge_counts(im, ap_interp):
    """ extract the counts along each aperture ridge (one pixel per column)

    Parameters
    ----------
    im : array-like
        2D image
    ap_interp : ndarray (naps, npix)
        ridge row position per order per column

    Returns
    -------
    spec : ndarray (naps, npix)
        ridge pixel values
    sat_mask : ndarray (naps, npix) of bool
        returned for interface symmetry but never set (all False),
        mirroring the original contract

    Notes
    -----
    Kept for reference only; superseded by *find_each_order_max*.
    """
    im = np.asarray(im)
    npix = im.shape[1]
    naps = ap_interp.shape[0]
    # float ridge positions -> integer row indices
    ap_int = ap_interp.astype(int)
    spec = np.zeros(ap_int.shape, dtype=float)
    sat_mask = np.zeros(ap_int.shape, dtype=bool)
    for j in range(npix):
        for i in range(naps):
            yc = ap_int[i, j]
            # BUGFIX: the old code stored ``im[yc, j].data`` -- for a plain
            # ndarray that is a memoryview, not the pixel value
            spec[i, j] = im[yc, j]
    return spec, sat_mask
def find_each_order_max(im, ind_order, p=95):
    """ find the max for each order

    Actually the max value is affected by cosmic rays.
    So, finding the 99/95/90 percentiles could be more useful.

    Parameters
    ----------
    im:
        FLAT image
    ind_order:
        ind of apertures
    p: float
        percentile

    Returns
    -------
    max_list: ndarray of float
        robust maximum (p-th percentile) for each valid order
    """
    uorder = np.unique(ind_order)
    uorder_valid = uorder[uorder >= 0]
    # BUGFIX: zeros_like(uorder_valid) produced an *integer* array, which
    # silently truncated the percentile values; allocate floats instead
    max_list = np.zeros(uorder_valid.shape, dtype=float)
    # NOTE(review): indexing max_list by the order *value* assumes valid
    # orders are 0..n-1 contiguous -- confirm with find_ind_order
    for each_uorder in uorder_valid:
        max_list[each_uorder] = np.percentile(
            im[np.where(ind_order == each_uorder)], p)
        print("@TWODSPEC: finding maximum value for order [%s]" % each_uorder)
    return max_list
# ################################# #
# scatterd light correction
# ################################# #
def substract_scattered_light(im_, ap_uorder_interp, ap_width=10,
                              method='gaussian', method_kwargs=None,
                              shrink=.85):  # 10 should be max
    """ estimate and subtract scattered light from inter-order pixels

    Parameters
    ----------
    im_ :
        2D image (copied into a CCDData; the input is not modified)
    ap_uorder_interp : ndarray (n_order, npix)
        aperture centers
    ap_width : int
        half-width around each ridge excluded from the background estimate
    method : 'gaussian' | 'median'
        smoothing applied to the interpolated scattered-light map
    method_kwargs : dict or None
        extra keyword arguments for the smoothing function
    shrink : float
        factor applied to the scattered light before smoothing

    Returns
    -------
    (im - smooth_scattered_light, smooth_scattered_light)
    """
    im = ccdproc.CCDData(copy(im_))
    assert method in {'gaussian', 'median'}
    if method_kwargs is None:
        # BUGFIX: ``**None`` raised a TypeError when no kwargs were supplied
        method_kwargs = {}
    ind_inter_order = find_inter_order(im, ap_uorder_interp, ap_width=ap_width)
    # fill top/bottom edges with the median of the nearest inter-order rows
    for i_col in range(im.shape[1]):
        ind = np.where(ind_inter_order[:, i_col])[0]
        im.data[0:ind[0], i_col] = np.median(im[ind[0]:ind[0] + 3, i_col])
        im.data[ind[-1]:, i_col] = np.median(im[ind[-1] - 3:ind[-1], i_col])
    # initiate scattered light
    im_scattered_light = np.zeros_like(im)
    # interpolate the inter-order values across each column
    x = np.arange(im.shape[0])
    print("@TWODSPEC: interpolating scattered light ...")
    for i_col in range(im.shape[1]):
        w = ind_inter_order[:, i_col]
        y = im[:, i_col]
        I = interp1d(x[w], y[w], 'linear', bounds_error=False, fill_value=0.)
        im_scattered_light[:, i_col] = I(x)
    # smooth
    print("@TWODSPEC: smoothing scattered light ...")
    # BUGFIX: strings were compared with ``is`` (identity), which is not
    # guaranteed for equal strings; use ``==``
    if method == 'gaussian':
        im_scattered_light_smooth = gaussian(
            im_scattered_light * shrink, **method_kwargs)
    else:  # 'median' -- the assert above guarantees one of the two
        im_scattered_light_smooth = medfilt2d(
            im_scattered_light * shrink, **method_kwargs)
    return im - im_scattered_light_smooth, im_scattered_light_smooth
def find_inter_order(im, ap_uorder_interp, ap_width=10):
    """ boolean mask of the pixels lying *between* apertures

    A pixel is cleared (False) when it falls strictly within +/- ap_width
    rows of an aperture ridge; all remaining pixels stay True.
    """
    mask = np.ones_like(im, dtype=bool)
    # row-coordinate grid
    my = np.meshgrid(np.arange(im.shape[1]), np.arange(im.shape[0]))[1]
    # clear a band around each ridge (note: the last ridge is not visited,
    # matching the original loop bound)
    for k in range(ap_uorder_interp.shape[0] - 1):
        ridge = ap_uorder_interp[k, :].reshape(1, -1)
        near = np.logical_and(my > ridge - ap_width, my < ridge + ap_width)
        mask = np.where(near, False, mask)
        print ("@TWODSPEC: *find_inter_order* marking pixels of order [%s]" %
               k)
    return mask
# ################################# #
# sensitivity variation correction
# ################################# #
def apflatten(flat, ap_uorder_interp, ap_width=(-8, 8), pct=50, **normalization):
    """ IRAF apflatten replication

    Model each aperture of a 2D FLAT as the product of a cross-dispersion
    profile and a smooth 1D continuum, then normalize the FLAT by that model.

    Parameters
    ----------
    flat :
        2D FLAT frame; sliced as flat[y, x] and its ``.data`` attribute is
        read, so a CCDData-like object is assumed -- TODO confirm
    ap_uorder_interp : ndarray (n_order, disp_npix)
        aperture center row for each order at each dispersion pixel
    ap_width : int or (int, int)
        row-offset window around each aperture center
    pct : float
        percentile used to estimate the cross-dispersion profile
    normalization :
        keyword arguments forwarded to *normalize_spectra_block*

    Returns
    -------
    dict
        intermediate and final products; 'flat2d_norm' is the pixel
        sensitivity map, 'flat2d_model' the reconstructed FLAT model,
        'profile2d' the 2D cross-dispersion profile
    """
    # symmetric case
    if not isinstance(ap_width, tuple):
        ap_width = (-ap_width, ap_width)
    ap_npix = ap_width[1] - ap_width[0] + 1
    # fix apertures
    n_order, disp_npix = ap_uorder_interp.shape
    # get slice along dispersion axis
    order, ofst, xcoord, ycoord = get_aperture_index(ap_uorder_interp,
                                                     ap_width=ap_width)
    # slice 2D FLAT
    flat_slc = flat[ycoord, xcoord]
    # get 1d FLAT by sextract method (simple sum)
    flat1d_simple = sextract_all_aperture(
        flat, ap_uorder_interp, ap_width=ap_width)
    # fit 1d FLAT continuum
    flat1d_cont = normalize_spectra_block(
        xcoord[0], flat1d_simple, norm_range=xcoord[0, [0, -1]], **normalization)[1]
    # tile the continuum to match the sliced FLAT shape
    flat_slc_cont = np.tile(
        flat1d_cont, (1, ap_npix)).reshape(ap_npix * n_order, -1)
    # calculate cross-dispersion profile -> for FLAT
    flat1d_profile = np.percentile(flat_slc.data / flat_slc_cont, pct, axis=1)
    # reconstruct sliced 2d FLAT model
    flat_slc_model = flat1d_profile.reshape(-1, 1) * flat_slc_cont
    # normalize 2d FLAT to its model
    flat_slc_norm = flat_slc / flat_slc_model
    # calculate profile -> for STAR
    flat_slc_sum = np.tile(flat1d_simple, (1, ap_npix)).reshape(-1, disp_npix)
    star_slc_profile = flat_slc.data / flat_slc_sum
    # put the sliced results back into full-frame images
    flat2d_norm = np.ones_like(flat)
    flat2d_model = np.ones_like(flat)
    flat2d_norm[ycoord, xcoord] = flat_slc_norm
    flat2d_model[ycoord, xcoord] = flat_slc_model
    profile_slc = np.tile(flat1d_profile.reshape(-1, 1), (1, disp_npix))
    profile2d = np.zeros_like(flat)
    profile2d[ycoord, xcoord] = profile_slc
    return dict(
        flat=flat,
        flat_slc=flat_slc,
        flat1d_simple=flat1d_simple,
        flat1d_cont=flat1d_cont,
        flat_slc_cont=flat_slc_cont,
        flat1d_profile=flat1d_profile,
        flat_slc_model=flat_slc_model,
        flat_slc_norm=flat_slc_norm,
        flat2d_norm=flat2d_norm,
        flat2d_model=flat2d_model,
        profile_slc=profile_slc,
        profile2d=profile2d,
        star_slc_profile=star_slc_profile,
    )
def select_a_specific_aperture(order, ofst, x_coord, y_coord, specific_order=0):
    """ return the (order, offset, x, y) rows belonging to one aperture """
    sel = np.where(order == specific_order)[0]
    return order[sel], ofst[sel], x_coord[sel], y_coord[sel]
def sextract_a_specific_aperture(img, ap_uorder_interp, ap_width=(-8, 8), specific_order=0, func=np.sum):
    """ simple extraction (func along the cross-dispersion axis) of one order """
    # symmetric window shortcut
    if not isinstance(ap_width, tuple):
        ap_width = (-ap_width, ap_width)
    ap_int = ap_uorder_interp.astype(int)
    # build the full index table, then keep only the requested order
    idx = get_aperture_index(ap_int, ap_width=ap_width)
    _, _, xcoord, ycoord = select_a_specific_aperture(
        *idx, specific_order=specific_order)
    return func(img[ycoord, xcoord], axis=0)
def sextract_all_aperture(img, ap_uorder_interp, ap_width=(-8, 8), func=np.sum):
    """ simple extraction (func along the cross-dispersion axis) of all orders """
    # symmetric window shortcut
    if not isinstance(ap_width, tuple):
        ap_width = (-ap_width, ap_width)
    ap_int = ap_uorder_interp.astype(int)
    # index table covering every aperture window
    idx = get_aperture_index(ap_int, ap_width=ap_width)
    spec1d = np.zeros(ap_int.shape)
    for k in range(ap_int.shape[0]):
        # rows of the index table belonging to order k
        _, _, xc, yc = select_a_specific_aperture(*idx, specific_order=k)
        spec1d[k] = func(img[yc, xc], axis=0)
    return spec1d
def get_aperture_index(ap_uorder_interp, ap_width=(-8, 8)):
    """ build the X/Y pixel-index tables covering every aperture window

    Returns
    -------
    order : (n_order*n_width,) int
        order id of each row of the coordinate tables
    ofst : (n_order*n_width,) int
        row offset from the ridge for each row
    x_coord, y_coord : (n_order*n_width, npix) int
        pixel coordinates (column, row) of every window pixel
    """
    ap_int = ap_uorder_interp.astype(int)
    n_order, npix = ap_int.shape
    offsets = np.arange(ap_width[0], ap_width[1] + 1, dtype=int)
    n_width = len(offsets)
    # X: every row of the table is just the dispersion-pixel index
    x_coord = np.tile(np.arange(npix, dtype=int), (n_width * n_order, 1))
    # Y: ridge position plus offset, stacked order by order
    y_coord = np.vstack([ap_int[k] + offsets.reshape(-1, 1)
                         for k in range(n_order)])
    order = np.arange(n_order, dtype=int).repeat(n_width)
    ofst = np.tile(offsets, n_order)
    return order, ofst, x_coord, y_coord
# ################################# #
# apbackground
# ################################# #
def apbackground(img, ap_uorder_interp, offsetlim=(-5, 5),
                 npix_inter=3, kernel_size=(11, 11)):
    """ determine background/scattered light using inter-aperture pixels

    Parameters
    ----------
    img :
        2D image
    ap_uorder_interp : ndarray (n_order, npix)
        aperture centers
    offsetlim : (int, int)
        search range (rows) around the inter-aperture positions when
        locating the flux valley
    npix_inter : int
        number of rows sampled in each inter-aperture valley
    kernel_size : (int, int)
        median-filter kernel applied to the interpolated background

    Returns
    -------
    dict with 'bg' (interpolated background) and 'bg_smoothed'
    """
    img = np.array(img)
    # determine inter-aperture pixels: extrapolate one extra aperture below
    # and above (coefficients [3,-3,1]/[1,-3,3] extrapolate from 3 ridges),
    # then take midpoints between consecutive ridges
    ap_lo = np.sum(np.array([3, -3, 1]).reshape(-1, 1) * ap_uorder_interp[0:3], axis=0)
    ap_hi = np.sum(np.array([1, -3, 3]).reshape(-1, 1) * ap_uorder_interp[-3:], axis=0)
    ap_extended = np.vstack((ap_lo, ap_uorder_interp, ap_hi))
    ap_inter = (ap_extended[1:] + ap_extended[:-1]) * .5
    # determine the valley (minimum-SNR offset) of inter-aperture pixels
    ap_width, sys_offset = apwidth(img, ap_inter, offsetlim=(-8, 8),
                                   ap_npix=npix_inter, method='min')
    # get slice along dispersion axis
    order, ofst, xcoord, ycoord = get_aperture_index(ap_inter, ap_width=ap_width)
    # extract the median of the inter-aperture pixels per valley
    spec1d_inter = sextract_all_aperture(img, ap_inter, ap_width=ap_width, func=np.median)
    spec_slc_tiled = np.tile(spec1d_inter, (1, ap_width[1] - ap_width[0] + 1)
                             ).reshape(-1, ap_inter.shape[1])
    # interpolate the median background over each column
    sl = np.zeros_like(img) * np.nan
    for i_col in range(sl.shape[1]):
        # xcoord_ = xcoord[:, i_col]
        ycoord_ = ycoord[:, i_col]
        sl_ = spec_slc_tiled[:, i_col]
        sl_interp = interp1d(ycoord_, sl_, bounds_error=False)(np.arange(sl.shape[0]))
        # fill the ends with the nearest finite value
        ind_end = np.where(np.isfinite(sl_interp))[0][[0, -1]]
        sl_interp[:ind_end[0]] = sl_interp[ind_end[0]]
        sl_interp[ind_end[-1] + 1:] = sl_interp[ind_end[-1]]
        sl[:, i_col] = sl_interp
    # median filter to suppress residual structure
    sls = medfilt2d(sl, kernel_size=kernel_size)
    res = dict(
        bg=sl,
        bg_smoothed=sls
    )
    return res
# ################################# #
# apwidth
# ################################# #
def _apwidth_grow_window(metric, istart, ap_npix, prefer_higher):
    """ grow an index window [lo, hi] around istart until it spans ap_npix
    entries, each step extending towards the preferred neighbour value """
    lo = hi = istart
    i_min, i_max = 0, len(metric) - 1
    while (hi - lo + 1) < ap_npix:
        if lo - 1 < i_min:
            # pinned at the low end: can only grow upward
            hi += 1
        elif hi + 1 > i_max:
            # pinned at the high end: can only grow downward
            lo -= 1
        else:
            # free on both sides: compare the two candidate neighbours
            # (ties extend the high side, matching the original scans)
            if prefer_higher:
                go_hi = metric[hi + 1] >= metric[lo - 1]
            else:
                go_hi = metric[hi + 1] <= metric[lo - 1]
            if go_hi:
                hi += 1
            else:
                lo -= 1
    return lo, hi


def apwidth(img, ap_uorder_interp, offsetlim=(-8, 8), ap_npix=10, method='max'):
    """ automatically find ap_width for a given ap_npix

    Parameters
    ----------
    img :
        2D image
    ap_uorder_interp : ndarray (n_order, npix)
        aperture ridges
    offsetlim : (int, int)
        range of row offsets searched around the ridge
    ap_npix : int
        number of offsets the returned window must span
    method : 'max' | 'min'
        grow the window around the best ('max') or worst ('min') SNR offset

    Returns
    -------
    ap_width : (lo_offset, hi_offset)
    sys_offset : offset of the extreme SNR value
    """
    img = np.array(img)
    ap_width_max = offsetlim[1] - offsetlim[0] + 1
    assert 1 <= ap_npix <= ap_width_max
    # SNR metric for every offset
    ofst, medsnr_lnsum = apoffset_snr(img, ap_uorder_interp, offsetlim=offsetlim)
    # BUGFIX: the old code compared strings with ``is`` (identity), which is
    # not guaranteed to match equal strings; use ``==``.  The two previous
    # branches were identical except for argmax/argmin and the comparison
    # direction, so they now share _apwidth_grow_window.
    if method == "max":
        istart = np.argmax(medsnr_lnsum)
        prefer_higher = True
    elif method == "min":
        istart = np.argmin(medsnr_lnsum)
        prefer_higher = False
    else:
        raise ValueError("@SONG: invalid method! method={0}".format(method))
    sys_offset = ofst[istart]
    lo, hi = _apwidth_grow_window(medsnr_lnsum, istart, ap_npix, prefer_higher)
    ap_width = (ofst[lo], ofst[hi])
    return ap_width, sys_offset
# ################################# #
# apoffset
# ################################# #
def apoffset_snr(img, ap_uorder_interp, offsetlim=(-8, 8)):
    """ sum of log median-SNR for every row offset around the apertures """
    img = np.array(img)
    n_offset = offsetlim[1] - offsetlim[0] + 1
    # Poisson noise model: variance equals |counts|
    var = np.abs(img)
    snr = img / np.sqrt(var)
    # pixel coordinates of every (order, offset) slice
    order, ofst, xcoord, ycoord = get_aperture_index(
        ap_uorder_interp, ap_width=offsetlim)
    # median SNR along the dispersion axis for each (order, offset) row
    medsnr = np.nanmedian(snr[ycoord, xcoord], axis=1)
    # fold the per-order values together: rows with the same offset are
    # strided n_offset apart in the table
    medsnr_lnsum = np.array([np.nansum(np.log(medsnr[k::n_offset]))
                             for k in np.arange(n_offset, dtype=int)],
                            dtype=float)
    ofst = np.arange(offsetlim[0], offsetlim[1] + 1, dtype=int)
    return ofst, medsnr_lnsum
# ################################# #
# apvariance
# ################################# #
def apvariance():
    # placeholder -- per-aperture variance estimation is not implemented yet
    pass
# ################################# #
# apsaturation
# ################################# #
def apsaturation(img, adulim=65536., gain=4.0):
    """ boolean saturation mask: True where img exceeds adulim * gain """
    threshold = adulim * gain
    return img > threshold
# ################
def get_dispersion_slice_index(ap_uorder_interp, i_order, offset=0):
    """ (row, col) indices tracing order *i_order*, shifted by *offset* rows """
    n_disp = ap_uorder_interp.shape[1]
    indx = np.arange(n_disp, dtype=int)
    indy = np.asarray(np.asarray(ap_uorder_interp[i_order]) + offset,
                      dtype=int)
    return indy, indx
def get_dispersion_slice(im, ap_uorder_interp, i_order, offset=0):
    """ pixel values of *im* along one order's trace """
    rows, cols = get_dispersion_slice_index(
        ap_uorder_interp, i_order, offset=offset)
    return np.array(im)[rows, cols]
def put_dispersion_slice(data, im, ap_uorder_interp, i_order, offset=0):
    """ return a copy of *im* with *data* written along one order's trace """
    rows, cols = get_dispersion_slice_index(
        ap_uorder_interp, i_order, offset=offset)
    # work on a copy so the caller's image is untouched
    out = np.array(copy(im))
    out[rows, cols] = data
    return out
|
hypergravity/hrs
|
twodspec/aperture.py
|
Python
|
bsd-3-clause
| 33,739
|
[
"Gaussian"
] |
d25a935d46e7100f293a3e4a9b98f78352260ad778d9e0a5f648022957951496
|
#!/usr/bin/env python
import numpy as np
"""This module contains MISMIP constants and parameters, as well as functions
computing theoretical steady state profiles corresponding to various MISMIP
experiments.
It should not be cluttered with plotting or NetCDF output code.
"""
def secpera():
    """Return the number of seconds in one year."""
    return 3.15569259747e7
def L():
    """Return the length of the MISMIP domain in meters."""
    return 1800e3
def N(mode):
    """Return the number of grid spaces for a MISMIP 'mode' (1 or 2)."""
    grid_sizes = {1: 150, 2: 1500}
    if mode in grid_sizes:
        return grid_sizes[mode]
    raise ValueError("invalid mode (%s)" % mode)
def A(experiment, step):
    """Ice softness parameter for given experiment and step.

    Raises ValueError for an unknown experiment or an out-of-range step.
    """
    A1 = np.array([4.6416e-24, 2.1544e-24, 1.0e-24,
                   4.6416e-25, 2.1544e-25, 1.0e-25,
                   4.6416e-26, 2.1544e-26, 1.0e-26])
    # Values of A to be used in experiments 1 and 2.
    A3a = np.array([3.0e-25, 2.5e-25, 2.0e-25,
                    1.5e-25, 1.0e-25, 5.0e-26,
                    2.5e-26, 5.0e-26, 1.0e-25,
                    1.5e-25, 2.0e-25, 2.5e-25,
                    3.0e-25])
    # Values of A to be used in experiment 3a.
    A3b = np.array([1.6e-24, 1.4e-24, 1.2e-24,
                    1.0e-24, 8.0e-25, 6.0e-25,
                    4.0e-25, 2.0e-25, 4.0e-25,
                    6.0e-25, 8.0e-25, 1.0e-24,
                    1.2e-24, 1.4e-24, 1.6e-24])
    # Values of A to be used in experiment 3b.
    if experiment in ("1a", "1b", "2a", "2b"):
        values = A1
    elif experiment == "3a":
        values = A3a
    elif experiment == "3b":
        values = A3b
    else:
        raise ValueError("invalid experiment (%s)" % experiment)
    # Steps are 1-based.  BUGFIX: the old bare "except:" only caught
    # too-large steps; step <= 0 silently returned a wrong value via
    # negative indexing.
    if not 1 <= step <= len(values):
        raise ValueError("invalid step (%s) for experiment %s" % (step, experiment))
    return values[step - 1]
def run_length(experiment, step):
    """Return the time interval (years) for an experiment step.

    Experiment 3 uses per-step intervals; all other experiments run 3e4.
    Raises ValueError for an out-of-range experiment-3 step.
    """
    T3a = np.array([3.0e4, 1.5e4, 1.5e4,
                    1.5e4, 1.5e4, 3.0e4,
                    3.0e4, 1.5e4, 1.5e4,
                    3.0e4, 3.0e4, 3.0e4,
                    1.5e4])
    # Time intervals to be used in experiment 3a.
    T3b = np.array([3.0e4, 1.5e4, 1.5e4,
                    1.5e4, 1.5e4, 1.5e4,
                    1.5e4, 3.0e4, 1.5e4,
                    1.5e4, 1.5e4, 1.5e4,
                    1.5e4, 3.0e4, 1.5e4])
    # Time intervals to be used in experiment 3b.
    if experiment == "3a":
        times = T3a
    elif experiment == "3b":
        times = T3b
    else:
        # experiments 1 and 2 always run for 3e4 years
        return 3e4
    # Steps are 1-based.  BUGFIX: the old bare "except:" only caught
    # too-large steps; step <= 0 silently returned a wrong value via
    # negative indexing.
    if not 1 <= step <= len(times):
        raise ValueError("invalid step (%s) for experiment %s" % (step, experiment))
    return times[step - 1]
def rho_i():
    """Return the ice density, kg m-3."""
    return 900.0
def rho_w():
    """Return the water density, kg m-3."""
    return 1000.0
def g():
    """Return the acceleration due to gravity, m s-2.

    Table 2 on page 19 of mismip_4.pdf uses this value (g = 9.8 m s-2).
    """
    return 9.8
def n():
    """Return the Glen flow-law exponent."""
    return 3.0
def a():
    """Return the accumulation rate in m/s (0.3 m/yr converted)."""
    return 0.3 / secpera()
def m(experiment):
    """Return the sliding-law exponent for the experiment."""
    if experiment in ("1a", "2a", "3a"):
        return 1.0 / 3.0
    if experiment in ("1b", "2b", "3b"):
        return 1.0
    raise ValueError("invalid experiment (%s)" % experiment)
def C(experiment):
    """Return the sliding-law coefficient for the experiment."""
    if experiment in ("1a", "2a", "3a"):
        return 7.624e6
    if experiment in ("1b", "2b", "3b"):
        return 7.2082e10
    raise ValueError("invalid experiment (%s)" % experiment)
def b(experiment, x):
    """Bed depth below sea level, so topg(x) = -b(x)."""
    if experiment in ("1a", "1b", "2a", "2b"):
        # linear bed profile
        return -720. + 778.5 * (x / 7.5e5)
    if experiment in ("3a", "3b"):
        # polynomial (overdeepened) bed profile
        xx = x / 7.5e5
        return -(729. - 2184.8 * xx ** 2. + 1031.72 * xx ** 4. - 151.72 * xx ** 6.)
    raise ValueError("invalid experiment (%s)" % experiment)
def b_slope(experiment, x):
    """Return the x-derivative of b(experiment, x)."""
    if experiment in ("1a", "1b", "2a", "2b"):
        # linear bed: constant slope
        return 778.5 / 7.5e5
    if experiment in ("3a", "3b"):
        # term-by-term derivative of the polynomial bed
        xx = x / 7.5e5
        return -(- 2184.8 * (2. / 7.5e5) * xx
                 + 1031.72 * (4. / 7.5e5) * xx ** 3.
                 - 151.72 * (6. / 7.5e5) * xx ** 5.)
    raise ValueError("invalid experiment (%s)" % experiment)
def cold_function(experiment, step, x, theta=0.0):
    """Evaluates function whose zeros define x_g in 'cold' steady marine sheet problem.

    Parameters
    ----------
    experiment : str
        MISMIP experiment name ("1a" ... "3b")
    step : int
        1-based step number (selects the ice softness A)
    x : float
        distance from the divide, meters
    theta : float
        0.0 gives the 'cold' problem; nonzero adds the extra terms
    """
    r = rho_i() / rho_w()
    # flotation thickness at x
    h_f = r ** (-1.) * b(experiment, x)
    b_x = b_slope(experiment, x)
    # steady-state flux: accumulation times distance from the divide
    s = a() * x
    rho_g = rho_i() * g()
    return (theta * a()
            + C(experiment) * s ** (m(experiment) + 1.0) / (rho_g * h_f ** (m(experiment) + 2.))
            - theta * s * b_x / h_f
            - A(experiment, step) * (rho_g * (1.0 - r) / 4.0) ** n() * h_f ** (n() + 1.0))
def x_g(experiment, step, theta=0.0):
    """Computes the theoretical grounding line location using Newton's method."""
    # initial guess depends on the bed geometry
    x = 800.0e3 if experiment in ("3a", "3b") else 1270.0e3
    delta_x = 10.    # finite-difference step (m) for the gradient
    tolf = 1.e-4     # tolerance for the residual
    toldelta = 1.e1  # tolerance for the Newton step size
    normf = tolf + np.finfo(float).eps
    dx = toldelta + 1.0
    # Newton iteration with a forward-difference gradient
    while (normf > tolf) or (abs(dx) > toldelta):
        f = cold_function(experiment, step, x, theta)
        normf = abs(f)
        f_plus = cold_function(experiment, step, x + delta_x, theta)
        grad = (f_plus - f) / delta_x
        dx = -f / grad
        x += dx
    return x
def thickness(experiment, step, x, theta=0.0):
    """Compute ice thickness for x > 0.

    Integrates the grounded surface ODE inland from the grounding line,
    then applies the van der Veen (1986) shelf solution downstream.

    Parameters
    ----------
    experiment : str
        MISMIP experiment name
    step : int
        1-based step number
    x : ndarray
        grid coordinates (m), increasing
    theta : float
        passed through to the grounding-line solver

    Returns
    -------
    ndarray
        ice thickness at each x (grounded part then floating part)
    """
    # compute the grounding line position
    xg = x_g(experiment, step, theta)
    def surface(h, x):
        # surface-slope ODE for the grounded sheet (dh/dx)
        b_x = b_slope(experiment, x)
        rho_g = rho_i() * g()
        s = a() * np.abs(x)
        return b_x - (C(experiment) / rho_g) * s ** m(experiment) / h ** (m(experiment) + 1)
    # extract the grounded part of the grid
    x_grounded = x[x < xg]
    # We will integrate from the grounding line inland. odeint requires that
    # the first point in x_grid be the one corresponding to the initial
    # condition; append it and reverse the order.
    x_grid = np.append(xg, x_grounded[::-1])
    # use thickness at the grounding line as the initial condition
    h_f = b(experiment, xg) * rho_w() / rho_i()
    import scipy.integrate
    thk_grounded = scipy.integrate.odeint(surface, [h_f], x_grid, atol=1.e-9, rtol=1.e-9)
    # now 'result' contains thickness in reverse order, including the grounding
    # line point (which is not on the grid); discard it and reverse the order.
    thk_grounded = np.squeeze(thk_grounded)[:0:-1]
    # extract the floating part of the grid
    x_floating = x[x >= xg]
    # compute the flux through the grounding line
    q_0 = a() * xg
    # Calculate ice thickness for shelf from van der Veen (1986)
    r = rho_i() / rho_w()
    rho_g = rho_i() * g()
    numer = h_f * (q_0 + a() * (x_floating - xg))
    base = q_0 ** (n() + 1) + h_f ** (n() + 1) * ((1 - r) * rho_g / 4) ** n() * A(experiment, step) \
        * ((q_0 + a() * (x_floating - xg)) ** (n() + 1) - q_0 ** (n() + 1)) / a()
    thk_floating = numer / (base ** (1.0 / (n() + 1)))
    return np.r_[thk_grounded, thk_floating]
def plot_profile(experiment, step, out_file):
    """Plot the theoretical geometry profile for one experiment/step.

    Saves the figure to *out_file* (defaults to MISMIP_<exp>_A<step>.pdf).
    NOTE(review): ``hold`` was removed from matplotlib in 3.0, so this
    function requires an older matplotlib -- confirm before modernizing.
    """
    from pylab import figure, subplot, hold, plot, xlabel, ylabel, text, title, axis, vlines, savefig
    if out_file is None:
        out_file = "MISMIP_%s_A%d.pdf" % (experiment, step)
    # theoretical grounding line and thickness profile
    xg = x_g(experiment, step)
    x = np.linspace(0, L(), N(2) + 1)
    thk = thickness(experiment, step, x)
    # split the profile at the grounding line
    x_grounded, thk_grounded = x[x < xg], thk[x < xg]
    x_floating, thk_floating = x[x >= xg], thk[x >= xg]
    figure(1)
    ax = subplot(111)
    hold(True)
    # sea level, bed, upper surface, and shelf base
    plot(x / 1e3, np.zeros_like(x), ls='dotted', color='red')
    plot(x / 1e3, -b(experiment, x), color='black')
    plot(x / 1e3, np.r_[thk_grounded - b(experiment, x_grounded),
                        thk_floating * (1 - rho_i() / rho_w())],
         color='blue')
    plot(x_floating / 1e3, -thk_floating * (rho_i() / rho_w()), color='blue')
    _, _, ymin, ymax = axis(xmin=0, xmax=x.max() / 1e3)
    # mark the grounding line
    vlines(xg / 1e3, ymin, ymax, linestyles='dashed', color='black')
    xlabel('distance from the summit, km')
    ylabel('elevation, m')
    text(0.6, 0.9, "$x_g$ (theory) = %4.0f km" % (xg / 1e3),
         color='black', transform=ax.transAxes)
    title("MISMIP experiment %s, step %d" % (experiment, step))
    savefig(out_file)
if __name__ == "__main__":
    # Command-line driver: plot the theoretical profile for one
    # experiment/step.  (optparse is deprecated in favor of argparse but
    # is kept here unchanged.)
    from optparse import OptionParser
    parser = OptionParser()
    parser.usage = "%prog [options]"
    parser.description = "Plots the theoretical geometry profile corresponding to MISMIP experiment and step."
    parser.add_option("-e", "--experiment", dest="experiment", type="string",
                      default='1a',
                      help="MISMIP experiments (one of '1a', '1b', '2a', '2b', '3a', '3b')")
    parser.add_option("-s", "--step", dest="step", type="int", default=1,
                      help="MISMIP step number")
    parser.add_option("-o", dest="out_file", help="output file name")
    (opts, args) = parser.parse_args()
    plot_profile(opts.experiment, opts.step, opts.out_file)
|
talbrecht/pism_pik07
|
examples/mismip/mismip2d/MISMIP.py
|
Python
|
gpl-3.0
| 9,436
|
[
"NetCDF"
] |
2b067e5b96465f0d5a3894066abc7ef7dc5f3e7dc5fa1bf04761ee8d18e435a0
|
from functools import partial
from logging import getLogger
from pogo_async import PGoApi, exceptions as ex
from pogo_async.auth_ptc import AuthPtc
from pogo_async.utilities import get_cell_ids
from pogo_async.hash_server import HashServer
from asyncio import sleep, Lock, Semaphore, get_event_loop
from random import choice, randint, uniform, triangular
from time import time, monotonic
from array import array
from queue import Empty
from aiohttp import ClientSession, ProxyConnectionError
from .db import SIGHTING_CACHE, MYSTERY_CACHE, Bounds
from .utils import random_sleep, round_coords, load_pickle, load_accounts, get_device_info, get_spawn_id, get_distance, get_start_coords
from .shared import DatabaseProcessor
from . import config
if config.NOTIFY:
    from .notification import Notifier
if config.CONTROL_SOCKS:
    # Tor circuit control: per-proxy timestamps of the last circuit change
    # and counters of accumulated failures.
    from stem import Signal
    from stem.control import Controller
    import stem.util.log
    # silence stem logging below ERROR level (40)
    stem.util.log.get_logger().level = 40
    CIRCUIT_TIME = dict()
    CIRCUIT_FAILURES = dict()
    for proxy in config.PROXIES:
        CIRCUIT_TIME[proxy] = monotonic()
        CIRCUIT_FAILURES[proxy] = 0
else:
    CIRCUIT_TIME = None
    CIRCUIT_FAILURES = None
class Worker:
    """Single worker walking on the map"""
    # hash of the client download settings, sent with requests
    download_hash = "11dcdeb848ed224924f8a8e14d94d620c8966d44"
    # counters shared by every worker instance
    g = {'seen': 0, 'captchas': 0}
    db_processor = DatabaseProcessor()
    spawns = db_processor.spawns
    accounts = load_accounts()
    if config.CACHE_CELLS:
        # cache of previously computed cell ids, persisted via pickle
        cell_ids = load_pickle('cells') or {}
    loop = get_event_loop()
    # throttle concurrent logins / app simulations across all workers
    login_semaphore = Semaphore(config.SIMULTANEOUS_LOGINS)
    sim_semaphore = Semaphore(config.SIMULTANEOUS_SIMULATION)
    # proxy pool shared by all workers (single proxy is applied directly)
    proxies = None
    proxy = None
    if config.PROXIES:
        if len(config.PROXIES) == 1:
            proxy = config.PROXIES.pop()
        else:
            proxies = config.PROXIES.copy()
    if config.NOTIFY:
        notifier = Notifier(spawns)
        g['sent'] = 0
    def __init__(self, worker_no):
        """Claim an account from the shared queues and restore its state.

        Raises ValueError when no account is available for this worker.
        """
        self.worker_no = worker_no
        self.logger = getLogger('worker-{}'.format(worker_no))
        # account information
        # NOTE(review): extra_queue/captcha_queue are attached to the class
        # elsewhere -- not visible in this file section
        try:
            self.account = self.extra_queue.get_nowait()
        except Empty as e:
            try:
                # fall back to accounts parked on the captcha queue
                self.account = self.captcha_queue.get_nowait()
            except Empty as e:
                raise ValueError("You don't have enough accounts for the number of workers specified in GRID.") from e
        self.username = self.account['username']
        self.location = self.account.get('location', get_start_coords(worker_no))
        self.inventory_timestamp = self.account.get('inventory_timestamp')
        # last time of any request
        self.last_request = self.account.get('time', 0)
        # last time of a request that requires user interaction in the game
        self.last_action = self.last_request
        # last time of a GetMapObjects request
        self.last_gmo = self.last_request
        self.items = self.account.get('items', {})
        self.player_level = self.account.get('player_level')
        self.num_captchas = 0
        self.eggs = {}
        self.unused_incubators = []
        # API setup
        if self.proxies:
            self.new_proxy(set_api=False)
        self.initialize_api()
        # State variables
        self.busy = BusyLock()
        self.killed = False
        # Other variables
        self.after_spawn = None
        self.speed = 0
        self.account_start = None
        self.total_seen = 0
        self.error_code = 'INIT'
        # default bag size until GET_PLAYER reports max_item_storage
        self.item_capacity = 350
        self.visits = 0
        self.pokestops = config.SPIN_POKESTOPS
        self.next_spin = 0
    def initialize_api(self):
        """Create a fresh PGoApi instance for this account, restoring a
        saved PTC session when the stored access token is still valid."""
        device_info = get_device_info(self.account)
        self.logged_in = False
        self.ever_authenticated = False
        self.empty_visits = 0
        self.account_seen = 0
        self.api = PGoApi(device_info=device_info)
        if config.HASH_KEY:
            self.api.activate_hash_server(config.HASH_KEY)
        self.api.set_position(*self.location)
        if self.proxy:
            self.api.set_proxy(self.proxy)
        self.api.set_logger(self.logger)
        if self.account.get('provider') == 'ptc' and self.account.get('refresh'):
            # reuse saved PTC tokens to skip a full login round-trip
            self.api._auth_provider = AuthPtc(username=self.username, password=self.account['password'], timeout=config.LOGIN_TIMEOUT)
            self.api._auth_provider.set_refresh_token(self.account.get('refresh'))
            self.api._auth_provider._access_token = self.account.get('auth')
            self.api._auth_provider._access_token_expiry = self.account.get('expiry')
            if self.api._auth_provider.check_access_token():
                # token still valid: mark the worker as already logged in
                self.api._auth_provider._login = True
                self.logged_in = True
                self.ever_authenticated = True
    def new_proxy(self, set_api=True):
        """Rotate to the next proxy from the shared pool, refilling the
        pool from config when it runs out."""
        self.proxy = self.proxies.pop()
        if not self.proxies:
            # pool exhausted: replenish from the configured proxies
            self.proxies.update(config.PROXIES)
        if set_api:
            self.api.set_proxy(self.proxy)
    def swap_circuit(self, reason=''):
        """Request a new Tor circuit for this worker's proxy, at most once
        every 180 seconds."""
        time_passed = monotonic() - CIRCUIT_TIME[self.proxy]
        if time_passed > 180:
            socket = config.CONTROL_SOCKS[self.proxy]
            # signal the Tor controller to build a fresh circuit
            with Controller.from_socket_file(path=socket) as controller:
                controller.authenticate()
                controller.signal(Signal.NEWNYM)
            CIRCUIT_TIME[self.proxy] = monotonic()
            CIRCUIT_FAILURES[self.proxy] = 0
            self.logger.warning('Changed circuit on {p} due to {r}.'.format(
                p=self.proxy, r=reason))
        else:
            self.logger.info('Skipped changing circuit on {p} because it was '
                             'changed {s} seconds ago.'.format(
                                 p=self.proxy, s=time_passed))
    async def login(self):
        """Logs worker in and prepares for scanning

        Retries timed-out authentications up to config.MAX_RETRIES times,
        then runs either the full app-simulation login or just the remote
        config download.  Returns False if killed while waiting, True on
        success.
        """
        self.logger.info('Trying to log in')
        # '»' marks "waiting for a login slot" in the status display
        self.error_code = '»'
        async with self.login_semaphore:
            if self.killed:
                return False
            self.error_code = 'LOGIN'
            # range starts at -1 so MAX_RETRIES+1 attempts are made in total
            for attempt in range(-1, config.MAX_RETRIES):
                try:
                    await self.api.set_authentication(
                        username=self.username,
                        password=self.account['password'],
                        provider=self.account.get('provider', 'ptc'),
                        timeout=config.LOGIN_TIMEOUT
                    )
                except ex.AuthTimeoutException:
                    if attempt >= config.MAX_RETRIES - 1:
                        # out of retries: propagate the timeout
                        raise
                    else:
                        self.logger.warning('Login attempt timed out.')
                else:
                    break
        self.error_code = '°'
        # client version reported to the server
        version = 5302
        async with self.sim_semaphore:
            self.error_code = 'APP SIMULATION'
            if config.APP_SIMULATION and not self.ever_authenticated:
                await self.app_simulation_login(version)
            else:
                await self.download_remote_config(version)
        self.ever_authenticated = True
        self.logged_in = True
        self.error_code = None
        self.account_start = time()
        return True
    async def download_remote_config(self, version):
        """Fetch the remote config and read the player level out of the
        inventory data that piggybacks on the response."""
        request = self.api.create_request()
        request.download_remote_config_version(platform=1, app_version=version)
        responses = await self.call(request, stamp=False, buddy=False, settings=True, dl_hash=False)
        inventory_items = responses.get('GET_INVENTORY', {}).get('inventory_delta', {}).get('inventory_items', [])
        for item in inventory_items:
            player_stats = item.get('inventory_item_data', {}).get('player_stats', {})
            if player_stats:
                # first player_stats entry carries the level
                self.player_level = player_stats.get('level')
                break
        # brief human-like pause between requests
        await random_sleep(.78, .95)
async def set_avatar(self, tutorial=False):
    """Set a random avatar for the account.

    tutorial: when True, also mark tutorial step 1 complete afterwards.
    """
    await random_sleep(7, 14)
    request = self.api.create_request()
    gender = randint(0, 1)
    # valid clothing ranges differ per gender
    if gender == 1:
        # female
        shirt = randint(0, 8)
        pants = randint(0, 5)
        backpack = randint(0, 2)
    else:
        # male
        shirt = randint(0, 3)
        pants = randint(0, 2)
        backpack = randint(0, 5)
    request.set_avatar(player_avatar={
        'skin': randint(0, 3),
        'hair': randint(0, 5),
        'shirt': shirt,
        'pants': pants,
        'hat': randint(0, 4),
        'shoes': randint(0, 6),
        'avatar': gender,
        'eyes': randint(0, 4),
        'backpack': backpack
    })
    await self.call(request, buddy=not tutorial, action=1)
    if tutorial:
        await random_sleep(.3, .5)
        request = self.api.create_request()
        request.mark_tutorial_complete(tutorials_completed=1)
        await self.call(request, buddy=False)
        await random_sleep(2.5, 2.75)
    else:
        await random_sleep(1, 1.2)
    request = self.api.create_request()
    request.get_player_profile()
    await self.call(request, action=1)
async def app_simulation_login(self, version):
    """Replay the RPC sequence the official iOS app sends on startup,
    completing the tutorial or fixing the avatar where needed."""
    self.logger.info('Starting RPC login sequence (iOS app simulation)')
    reset_avatar = False
    # empty request
    request = self.api.create_request()
    await self.call(request, chain=False)
    await random_sleep(.3, .5)
    # request 1: get_player
    request = self.api.create_request()
    request.get_player(player_locale=config.PLAYER_LOCALE)
    responses = await self.call(request, chain=False)
    tutorial_state = None
    try:
        get_player = responses['GET_PLAYER']
        if get_player.get('banned', False):
            raise ex.BannedAccountException
        player_data = get_player['player_data']
        tutorial_state = player_data.get('tutorial_state', [])
        self.item_capacity = player_data['max_item_storage']
        if 'created' not in self.account:
            # creation_timestamp_ms is milliseconds; store seconds
            self.account['created'] = player_data['creation_timestamp_ms'] / 1000
        avatar = player_data['avatar']
        # female avatars (avatar == 1) only support backpacks 0-2
        if avatar['avatar'] == 1 and avatar['backpack'] > 2:
            self.logger.warning('Invalid backpack for female, resetting avatar.')
            reset_avatar = True
    except (KeyError, TypeError, AttributeError):
        # tolerate missing/odd fields; tutorial_state stays None
        pass
    await random_sleep(.7, 1.2)
    # request 2: download_remote_config_version
    await self.download_remote_config(version)
    # request 3: get_asset_digest
    request = self.api.create_request()
    request.get_asset_digest(platform=1, app_version=version)
    await self.call(request, buddy=False, settings=True)
    await random_sleep(.9, 3.1)
    # steps 0, 1, 3, 4 and 7 cover the whole tutorial
    if (config.COMPLETE_TUTORIAL and
            tutorial_state is not None and
            not all(x in tutorial_state for x in (0, 1, 3, 4, 7))):
        self.logger.warning('{} is starting tutorial'.format(self.username))
        await self.complete_tutorial(tutorial_state)
    else:
        # request 4: get_player_profile
        request = self.api.create_request()
        request.get_player_profile()
        await self.call(request, settings=True)
        await random_sleep(.3, .5)
        if self.player_level:
            # request 5: level_up_rewards
            request = self.api.create_request()
            request.level_up_rewards(level=self.player_level)
            await self.call(request, settings=True)
            await random_sleep(.45, .7)
        else:
            self.logger.warning('No player level')
        # request 6: register_background_device
        request = self.api.create_request()
        request.register_background_device(device_type='apple_watch')
        await self.call(request, action=0.1)
        self.logger.info('Finished RPC login sequence (iOS app simulation)')
    if reset_avatar:
        await self.set_avatar()
        await random_sleep(.2, .462)
    self.error_code = None
    return True
async def complete_tutorial(self, tutorial_state):
    """Complete any tutorial steps missing from tutorial_state.

    Steps: 0 legal screen, 1 avatar, 3 starter encounter,
    4 codename, 7 first-time experience.
    """
    self.error_code = 'TUTORIAL'
    if 0 not in tutorial_state:
        # legal screen
        request = self.api.create_request()
        request.mark_tutorial_complete(tutorials_completed=[0])
        await self.call(request, buddy=False)
        await random_sleep(.475, .525)
        request = self.api.create_request()
        request.get_player(player_locale=config.PLAYER_LOCALE)
        await self.call(request, buddy=False)
        await sleep(1)
    if 1 not in tutorial_state:
        # avatar selection
        await self.set_avatar(tutorial=True)
    await random_sleep(.5, .6)
    request = self.api.create_request()
    await self.call(request, chain=False)
    await sleep(.05)
    request = self.api.create_request()
    request.register_background_device(device_type='apple_watch')
    await self.call(request)
    starter_id = None
    if 3 not in tutorial_state:
        # encounter tutorial: download starter assets, then pick one
        await sleep(1)
        request = self.api.create_request()
        request.get_download_urls(asset_id=['1a3c2816-65fa-4b97-90eb-0b301c064b7a/1477084786906000',
                                            'aa8f7687-a022-4773-b900-3a8c170e9aea/1477084794890000',
                                            'e89109b0-9a54-40fe-8431-12f7826c8194/1477084802881000'])
        await self.call(request)
        await random_sleep(5, 10)
        request = self.api.create_request()
        # pick one of the three starters at random
        starter = choice((1, 4, 7))
        request.encounter_tutorial_complete(pokemon_id=starter)
        await self.call(request, action=1)
        await random_sleep(.4, .55)
        request = self.api.create_request()
        request.get_player(player_locale=config.PLAYER_LOCALE)
        responses = await self.call(request)
        # find the caught starter's id in the inventory delta
        inventory = responses.get('GET_INVENTORY', {}).get('inventory_delta', {}).get('inventory_items', [])
        for item in inventory:
            pokemon = item.get('inventory_item_data', {}).get('pokemon_data')
            if pokemon:
                starter_id = pokemon.get('id')
    if 4 not in tutorial_state:
        # name selection: reuse the account username as codename
        await random_sleep(10, 16)
        request = self.api.create_request()
        request.claim_codename(codename=self.username)
        await self.call(request, action=1)
        await random_sleep(1, 1.3)
        request = self.api.create_request()
        request.get_player(player_locale=config.PLAYER_LOCALE)
        await self.call(request)
        await sleep(.1)
        request = self.api.create_request()
        request.mark_tutorial_complete(tutorials_completed=4)
        await self.call(request, buddy=False)
    if 7 not in tutorial_state:
        # first time experience
        await random_sleep(3.75, 4.5)
        request = self.api.create_request()
        request.mark_tutorial_complete(tutorials_completed=7)
        await self.call(request)
    if starter_id:
        # set the newly caught starter as buddy
        await random_sleep(3, 5)
        request = self.api.create_request()
        request.set_buddy_pokemon(pokemon_id=starter_id)
        await self.call(request, action=1)
        await random_sleep(.8, 1.2)
    await sleep(.2)
    return True
def update_inventory(self, inventory_items):
    """Refresh cached item counts, eggs and incubators from an
    inventory delta (a list of inventory_item entries)."""
    for entry in inventory_items:
        data = entry.get('inventory_item_data', {})
        if 'item' in data:
            item = data['item']
            self.items[item.get('item_id')] = item.get('count', 0)
        elif not config.INCUBATE_EGGS:
            continue
        elif 'pokemon_data' in data and data['pokemon_data'].get('is_egg'):
            egg = data['pokemon_data']
            self.eggs[egg.get('id')] = egg
        elif 'egg_incubators' in data:
            free = []
            for incubator in data['egg_incubators'].get('egg_incubator', []):
                if 'pokemon_id' in incubator:
                    # occupied incubator, not usable
                    continue
                if incubator.get('item_id') == 901:
                    # unlimited-use incubator goes to the end of the list
                    free.append(incubator)
                else:
                    free.insert(0, incubator)
            self.unused_incubators = free
async def call(self, request, chain=True, stamp=True, buddy=True, settings=False, dl_hash=True, action=None):
    """Send a prepared request with retries, throttling and error handling.

    chain: append the standard housekeeping sub-requests and process
        their responses (inventory delta, settings hash, CAPTCHA check).
    stamp: only request the inventory delta since the cached timestamp.
    buddy: include get_buddy_walked in the chain.
    settings / dl_hash: include download_settings (with the cached hash).
    action: in-game action duration in seconds; waits before sending and
        pads self.last_action afterwards.

    Raises MaxRetriesException when no usable response was obtained.
    """
    if chain:
        request.check_challenge()
        request.get_hatched_eggs()
        if stamp and self.inventory_timestamp:
            request.get_inventory(last_timestamp_ms=self.inventory_timestamp)
        else:
            request.get_inventory()
        request.check_awarded_badges()
        if settings:
            if dl_hash:
                request.download_settings(hash=self.download_hash)
            else:
                request.download_settings()
        if buddy:
            request.get_buddy_walked()
    try:
        # if the hashing quota is nearly spent, wait for the next period
        refresh = HashServer.status.get('period')
        while HashServer.status.get('remaining') < 5 and time() < refresh:
            self.error_code = 'HASH WAITING'
            wait = refresh - time() + 1
            await sleep(wait)
            refresh = HashServer.status.get('period')
    except TypeError:
        # status values may be None before the first request
        pass
    now = time()
    if action:
        # wait for the time required, or at least a half-second
        if self.last_action > now + .5:
            await sleep(self.last_action - now)
        else:
            await sleep(0.5)
    # range(-1, MAX_RETRIES) gives MAX_RETRIES + 1 total attempts
    for _ in range(-1, config.MAX_RETRIES):
        try:
            response = await request.call()
            if response:
                break
            else:
                raise ex.MalformedResponseException('empty response')
        except (ex.NotLoggedInException, ex.AuthException):
            self.logged_in = False
            await self.login()
            await sleep(2)
        except ex.HashingOfflineException:
            self.logger.warning('Hashing server busy or offline.')
            self.error_code = 'HASHING OFFLINE'
            await sleep(7.5)
        except ex.NianticOfflineException:
            self.logger.warning('Niantic busy or offline.')
            self.error_code = 'NIANTIC OFFLINE'
            await random_sleep()
        except ex.HashingQuotaExceededException:
            self.logger.warning('Exceeded your hashing quota, sleeping.')
            self.error_code = 'QUOTA EXCEEDED'
            refresh = HashServer.status.get('period')
            now = time()
            if refresh:
                if refresh > now:
                    await sleep(refresh - now + 1)
                else:
                    await sleep(5)
            else:
                await sleep(30)
        except ex.NianticThrottlingException:
            self.logger.warning('Server throttling - sleeping for a bit')
            self.error_code = 'THROTTLE'
            await random_sleep(11, 22, 12)
        except ProxyConnectionError:
            self.error_code = 'PROXY ERROR'
            if self.proxies:
                self.logger.error('Proxy connection error, swapping proxy.')
                proxy = self.proxy
                # keep swapping until a different proxy is selected
                while proxy == self.proxy:
                    self.new_proxy()
            else:
                self.logger.error('Proxy connection error')
                await sleep(5)
        except (ex.MalformedResponseException, ex.UnexpectedResponseException) as e:
            self.logger.warning(e)
            self.error_code = 'MALFORMED RESPONSE'
            await random_sleep(10, 14, 11)
    try:
        if not response:
            raise MaxRetriesException
    except Exception:
        # `response` may be unbound if every attempt raised (NameError)
        raise MaxRetriesException
    self.last_request = time()
    if action:
        # pad for time that action would require
        self.last_action = self.last_request + action
    responses = response.get('responses')
    if chain:
        # fold the chained inventory delta into the cached state
        delta = responses.get('GET_INVENTORY', {}).get('inventory_delta', {})
        timestamp = delta.get('new_timestamp_ms')
        inventory_items = delta.get('inventory_items', [])
        if inventory_items:
            self.update_inventory(inventory_items)
        self.inventory_timestamp = timestamp or self.inventory_timestamp
        d_hash = responses.get('DOWNLOAD_SETTINGS', {}).get('hash')
        self.download_hash = d_hash or self.download_hash
        if self.check_captcha(responses):
            self.logger.warning('{} has encountered a CAPTCHA, trying to solve'.format(self.username))
            self.g['captchas'] += 1
            await self.handle_captcha(responses)
    return responses
def travel_speed(self, point):
    '''Fast calculation of travel speed to point'''
    # a busy worker is mid-task; report no speed
    if self.busy.locked():
        return None
    elapsed = max(time() - self.last_request, config.SCAN_DELAY)
    if elapsed > 60:
        self.error_code = None
    meters = get_distance(self.location, point)
    # conversion from meters/second to miles/hour
    return (meters / elapsed) * 2.236936
async def bootstrap_visit(self, point):
    """Attempt a bootstrap visit of `point` up to three times,
    jittering the location between attempts."""
    attempts_left = 3
    while attempts_left:
        if await self.visit(point, bootstrap=True):
            return True
        self.error_code = '∞'
        self.simulate_jitter(0.00005)
        attempts_left -= 1
    return False
async def visit(self, point, bootstrap=False):
    """Wrapper for self.visit_point - runs it a few times before giving up
    Also is capable of restarting in case an error occurs.
    """
    visited = False
    try:
        altitude = self.spawns.get_altitude(point)
        # jitter the altitude within a meter of the known value
        altitude = uniform(altitude - 1, altitude + 1)
        self.location = point + [altitude]
        self.api.set_position(*self.location)
        if not self.logged_in:
            if not await self.login():
                return False
        return await self.visit_point(point, bootstrap=bootstrap)
    except (ex.AuthException, ex.NotLoggedInException):
        self.logger.warning('{} is not authenticated.'.format(self.username))
        self.error_code = 'NOT AUTHENTICATED'
        await sleep(1)
        await self.swap_account(reason='login failed')
    except CaptchaException:
        self.error_code = 'CAPTCHA'
        self.g['captchas'] += 1
        await sleep(1)
        # bench the account for manual/paid CAPTCHA handling
        await self.bench_account()
    except CaptchaSolveException:
        self.error_code = 'CAPTCHA'
        await sleep(1)
        await self.swap_account(reason='solving CAPTCHA failed')
    except MaxRetriesException:
        self.logger.warning('Hit the maximum number of attempt retries.')
        self.error_code = 'MAX RETRIES'
    except ex.TempHashingBanException:
        self.error_code = 'HASHING BAN'
        self.logger.error('Temporarily banned from hashing server for using invalid keys.')
        await sleep(185)
    except ex.BannedAccountException:
        self.error_code = 'BANNED'
        self.logger.warning('{} is banned'.format(self.username))
        await sleep(1)
        await self.remove_account()
    except ProxyConnectionError:
        self.error_code = 'PROXY ERROR'
        if self.proxies:
            self.logger.error('Proxy connection error, swapping proxy.')
            proxy = self.proxy
            # keep swapping until a different proxy is selected
            while proxy == self.proxy:
                self.new_proxy()
        else:
            self.logger.error('Proxy connection error')
            await sleep(5)
    except ex.NianticIPBannedException:
        self.error_code = 'IP BANNED'
        if config.CONTROL_SOCKS:
            # Tor in use: request a new circuit instead of a new proxy
            self.swap_circuit('IP ban')
            await random_sleep(minimum=25, maximum=35)
        elif self.proxies:
            self.logger.warning('Swapping out {} due to IP ban.'.format(
                self.proxy))
            proxy = self.proxy
            while proxy == self.proxy:
                self.new_proxy()
            await random_sleep(minimum=12, maximum=20)
        else:
            self.logger.error('IP banned.')
            await sleep(150)
    except ex.HashServerException as e:
        self.logger.warning(e)
        self.error_code = 'HASHING ERROR'
    except ex.PgoapiError as e:
        self.logger.exception('pgoapi error')
        self.error_code = 'PGOAPI ERROR'
    except Exception:
        self.logger.exception('A wild exception appeared!')
        self.error_code = 'EXCEPTION'
    await sleep(1)
    return False
async def visit_point(self, point, bootstrap=False):
    """Scan one point: fetch map objects, record pokemon/forts/spawn
    points and return how many objects were seen.

    Raises BannedAccountException / UnexpectedResponseException on bad
    GMO status codes.
    """
    if bootstrap:
        self.error_code = '∞'
    else:
        self.error_code = '!'
    latitude, longitude = point
    self.logger.info('Visiting {0[0]:.4f},{0[1]:.4f}'.format(point))
    start = time()
    rounded = round_coords(point, 4)
    if config.CACHE_CELLS and rounded in self.cell_ids:
        cell_ids = list(self.cell_ids[rounded])
    else:
        cell_ids = get_cell_ids(*rounded, radius=500)
        if config.CACHE_CELLS:
            try:
                # store compactly as an unsigned-long array when possible
                self.cell_ids[rounded] = array('L', cell_ids)
            except OverflowError:
                self.cell_ids[rounded] = tuple(cell_ids)
    since_timestamp_ms = [0] * len(cell_ids)
    request = self.api.create_request()
    request.get_map_objects(cell_id=cell_ids,
                            since_timestamp_ms=since_timestamp_ms,
                            latitude=latitude,
                            longitude=longitude)
    # enforce the minimum delay between GMO requests
    diff = self.last_gmo + config.SCAN_DELAY - time()
    if diff > 0:
        await sleep(diff + .25)
    responses = await self.call(request)
    self.last_gmo = time()
    map_objects = responses.get('GET_MAP_OBJECTS', {})
    sent = False
    pokemon_seen = 0
    forts_seen = 0
    points_seen = 0
    if map_objects.get('status') == 3:
        raise ex.BannedAccountException('GMO code 3')
    elif map_objects.get('status') != 1:
        self.logger.warning(
            'MapObjects code: {}'.format(map_objects.get('status')))
        self.empty_visits += 1
        if self.empty_visits > 3:
            reason = '{} empty visits'.format(self.empty_visits)
            await self.swap_account(reason)
        raise ex.UnexpectedResponseException
    time_of_day = map_objects.get('time_of_day', 0)
    if config.ITEM_LIMITS and self.bag_full():
        await self.clean_bag()
    for map_cell in map_objects['map_cells']:
        request_time_ms = map_cell['current_timestamp_ms']
        for pokemon in map_cell.get('wild_pokemons', []):
            pokemon_seen += 1
            # Accurate times only provided in the last 90 seconds
            invalid_tth = (
                pokemon['time_till_hidden_ms'] < 0 or
                pokemon['time_till_hidden_ms'] > 90000
            )
            normalized = self.normalize_pokemon(
                pokemon,
                request_time_ms
            )
            if invalid_tth:
                # fall back to the known despawn time for this spawn point
                despawn_time = self.spawns.get_despawn_time(
                    normalized['spawn_id'], normalized['seen'])
                if despawn_time:
                    normalized['expire_timestamp'] = despawn_time
                    normalized['time_till_hidden_ms'] = (
                        despawn_time * 1000) - request_time_ms
                    normalized['valid'] = 'fixed'
                else:
                    normalized['valid'] = False
            else:
                normalized['valid'] = True
            if config.NOTIFY and self.notifier.eligible(normalized):
                if config.ENCOUNTER:
                    # enrich the notification with encounter IVs
                    normalized.update(await self.encounter(pokemon))
                sent = self.notify(normalized, time_of_day)
            if (normalized not in SIGHTING_CACHE and
                    normalized not in MYSTERY_CACHE):
                self.account_seen += 1
                if (config.ENCOUNTER == 'all' and
                        'individual_attack' not in normalized):
                    try:
                        normalized.update(await self.encounter(pokemon))
                    except Exception:
                        self.logger.exception('Exception during encounter.')
            self.db_processor.add(normalized)
        for fort in map_cell.get('forts', []):
            if not fort.get('enabled'):
                continue
            forts_seen += 1
            if fort.get('type') == 1:  # pokestops
                if 'lure_info' in fort:
                    norm = self.normalize_lured(fort, request_time_ms)
                    pokemon_seen += 1
                    if norm not in SIGHTING_CACHE:
                        self.account_seen += 1
                    self.db_processor.add(norm)
                pokestop = self.normalize_pokestop(fort)
                self.db_processor.add(pokestop)
                # spin only when allowed, bag has room and cooldown passed
                if self.pokestops and not self.bag_full() and time() > self.next_spin:
                    cooldown = fort.get('cooldown_complete_timestamp_ms')
                    if not cooldown or time() > cooldown / 1000:
                        await self.spin_pokestop(pokestop)
            else:
                self.db_processor.add(self.normalize_gym(fort))
        if config.MORE_POINTS or bootstrap:
            for point in map_cell.get('spawn_points', []):
                points_seen += 1
                try:
                    p = (point['latitude'], point['longitude'])
                    if self.spawns.have_point(p) or not Bounds.contain(p):
                        continue
                    self.spawns.add_cell_point(p)
                except (KeyError, TypeError):
                    self.logger.warning('Spawn point exception ignored. {}'.format(point))
                    pass
    if config.INCUBATE_EGGS and len(self.unused_incubators) > 0 and len(self.eggs) > 0:
        await self.incubate_eggs()
    if pokemon_seen > 0:
        self.error_code = ':'
        self.total_seen += pokemon_seen
        self.g['seen'] += pokemon_seen
        self.empty_visits = 0
        if CIRCUIT_FAILURES:
            CIRCUIT_FAILURES[self.proxy] = 0
    else:
        self.empty_visits += 1
        if forts_seen == 0:
            self.error_code = '0 SEEN'
        else:
            self.error_code = ','
        if self.empty_visits > 3:
            reason = '{} empty visits'.format(self.empty_visits)
            await self.swap_account(reason)
        if CIRCUIT_FAILURES:
            CIRCUIT_FAILURES[self.proxy] += 1
            if CIRCUIT_FAILURES[self.proxy] > 20:
                reason = '{} empty visits'.format(
                    CIRCUIT_FAILURES[self.proxy])
                self.swap_circuit(reason)
    self.visits += 1
    if config.MAP_WORKERS:
        # publish this worker's state for the live map
        self.worker_dict.update([(self.worker_no,
            ((latitude, longitude), start, self.speed, self.total_seen,
            self.visits, pokemon_seen, sent))])
    self.logger.info(
        'Point processed, %d Pokemon and %d forts seen!',
        pokemon_seen,
        forts_seen,
    )
    self.update_accounts_dict(auth=False)
    return pokemon_seen + forts_seen + points_seen
async def spin_pokestop(self, pokestop):
    """Spin a pokestop (fort_details then fort_search) and log the result."""
    self.error_code = '$'
    pokestop_location = pokestop['lat'], pokestop['lon']
    distance = get_distance(self.location, pokestop_location)
    # permitted interaction distance - 2 (for some jitter leeway)
    # estimation of spinning speed limit
    if distance > 38 or self.speed > 22:
        return False
    # randomize location up to ~1.4 meters
    self.simulate_jitter(amount=0.00001)
    request = self.api.create_request()
    request.fort_details(fort_id = pokestop['external_id'],
                         latitude = pokestop['lat'],
                         longitude = pokestop['lon'])
    responses = await self.call(request, action=1.5)
    name = responses.get('FORT_DETAILS', {}).get('name')
    request = self.api.create_request()
    request.fort_search(fort_id = pokestop['external_id'],
                        player_latitude = self.location[0],
                        player_longitude = self.location[1],
                        fort_latitude = pokestop['lat'],
                        fort_longitude = pokestop['lon'])
    responses = await self.call(request, action=1)
    result = responses.get('FORT_SEARCH', {}).get('result', 0)
    if result == 1:
        self.logger.info('Spun {}.'.format(name))
    elif result == 2:
        self.logger.info('The server said {n} was out of spinning range. {d:.1f}m {s:.1f}MPH'.format(
            n=name, d=distance, s=self.speed))
    elif result == 3:
        self.logger.warning('{} was in the cooldown period.'.format(name))
    elif result == 4:
        self.logger.warning('Could not spin {n} because inventory was full. {s}'.format(
            n=name, s=sum(self.items.values())))
    elif result == 5:
        self.logger.warning('Could not spin {} because the daily limit was reached.'.format(name))
        # stop trying to spin until the account is swapped
        self.pokestops = False
    else:
        self.logger.warning('Failed spinning {n}: {r}'.format(n=name, r=result))
    self.next_spin = time() + config.SPIN_COOLDOWN
    self.error_code = '!'
    return responses
async def encounter(self, pokemon):
    """Encounter a wild pokemon to obtain its IVs and capture probability.

    Moves the simulated player within interaction range first, then
    returns the (possibly empty) pokemon_data dict from the response.
    """
    pokemon_point = pokemon['latitude'], pokemon['longitude']
    distance_to_pokemon = get_distance(self.location, pokemon_point)
    self.error_code = '~'
    if distance_to_pokemon > 47:
        # move most of the way toward the pokemon (to within ~46m)
        percent = 1 - (46 / distance_to_pokemon)
        lat_change = (self.location[0] - pokemon['latitude']) * percent
        lon_change = (self.location[1] - pokemon['longitude']) * percent
        self.location = [
            self.location[0] - lat_change,
            self.location[1] - lon_change,
            uniform(self.location[2] - 3, self.location[2] + 3)
        ]
        self.api.set_position(*self.location)
        # travel time estimate for the distance walked
        delay_required = (distance_to_pokemon * percent) / 8
        if delay_required < 1.5:
            delay_required = triangular(1.25, 4, 2)
    else:
        self.simulate_jitter()
        delay_required = triangular(1.25, 4, 2)
    if time() - self.last_request < delay_required:
        await sleep(delay_required)
    request = self.api.create_request()
    request = request.encounter(encounter_id=pokemon['encounter_id'],
                                spawn_point_id=pokemon['spawn_point_id'],
                                player_latitude=self.location[0],
                                player_longitude=self.location[1])
    responses = await self.call(request, action=2.25)
    response = responses.get('ENCOUNTER', {})
    pokemon_data = response.get('wild_pokemon', {}).get('pokemon_data', {})
    if 'cp' in pokemon_data:
        # absent IV fields mean a value of zero
        for iv in ('individual_attack',
                   'individual_defense',
                   'individual_stamina'):
            if iv not in pokemon_data:
                pokemon_data[iv] = 0
        pokemon_data['probability'] = response.get(
            'capture_probability', {}).get('capture_probability')
    self.error_code = '!'
    return pokemon_data
def bag_full(self):
    """Whether the item bag has reached the account's capacity."""
    total_items = sum(self.items.values())
    return total_items >= self.item_capacity
async def clean_bag(self):
    """Recycle items that exceed the configured per-item limits."""
    self.error_code = '|'
    rec_items = {}
    limits = config.ITEM_LIMITS
    for item, count in self.items.items():
        if item in limits and count > limits[item]:
            discard = count - limits[item]
            if discard > 50:
                # recycle a random amount to look less mechanical
                rec_items[item] = randint(50, discard)
            else:
                rec_items[item] = discard
    removed = 0
    for item, count in rec_items.items():
        request = self.api.create_request()
        request.recycle_inventory_item(item_id=item, count=count)
        responses = await self.call(request, action=2)
        # result 1 is success
        if responses.get('RECYCLE_INVENTORY_ITEM', {}).get('result', 0) != 1:
            self.logger.warning("Failed to remove item %d", item)
        else:
            removed += count
    self.logger.info("Removed %d items", removed)
    self.error_code = '!'
async def incubate_eggs(self):
    """Put unused incubators on eggs, longest-distance eggs prioritized
    for unlimited incubators."""
    # copy the list, as self.call could modify it as it updates the inventory
    incubators = self.unused_incubators.copy()
    for egg in sorted(self.eggs.values(), key=lambda x: x.get('egg_km_walked_target')):
        if egg.get('egg_incubator_id'):
            # egg is already incubating
            continue
        if not incubators:
            break
        inc = incubators.pop()
        # item_id 901 is the unlimited incubator; limited ones are
        # reserved for 10 km eggs
        if inc.get('item_id') == 901 or egg.get('egg_km_walked_target', 0) > 9:
            request = self.api.create_request()
            request.use_item_egg_incubator(item_id=inc.get('id'), pokemon_id=egg.get('id'))
            responses = await self.call(request, action=5)
            ret = responses.get('USE_ITEM_EGG_INCUBATOR', {}).get('result', 0)
            if ret == 4:
                self.logger.warning("Failed to use incubator because it was already in use.")
            elif ret != 1:
                self.logger.warning("Failed to apply incubator {} on {}, code: {}".format(
                    inc.get('id', 0), egg.get('id', 0), ret))
async def handle_captcha(self, responses):
    """Submit the pending CAPTCHA to 2Captcha, wait for the solution and
    verify it with the game server.

    Raises CaptchaException when the account hit its CAPTCHA allowance,
    CaptchaSolveException when submitting/solving/verification fails.
    """
    if self.num_captchas >= config.CAPTCHAS_ALLOWED:
        self.logger.error("{} encountered too many CAPTCHAs, removing.".format(self.username))
        raise CaptchaException
    self.error_code = 'C'
    self.num_captchas += 1
    self.create_session()
    try:
        params = {
            'key': config.CAPTCHA_KEY,
            'method': 'userrecaptcha',
            'googlekey': '6LeeTScTAAAAADqvhqVMhPpr_vB9D364Ia-1dSgK',
            'pageurl': responses.get('CHECK_CHALLENGE', {}).get('challenge_url'),
            'json': 1
        }
        async with self.session.post('http://2captcha.com/in.php', params=params, timeout=10) as resp:
            response = await resp.json()
    except Exception as e:
        self.logger.error('Got an error while trying to solve CAPTCHA. '
                          'Check your API Key and account balance.')
        raise CaptchaSolveException from e
    code = response.get('request')
    if response.get('status') != 1:
        # key-related errors are permanent: stop trying to solve CAPTCHAs
        if code in ('ERROR_WRONG_USER_KEY', 'ERROR_KEY_DOES_NOT_EXIST', 'ERROR_ZERO_BALANCE'):
            config.CAPTCHA_KEY = None
            self.logger.error('2Captcha reported: {}, disabling CAPTCHA solving'.format(code))
        else:
            self.logger.error("Failed to submit CAPTCHA for solving: {}".format(code))
        raise CaptchaSolveException
    try:
        # Get the response, retry every 5 seconds if it's not ready
        params = {
            'key': config.CAPTCHA_KEY,
            'action': 'get',
            'id': code,
            'json': 1
        }
        while True:
            async with self.session.get("http://2captcha.com/res.php", params=params, timeout=20) as resp:
                response = await resp.json()
            if response.get('request') != 'CAPCHA_NOT_READY':
                break
            await sleep(5)
    except Exception as e:
        self.logger.error('Got an error while trying to solve CAPTCHA. '
                          'Check your API Key and account balance.')
        raise CaptchaSolveException from e
    token = response.get('request')
    if not response.get('status') == 1:
        self.logger.error("Failed to get CAPTCHA response: {}".format(token))
        raise CaptchaSolveException
    request = self.api.create_request()
    request.verify_challenge(token=token)
    try:
        responses = await self.call(request, action=4)
        self.update_accounts_dict()
        self.logger.warning("Successfully solved CAPTCHA")
    except CaptchaException:
        # Bug fix: the original referenced an undefined name `captcha_id`
        # here, raising NameError instead of retrying; log the attempt
        # counter instead.
        self.logger.warning("CAPTCHA #{} for {} was not solved correctly, trying again".format(
            self.num_captchas, self.username))
        # try again
        await self.handle_captcha(responses)
def simulate_jitter(self, amount=0.00002):
    '''Slightly randomize location, by up to ~2.8 meters by default.'''
    lat, lon, alt = self.location
    self.location = [
        uniform(lat - amount, lat + amount),
        uniform(lon - amount, lon + amount),
        # altitude always jitters by up to one meter
        uniform(alt - 1, alt + 1),
    ]
    self.api.set_position(*self.location)
def notify(self, norm, time_of_day):
    """Forward a normalized sighting to the notifier; returns whether a
    notification was actually sent and bumps the global counter if so."""
    self.error_code = '*'
    result = self.notifier.notify(norm, time_of_day)
    if result:
        self.g['sent'] += 1
    self.error_code = '!'
    return result
def update_accounts_dict(self, captcha=False, banned=False, auth=True):
    """Persist this worker's current state into the shared accounts dict.

    captcha/banned: flag the account accordingly.
    auth: also persist the auth provider's tokens (when available).
    """
    self.account['captcha'] = captcha
    self.account['banned'] = banned
    self.account['location'] = self.location
    self.account['time'] = self.last_request
    self.account['inventory_timestamp'] = self.inventory_timestamp
    self.account['items'] = self.items
    self.account['level'] = self.player_level
    if auth and self.api._auth_provider:
        self.account['refresh'] = self.api._auth_provider._refresh_token
        if self.api._auth_provider.check_access_token():
            self.account['auth'] = self.api._auth_provider._access_token
            self.account['expiry'] = self.api._auth_provider._access_token_expiry
        else:
            # expired tokens are cleared so they are not reused
            self.account['auth'] = self.account['expiry'] = None
    self.accounts[self.username] = self.account
async def remove_account(self):
    """Drop the current (banned) account and switch to a fresh one."""
    self.error_code = 'REMOVING'
    self.logger.warning('Removing {} due to ban.'.format(self.username))
    self.update_accounts_dict(banned=True)
    await self.new_account()
async def bench_account(self):
    """Park the current account on the CAPTCHA queue and switch to a
    fresh one."""
    self.error_code = 'BENCHING'
    self.logger.warning('Swapping {} due to CAPTCHA.'.format(self.username))
    self.update_accounts_dict(captcha=True)
    self.captcha_queue.put(self.account)
    await self.new_account()
async def swap_account(self, reason='', lock=False):
    """Return the current account to the extras queue and take another.

    lock: acquire self.busy first (released later by new_account).
    """
    self.error_code = 'SWAPPING'
    self.logger.warning('Swapping out {u} because {r}.'.format(
        u=self.username, r=reason))
    if lock:
        await self.busy.acquire()
    self.update_accounts_dict()
    # wait for a replacement account to become available
    while self.extra_queue.empty():
        if self.killed:
            return False
        await sleep(15)
    self.extra_queue.put(self.account)
    await self.new_account(lock)
async def new_account(self, lock=False):
    """Take the next available account (preferring solved-CAPTCHA ones
    when a key is configured) and reset per-account state."""
    captcha = False
    while self.extra_queue.empty():
        # fall back to the CAPTCHA queue when solving is enabled
        if config.CAPTCHA_KEY and not self.captcha_queue.empty():
            captcha = True
            break
        if self.killed:
            return False
        await sleep(15)
    if captcha:
        self.account = self.captcha_queue.get()
    else:
        self.account = self.extra_queue.get()
    self.username = self.account.get('username')
    self.location = self.account.get('location', get_start_coords(self.worker_no))
    self.inventory_timestamp = self.account.get('inventory_timestamp')
    self.player_level = self.account.get('player_level')
    self.last_request = self.account.get('time', 0)
    self.last_action = self.last_request
    self.last_gmo = self.last_request
    self.items = self.account.get('items', {})
    self.num_captchas = 0
    self.pokestops = config.SPIN_POKESTOPS
    self.eggs = {}
    self.unused_incubators = []
    self.initialize_api()
    self.error_code = None
    if lock:
        # release the lock taken by swap_account(lock=True)
        self.busy.release()
def seen_per_second(self, now):
    """Average Pokemon seen per second on this account, or None when the
    account has been active under two minutes (or not started yet)."""
    try:
        active = now - self.account_start
        if active < 120:
            return None
        return self.account_seen / active
    except TypeError:
        # account_start is None before the first successful login
        return None
def kill(self):
    """Marks worker as killed
    Killed worker won't be restarted.
    """
    self.error_code = 'KILLED'
    self.killed = True
    # persist state only for accounts that actually logged in
    if self.ever_authenticated:
        self.update_accounts_dict()
@classmethod
def create_session(cls):
    """Return the shared HTTP session, creating it on first use.

    Bug fix: the original returned None on the creation path; the
    freshly created session is now returned as well.
    """
    try:
        return cls.session
    except AttributeError:
        cls.session = ClientSession(loop=cls.loop)
        return cls.session
@classmethod
def close_session(cls):
    """Close the shared HTTP session, ignoring any error (including the
    session never having been created)."""
    try:
        cls.session.close()
    except Exception:
        # deliberate best-effort cleanup at shutdown
        pass
@staticmethod
def normalize_pokemon(raw, now):
    """Normalizes data coming from API into something acceptable by db"""
    return {
        'type': 'pokemon',
        'encounter_id': raw['encounter_id'],
        'pokemon_id': raw['pokemon_data']['pokemon_id'],
        # now and time_till_hidden_ms are milliseconds; store whole seconds
        'expire_timestamp': round((now + raw['time_till_hidden_ms']) / 1000),
        'lat': raw['latitude'],
        'lon': raw['longitude'],
        'spawn_id': get_spawn_id(raw),
        'time_till_hidden_ms': raw['time_till_hidden_ms'],
        'seen': round(raw['last_modified_timestamp_ms'] / 1000)
    }
@staticmethod
def normalize_lured(raw, now):
    """Convert a lured fort into a pokemon record for the db."""
    lure = raw['lure_info']
    expiry_ms = lure['lure_expires_timestamp_ms']
    return {
        'type': 'pokemon',
        'encounter_id': lure['encounter_id'],
        'pokemon_id': lure['active_pokemon_id'],
        'expire_timestamp': expiry_ms / 1000,
        'lat': raw['latitude'],
        'lon': raw['longitude'],
        # lured spawns have no real spawn point
        'spawn_id': -1,
        'time_till_hidden_ms': expiry_ms - now,
        'valid': 'pokestop'
    }
@staticmethod
def normalize_gym(raw):
    """Convert a raw gym fort into a db record."""
    record = {
        'type': 'fort',
        'external_id': raw['id'],
        'lat': raw['latitude'],
        'lon': raw['longitude'],
        'last_modified': round(raw['last_modified_timestamp_ms'] / 1000),
    }
    # fields absent on unowned gyms default to 0
    for key, source in (('team', 'owned_by_team'),
                        ('prestige', 'gym_points'),
                        ('guard_pokemon_id', 'guard_pokemon_id')):
        record[key] = raw.get(source, 0)
    return record
@staticmethod
def normalize_pokestop(raw):
    """Convert a raw pokestop fort into a db record."""
    return dict(type='pokestop',
                external_id=raw['id'],
                lat=raw['latitude'],
                lon=raw['longitude'])
@staticmethod
def check_captcha(responses):
    """Inspect a response chain for a CAPTCHA challenge.

    Returns True when a challenge is present and solving is enabled;
    raises CaptchaException when one is present but cannot be solved.
    """
    # ' ' is a sentinel meaning "no challenge_url in the response"
    challenge_url = responses.get('CHECK_CHALLENGE', {}).get('challenge_url', ' ')
    verify = responses.get('VERIFY_CHALLENGE', {})
    success = verify.get('success')
    if challenge_url != ' ' and not success:
        # only solvable when a key is set and this isn't a failed verify
        if config.CAPTCHA_KEY and not verify:
            return True
        else:
            raise CaptchaException
    else:
        return False
@property
def status(self):
    """Returns status message to be displayed in status screen"""
    if self.error_code:
        msg = self.error_code
    else:
        # no error: show the total number of pokemon seen
        msg = 'P{seen}'.format(
            seen=self.total_seen
        )
    return '[W{worker_no}: {msg}]'.format(
        worker_no=self.worker_no,
        msg=msg
    )
class BusyLock(Lock):
    """Lock with a non-blocking acquire that only succeeds immediately."""

    def acquire_now(self):
        """Acquire without waiting: True when the lock is free and no
        live (non-cancelled) waiters are queued, else False.

        Robustness fix: on newer asyncio versions Lock._waiters is None
        until the first waiter arrives, so guard against iterating None.
        """
        if not self._locked and all(w.cancelled() for w in self._waiters or ()):
            self._locked = True
            return True
        else:
            return False
class MaxRetriesException(Exception):
    """Raised when the maximum number of request retries is reached."""
class CaptchaException(Exception):
    """Raised when a CAPTCHA is needed (and cannot be solved automatically)."""
class CaptchaSolveException(Exception):
    """Raised when solving a CAPTCHA (via the external service) has failed."""
|
Claod44/GokemonReborn
|
pokeminer/worker.py
|
Python
|
mit
| 50,226
|
[
"VisIt"
] |
d7674be05f6a68ea5f7c4f716fc8db9b0f7a846852c29f8dda258d0619595e7e
|
import cPickle as pickle
from cStringIO import StringIO
import numpy as np
import scipy.sparse
from numpy.testing import (assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_equal)
from ..naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])  # two balanced classes, 3 points each
def test_gnb():
    """
    Gaussian Naive Bayes classification.
    This checks that GaussianNB implements fit and predict and returns
    correct values for a simple toy dataset.
    """
    clf = GaussianNB().fit(X, y)
    # the separable learning set must be predicted perfectly
    assert_array_equal(clf.predict(X), y)
    # predict_log_proba must agree with the log of predict_proba
    proba = clf.predict_proba(X)
    log_proba = clf.predict_log_proba(X)
    assert_array_almost_equal(np.log(proba), log_proba, 8)
# Data is 6 random points in a 100 dimensional space classified to
# three classes.
# NOTE: no fixed seed, so X2 differs between test runs.
X2 = np.random.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_mnnb():
    """
    Multinomial Naive Bayes classification.
    This checks that MultinomialNB implements fit and predict and returns
    correct values for a simple toy dataset.
    """
    # exercise both the dense and the sparse code paths
    for data in (X2, scipy.sparse.csr_matrix(X2)):
        # Check the ability to predict the learning set.
        clf = MultinomialNB().fit(data, y2)
        assert_array_equal(clf.predict(data), y2)
        # Verify that np.log(clf.predict_proba(X)) gives the same results as
        # clf.predict_log_proba(X)
        proba = clf.predict_proba(data)
        assert_array_almost_equal(np.log(proba), clf.predict_log_proba(data), 8)
def test_discretenb_pickle():
    """A fitted discrete NB classifier survives a pickle round-trip."""
    clf = MultinomialNB(alpha=2, fit_prior=False).fit(X2, y2)
    expected = clf.predict(X2)
    buf = StringIO()
    pickle.dump(clf, buf)
    restored = pickle.load(StringIO(buf.getvalue()))
    assert_array_equal(expected, restored.predict(X2))
def test_discretenb_predict_proba():
    """Test discrete NB classes' probability scores."""
    # The 100s below distinguish Bernoulli from multinomial.
    # FIXME: write a test to show this.
    X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
    X_multinomial = [[0, 1], [1, 3], [4, 0]]

    # test binary case (1-d output)
    y = [0, 0, 2]  # 2 is regression test for binary case, 02e673
    # BUG FIX: the original iterated over a plain 2-tuple of lists, which
    # unpacked the *list of classes* (not classifier/data pairs) into
    # (cls, X); zip pairs each classifier with its matching dataset.
    # NOTE(review): cls and X are still unused in the loop body (see the
    # FIXME above) — the body deliberately exercises MultinomialNB on
    # fixed 2-feature data.
    for cls, X in zip([BernoulliNB, MultinomialNB],
                      [X_bernoulli, X_multinomial]):
        clf = MultinomialNB().fit([[0, 1], [1, 3], [4, 0]], y)
        assert_equal(clf.predict([4, 1]), 2)
        assert_equal(clf.predict_proba([0, 1]).shape, (1, 2))
        assert_equal(clf.predict_proba([[0, 1], [1, 0]]).sum(axis=1), [1, 1])

    # test multiclass case (2-d output, must sum to one)
    y = [0, 1, 2]
    for cls, X in zip([BernoulliNB, MultinomialNB],
                      [X_bernoulli, X_multinomial]):
        clf = MultinomialNB().fit([[0, 1], [1, 3], [4, 0]], y)
        assert_equal(clf.predict_proba([0, 1]).shape, (1, 3))
        assert_equal(clf.predict_proba([[0, 1], [1, 0]]).shape, (2, 3))
        assert_almost_equal(np.sum(clf.predict_proba([1, 5])), 1)
        assert_almost_equal(np.sum(clf.predict_proba([3, 0])), 1)
        assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
        assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
    """Test whether discrete NB classes fit a uniform prior
    when fit_prior=False and class_prior=None."""
    for nb_class in (BernoulliNB, MultinomialNB):
        estimator = nb_class()
        estimator.set_params(fit_prior=False)
        estimator.fit([[0], [0], [1]], [0, 0, 1])
        # With a uniform prior both classes get probability 1/2.
        assert_array_equal(np.exp(estimator.class_log_prior_),
                           np.array([.5, .5]))
def test_sample_weight():
    """Sample weights shift the fitted class prior accordingly."""
    clf = MultinomialNB()
    clf.fit([[1, 2], [1, 2], [1, 0]], [0, 0, 1], sample_weight=[1, 1, 4])
    assert_array_equal(clf.predict([1, 0]), [1])
    # The heavily-weighted third sample pushes the positive prior to 2/3.
    positive_prior = np.exp(clf.intercept_)
    assert_array_almost_equal([1 - positive_prior, positive_prior],
                              [1 / 3., 2 / 3.])
|
cdegroc/scikit-learn
|
sklearn/tests/test_naive_bayes.py
|
Python
|
bsd-3-clause
| 4,208
|
[
"Gaussian"
] |
42703ae8d5001c516b46a195072be2ce77ba95390a1977122efebb7001c42ec9
|
import numpy as np
import os
import h5py
import json
from .utils import asymptotic_error_quantile, bootstrap,\
dict_to_margins, margins_to_dict, matrix_to_list
from sklearn.utils import check_random_state
from scipy.stats import gaussian_kde, norm
from .vinecopula import Conversion
class ListDependenceResult(list):
    """The result from the Conservative Estimation.

    The results gathered in the list must have the same configurations: the
    same copula families, vine structure, grid.

    Parameters
    ----------
    margins : list of OpenTURNS distributions
        The OT distributions.
    families : array
        The matrix array of the families.
    vine_structure : array
        The matrix array of the R-vine. If None, it is considered as Gaussian.
    bounds_tau : array,
        The matrix array of the bounds for the dependence parameters.
    dep_param : array
        The dependence parameters.
    input_sample : array
        The input sample.
    output_sample : array
        The output sample.
    q_func : callable or None
        The output quantity of interest function.
    run_type : str
        The type of estimation: independence, grid-search, iterative, ...
    grid_type : str
        The type of grid use if it was a grid search.
    random_state : int, RandomState or None,
        The random state of the computation.
    """
    def __init__(self,
                 margins=None,
                 families=None,
                 vine_structure=None,
                 bounds_tau=None,
                 fixed_params=None,
                 dep_params=None,
                 input_samples=None,
                 output_samples=None,
                 q_func=None,
                 run_type=None,
                 n_evals=None,
                 grid_type=None,
                 random_state=None,
                 **kwargs):
        # NOTE(review): the ``n_evals`` parameter is accepted but never used
        # here (a property of the same name computes it instead).
        self.margins = margins
        self.families = families
        self.vine_structure = vine_structure
        self.bounds_tau = bounds_tau
        self.fixed_params = fixed_params
        self._q_func = q_func
        self.run_type = run_type
        self.grid_type = grid_type
        self.input_dim = len(margins)
        # Number of pairs in the correlation matrix: d * (d - 1) / 2.
        self.corr_dim = int(self.input_dim * (self.input_dim - 1) / 2)
        # Optional settings passed through **kwargs.
        self.grid_filename = None
        if "grid_filename" in kwargs:
            self.grid_filename = kwargs["grid_filename"]
        self.lhs_grid_criterion = None
        if "lhs_grid_criterion" in kwargs:
            self.lhs_grid_criterion = kwargs["lhs_grid_criterion"]
        self.output_id = 0
        if "output_id" in kwargs:
            self.output_id = kwargs["output_id"]
        if run_type in ['grid-search', 'iterative']:
            assert output_samples is not None, \
                "Add some output sample if you're adding dependence parameters"
            # One DependenceResult per dependence parameter.
            for k, dep_param in enumerate(dep_params):
                input_sample = None if input_samples is None else input_samples[k]
                output_sample = output_samples[k]
                result = DependenceResult(margins=margins,
                                          families=families,
                                          vine_structure=vine_structure,
                                          fixed_params=fixed_params,
                                          dep_param=dep_param,
                                          input_sample=input_sample,
                                          output_sample=output_sample,
                                          q_func=q_func,
                                          random_state=random_state,
                                          output_id=self.output_id)
                self.append(result)
            # Output dimension deduced from the last sample of the loop.
            if output_sample.shape[0] == output_sample.size:
                self.output_dim = 1
            else:
                self.output_dim = output_sample.shape[1]
        elif run_type == 'independence':
            # There is data and we suppose it's at independence or a fixed params
            result = DependenceResult(margins=margins,
                                      families=families,
                                      vine_structure=vine_structure,
                                      fixed_params=fixed_params,
                                      dep_param=0,
                                      input_sample=input_samples,
                                      output_sample=output_samples[0],
                                      q_func=q_func,
                                      random_state=random_state,
                                      output_id=self.output_id)
            # NOTE(review): the configuration attributes are overwritten with 0
            # here, discarding the values passed to the constructor.
            self.families = 0
            self.vine_structure = 0
            self.bounds_tau = 0
            self.fixed_params = 0
            self.grid_type = 0
            self.append(result)
            self.output_dim = result.output_dim
        elif run_type == 'incomplete':
            # There is data and we suppose it's at independence or a fixed params
            result = DependenceResult(margins=margins,
                                      families=families,
                                      vine_structure=vine_structure,
                                      fixed_params=fixed_params,
                                      dep_param=0,
                                      input_sample=input_samples,
                                      output_sample=output_samples[0],
                                      q_func=q_func,
                                      random_state=random_state,
                                      output_id=self.output_id)
            self.grid_type = 0
            self.append(result)
            self.output_dim = result.output_dim
        self.rng = check_random_state(random_state)
        self._bootstrap_samples = None
    def __add__(self, results):
        """Concatenate two result lists with identical configurations.

        The samples of each pair of DependenceResult objects are stacked.
        """
        if self.n_params > 0:
            # Assert the results are the same categories
            np.testing.assert_equal(
                self.margins, results.margins, err_msg="Same margins")
            np.testing.assert_array_equal(
                self.families, results.families, err_msg="Different copula families")
            np.testing.assert_array_equal(
                self.vine_structure, results.vine_structure, err_msg="Different copula structures")
            np.testing.assert_array_equal(
                self.bounds_tau, results.bounds_tau, err_msg="Different bounds on Tau")
            np.testing.assert_array_equal(
                self.fixed_params, results.fixed_params, err_msg="Different fixed params")
            np.testing.assert_allclose(
                self.dep_params, results.dep_params, err_msg="Different dependence parameters")
            assert self.run_type == results.run_type, "Different run type"
            assert self.grid_type == results.grid_type, "Different grid type"
            assert self.grid_filename == results.grid_filename, "Different grid type"
            assert self.lhs_grid_criterion == results.lhs_grid_criterion, "Different grid type"
        # Stack the samples of each matching pair of results.
        # NOTE(review): zip truncates to the shorter list if the lengths differ.
        input_samples = []
        output_samples = []
        for res1, res2 in zip(self, results):
            if res1.input_sample is not None:
                input_samples.append(
                    np.r_[res1.input_sample, res2.input_sample])
            output_samples.append(
                np.r_[res1.output_sample, res2.output_sample])
        if len(input_samples) == 0:
            input_samples = None
        new_results = ListDependenceResult(
            margins=self.margins,
            families=self.families,
            vine_structure=self.vine_structure,
            bounds_tau=self.bounds_tau,
            fixed_params=self.fixed_params,
            dep_params=self.dep_params,
            input_samples=input_samples,
            output_samples=output_samples,
            grid_type=self.grid_type,
            q_func=self.q_func,
            run_type=self.run_type,
            grid_filename=self.grid_filename,
            lhs_grid_criterion=self.lhs_grid_criterion,
            output_id=self.output_id)
        return new_results
    def extend(self, value):
        # NOTE(review): assumes ``value`` is a ListDependenceResult (it reads
        # ``value.families``), not an arbitrary iterable — TODO confirm callers.
        super(ListDependenceResult, self).extend(value)
        self.families = value.families
    @property
    def output_id(self):
        """Id of the output.
        """
        return self._output_id
    @output_id.setter
    def output_id(self, output_id):
        # Propagate the output id to every contained result.
        for result in self:
            result.output_id = output_id
        self._output_id = output_id
    @property
    def q_func(self):
        """The quantity function
        """
        return self._q_func
    @q_func.setter
    def q_func(self, q_func):
        assert callable(q_func), "Function must be callable"
        if self.n_params == 0:
            print("There is no data...")
        else:
            for result in self:
                result.q_func = q_func
        self._q_func = q_func
    @property
    def pairs(self):
        """The dependent pairs of the problem.
        """
        # NOTE(review): like most properties below, this implicitly returns
        # None (after printing) when the data is missing, instead of raising.
        if self.families is None:
            print('Family matrix was not defined')
        else:
            return matrix_to_list(self.families)[1]
    @property
    def dep_params(self):
        """The dependence parameters.
        """
        if self.n_params == 0:
            print("There is no data...")
        else:
            return np.asarray([result.dep_param for result in self])
    @property
    def kendalls(self):
        """The Kendall's tau dependence measure.
        """
        if self.n_params == 0:
            print("There is no data...")
        else:
            return np.asarray([result.kendall_tau for result in self])
    @property
    def n_pairs(self):
        """The number of dependente pairs.
        """
        if self.n_params == 0:
            return 0
        else:
            return (self.families > 0).sum()
    @property
    def output_samples(self):
        # The list of output samples, one per dependence parameter.
        if self.n_params == 0:
            print("There is no data...")
        else:
            return [result.output_sample for result in self]
    @property
    def input_samples(self):
        # The list of input samples, one per dependence parameter.
        if self.n_params == 0:
            print("There is no data...")
        else:
            return [result.input_sample for result in self]
    @property
    def n_input_sample(self):
        """The sample size for each dependence parameter.
        """
        # TODO: maybe not all the samples have the same number of observations...
        if self.n_params == 0:
            return 0
        else:
            return self[0].n_sample
    @property
    def n_evals(self):
        """The total number of observations.
        """
        return self.n_params*self.n_input_sample
    @property
    def n_params(self):
        """The number of dependence parameters.
        """
        return len(self)
    @property
    def quantities(self):
        """The quantity values of each parameters.
        """
        if self.n_params == 0:
            print("There is no data...")
        else:
            return np.asarray([result.quantity for result in self])
    @property
    def min_result(self):
        """The dependence parameter that minimizes the output quantity.
        """
        if self.n_params == 0:
            print("There is no data...")
        else:
            return self[self.quantities.argmin()]
    @property
    def min_quantity(self):
        """The minimum quantity from all the dependence parameters.
        """
        if self.n_params == 0:
            print("There is no data...")
        else:
            return self.quantities.min()
    @property
    def full_dep_params(self):
        """The dependence parameters with the columns from the fixed parameters.
        """
        if self.n_params == 0:
            print("There is no data...")
        else:
            return np.asarray([result.full_dep_params for result in self])
    @property
    def bootstrap_samples(self):
        """The computed bootstrap sample of all the dependence parameters.
        """
        sample = [result._bootstrap_sample for result in self]
        if not any((boot is None for boot in sample)):
            return np.asarray(sample)
        else:
            raise AttributeError('The boostrap must be computed first')
    def compute_bootstraps(self, n_bootstrap=1000, inplace=True):
        """Compute bootstrap of the quantity for each element of the list
        """
        if self.n_params == 0:
            print("There is no data...")
        else:
            for result in self:
                result.compute_bootstrap(n_bootstrap)
            if not inplace:
                return self.bootstrap_samples
    def to_hdf(self, path_or_buf, input_names=[], output_names=[], verbose=False, with_input_sample=True):
        """Write the contained data to an HDF5 file using HDFStore.

        If a file already exists at the path with a different configuration,
        a numbered suffix is appended and a new file is created instead.

        Parameters
        ----------
        path_or_buf : the path (string) or HDFStore object
            The path of the file or an hdf instance.
        input_names : list of strings, optional
            The name of the inputs variables.
        output_names : list of strings, optional
            The name of the outputs.
        verbose : bool, optional
            If True, print progress and mismatch details.
        with_input_sample : bool, optional
            If False, the input samples are not written to the file.
        """
        # NOTE(review): mutable default arguments — input_names/output_names
        # are appended to below, so defaults persist across calls.
        filename, extension = os.path.splitext(path_or_buf)
        dirname = os.path.dirname(path_or_buf)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        assert extension in ['.hdf', '.hdf5'], "File extension should be hdf"
        # List of input variable names
        if input_names:
            assert len(input_names) == self.input_dim, \
                AttributeError("Dimension problem for input_names")
        else:
            for i in range(self.input_dim):
                input_names.append("x_%d" % (i + 1))
        # List of output variable names
        if output_names:
            assert len(output_names) == self.output_dim, \
                AttributeError("Dimension problem for output_names")
        else:
            for i in range(self.output_dim):
                output_names.append("y_%d" % (i + 1))
        margin_dict = margins_to_dict(self.margins)
        filename_exists = True
        k = 0
        while filename_exists:
            # If the file has the same run configuration
            try:
                with h5py.File(path_or_buf, 'a') as hdf_store:
                    # If the file already exists and already has data
                    if hdf_store.attrs.keys():
                        # Check the attributes of the file, if it already exists
                        np.testing.assert_allclose(
                            hdf_store['dependence_params'].value, self.dep_params, err_msg="Different dependence parameters")
                        assert hdf_store.attrs['Input Dimension'] == self.input_dim, "Different input dimension"
                        assert hdf_store.attrs['Output Dimension'] == self.output_dim, "Different output dimension"
                        assert hdf_store.attrs['Run Type'] == self.run_type, "Different run type"
                        np.testing.assert_array_equal(
                            hdf_store.attrs['Copula Families'], self.families, err_msg="Different copula families")
                        if 'Fixed Parameters' in hdf_store.attrs.keys():
                            np.testing.assert_array_equal(
                                hdf_store.attrs['Fixed Parameters'], self.fixed_params, err_msg="Different fixed copulas")
                        elif self._fixed_pairs:
                            # Save only if there is no fixed params
                            raise ValueError(
                                'It should not have constraints to be in the same output file.')
                        if 'Bounds Tau' in hdf_store.attrs.keys():
                            np.testing.assert_array_equal(
                                hdf_store.attrs['Bounds Tau'], self.bounds_tau, err_msg="Different bounds on Tau")
                        elif self._fixed_pairs:
                            raise ValueError(
                                'It should not have constraints to be in the same output file.')
                        np.testing.assert_array_equal(
                            hdf_store.attrs['Copula Structure'], self.vine_structure, err_msg="Different vine structures")
                        np.testing.assert_array_equal(
                            hdf_store.attrs['Input Names'], input_names, err_msg="Different Input Names")
                        np.testing.assert_array_equal(
                            hdf_store.attrs['Output Names'], output_names, err_msg="Different output Names")
                        loaded_margin_dict = json.loads(
                            hdf_store.attrs['Margins'])
                        assert all(loaded_margin_dict[str(
                            k)] == margin_dict[k] for k in margin_dict), "Not the same dictionary"
                        if self.run_type == 'grid-search':
                            assert hdf_store.attrs['Grid Type'] == self.grid_type, "Different grid type"
                    else:
                        # We save the attributes in the empty new file
                        hdf_store.create_dataset(
                            'dependence_params', data=self.dep_params)
                        # Margins
                        hdf_store.attrs['Margins'] = json.dumps(
                            margin_dict, ensure_ascii=False)
                        hdf_store.attrs['Copula Families'] = self.families
                        hdf_store.attrs['Copula Structure'] = self.vine_structure
                        hdf_store.attrs['Bounds Tau'] = self.bounds_tau
                        hdf_store.attrs['Grid Size'] = self.n_params
                        hdf_store.attrs['Input Dimension'] = self.input_dim
                        hdf_store.attrs['Output Dimension'] = self.output_dim
                        hdf_store.attrs['Fixed Parameters'] = self.fixed_params
                        hdf_store.attrs['Run Type'] = self.run_type
                        hdf_store.attrs['Input Names'] = input_names
                        hdf_store.attrs['Output Names'] = output_names
                        if self.run_type == 'grid-search':
                            hdf_store.attrs['Grid Type'] = self.grid_type
                            if self.grid_filename is not None:
                                hdf_store.attrs['Grid Filename'] = os.path.basename(
                                    self.grid_filename)
                            if self.grid_type == 'lhs':
                                hdf_store.attrs['LHS Criterion'] = self.lhs_grid_criterion
                    # Check the number of experiments
                    grp_number = 0
                    list_groups = hdf_store.keys()
                    list_groups.remove('dependence_params')
                    list_groups = [int(g) for g in list_groups]
                    list_groups.sort()
                    # If there is already groups in the file
                    if list_groups:
                        grp_number = list_groups[-1] + 1
                    grp = hdf_store.create_group(str(grp_number))
                    for i in range(self.n_params):
                        grp_i = grp.create_group(str(i))
                        grp_i.attrs['n'] = self[i].n_sample
                        grp_i.create_dataset(
                            'output_sample', data=self[i].output_sample)
                        if with_input_sample:
                            grp_i.create_dataset(
                                'input_sample', data=self[i].input_sample)
                    filename_exists = False
            except AssertionError as msg:
                # Configuration mismatch: retry with a numbered filename.
                print('File %s has different configurations' % (path_or_buf))
                if verbose:
                    print(str(msg))
                path_or_buf = '%s_num_%d%s' % (filename, k, extension)
                k += 1
        if verbose:
            print('Data saved in %s' % (path_or_buf))
    @classmethod
    def from_hdf(cls, filepath_or_buffer, id_of_experiment='all', output_id=0,
                 with_input_sample=True, q_func=np.var):
        """Loads result from HDF5 file.

        This class method creates an instance of :class:`~ConservativeEstimate`
        by loading a HDF5 with the saved result of a previous run.

        Parameters
        ----------
        filepath_or_buffer : str
            The path of the file to hdf5 file read.
        id_of_experiment : str or int, optional (default='all')
            The experiments to load. The hdf5 file can gather multiple
            experiments with the same metadatas. The user can chose to load all
            or one experiments.
        output_id : int, optional (default=0)
            The index of the output if the function output is multidimensional.
        with_input_sample : bool, optional (default=True)
            If False the samples of input observations are not loaded. Input
            observations are not necessary to compute output quantity of
            interests.

        Returns
        -------
        obj : :class:`~ImpactOfDependence`
            The Impact Of Dependence instance with the loaded informations.
        """
        # Load of the hdf5 file
        with h5py.File(filepath_or_buffer, 'r') as hdf_store:
            # The file may contain multiple experiments. The user can load one
            # or multiple experiments if they have similiar configurations.
            if id_of_experiment == 'all':
                # All groups of experiments are loaded and concatenated
                list_index = hdf_store.keys()
                list_index.remove('dependence_params')
            else:
                # Only the specified experiment is loaded
                assert isinstance(id_of_experiment, int), 'It should be an int'
                list_index = [str(id_of_experiment)]
            params = hdf_store['dependence_params'].value
            run_type = hdf_store.attrs['Run Type']
            families = hdf_store.attrs['Copula Families']
            vine_structure = hdf_store.attrs['Copula Structure']
            #copula_type = hdf_store.attrs['Copula Type']
            input_dim = hdf_store.attrs['Input Dimension']
            input_names = hdf_store.attrs['Input Names']
            # Many previous experiments did not have this attribute.
            # The checking is temporary and should be deleted in future
            # versions.
            fixed_params = None
            if 'Fixed Parameters' in hdf_store.attrs.keys():
                fixed_params = hdf_store.attrs['Fixed Parameters']
            bounds_tau = None
            if 'Bounds Tau' in hdf_store.attrs.keys():
                bounds_tau = hdf_store.attrs['Bounds Tau']
            margins = dict_to_margins(json.loads(hdf_store.attrs['Margins']))
            grid_type = None
            grid_filename = None
            lhs_grid_criterion = None
            if run_type == 'grid-search':
                grid_type = hdf_store.attrs['Grid Type']
                if 'Grid Filename' in hdf_store.attrs.keys():
                    grid_filename = hdf_store.attrs['Grid Filename']
                if grid_type == 'lhs':
                    lhs_grid_criterion = hdf_store.attrs['LHS Criterion']
            output_names = hdf_store.attrs['Output Names']
            # For each experiment
            for j_exp, index in enumerate(list_index):
                grp = hdf_store[index]  # Group of the experiment
                input_samples = None
                if with_input_sample:
                    input_samples = []
                output_samples = []
                n_samples = []
                elements = [int(i) for i in grp.keys()]
                for k in sorted(elements):
                    res = grp[str(k)]
                    if with_input_sample:
                        data_in = res['input_sample'].value
                    data_out = res['output_sample'].value
                    if with_input_sample:
                        input_samples.append(data_in)
                    output_samples.append(data_out)
                    n_samples.append(res.attrs['n'])
                result = cls(margins=margins,
                             families=families,
                             vine_structure=vine_structure,
                             bounds_tau=bounds_tau,
                             fixed_params=fixed_params,
                             dep_params=params,
                             input_samples=input_samples,
                             output_samples=output_samples,
                             grid_type=grid_type,
                             q_func=q_func,
                             run_type=run_type,
                             grid_filename=grid_filename,
                             lhs_grid_criterion=lhs_grid_criterion,
                             output_id=output_id)
                # Concatenate the experiments through __add__.
                if j_exp == 0:
                    results = result
                else:
                    results = results + result
        return results
class DependenceResult(object):
    """Result from conservative estimate.

    Parameters
    ----------
    margins : list
        The OT distributions.
    families : array
        The matrix array of the families.
    vine_structure : array
        The matrix array of the R-vine. If None, it is considered as Gaussian.
    dep_param : array
        The dependence parameters.
    input_sample : array
        The input sample.
    output_sample : array
        The output sample.
    q_func : callable or None
        The output quantity of interest function.
    random_state : int, RandomState or None,
        The random state of the computation.
    """

    def __init__(self,
                 margins=None,
                 families=None,
                 vine_structure=None,
                 fixed_params=None,
                 dep_param=None,
                 input_sample=None,
                 output_sample=None,
                 q_func=None,
                 output_id=0,
                 random_state=None):
        self.margins = margins
        self.families = families
        self.vine_structure = vine_structure
        self.fixed_params = fixed_params
        self.dep_param = dep_param
        self.input_sample = input_sample
        self.output_sample = output_sample
        self.q_func = q_func
        self.rng = check_random_state(random_state)
        self.output_id = output_id
        self.n_sample = output_sample.shape[0]
        self.input_dim = len(margins)
        # A 1-d sample means a single output dimension.
        if output_sample.shape[0] == output_sample.size:
            self.output_dim = 1
        else:
            self.output_dim = output_sample.shape[1]
        # Number of pairs in the correlation matrix: d * (d - 1) / 2.
        self.corr_dim = int(self.input_dim * (self.input_dim - 1) / 2)
        self._bootstrap_sample = None
        self._n_bootstrap_sample = None
        self._gaussian_kde = None
        # FIX: initialize the bootstrap statistics so the boot_* properties
        # can report "not computed yet" instead of raising AttributeError.
        self._std = None
        self._mean = None
        self._cov = None

    def compute_bootstrap(self, n_bootstrap=1000, inplace=True):
        """Bootstrap of the output quantity of interest.

        Parameters
        ----------
        n_bootstrap : int, optional
            The number of bootstrap samples.
        inplace : bool, optional
            If False, the bootstrap sample is returned.

        Returns
        -------
        The bootstrap sample if inplace is False.
        """
        # bootstrap() resamples the output and applies q_func to each replicate.
        self._bootstrap_sample = bootstrap(
            self.output_sample_id, n_bootstrap, self.q_func)
        self._n_bootstrap_sample = self._bootstrap_sample.shape[0]
        # FIX: removed a self-recursive re-check that was either a no-op or an
        # infinite recursion, depending on the shape returned by bootstrap().
        # Summary statistics of the bootstrap distribution.
        self._std = self._bootstrap_sample.std()
        self._mean = self._bootstrap_sample.mean()
        self._cov = abs(self._std / self._mean)
        if not inplace:
            return self._bootstrap_sample

    def compute_quantity_bootstrap_ci(self, alphas=[0.025, 0.975], n_bootstrap=1000):
        """Bootstrap confidence interval of the quantity.

        Parameters
        ----------
        alphas : list of float, optional
            The percentile levels of the interval bounds.
        n_bootstrap : int, optional
            The number of bootstrap samples.

        Returns
        -------
        list
            The percentiles of the bootstrap distribution.
        """
        # Reuse the cached bootstrap sample when its size matches.
        if (self._bootstrap_sample is None) or (self._n_bootstrap_sample != n_bootstrap):
            self.compute_bootstrap(n_bootstrap)
        return np.percentile(self._bootstrap_sample, [a * 100. for a in alphas]).tolist()

    def compute_quantity_asymptotic_ci(self, quantity_name, quantity_param, ci=0.95):
        """Asymptotic confidence interval of the quantity.

        Parameters
        ----------
        quantity_name : str
            Either 'quantile' or 'probability'.
        quantity_param : float
            The parameter of the quantity (e.g. the quantile level).
        ci : float, optional
            The confidence level.

        Returns
        -------
        list
            The [lower, upper] bounds of the interval.

        Raises
        ------
        ValueError
            If ``quantity_name`` is unknown.
        """
        quantity = self.quantity
        if quantity_name == 'quantile':
            # BUG FIX: the original referenced an undefined bare name
            # ``kde_estimate``; it is a property on self. The KDE is evaluated
            # at the estimated quantity to get the density value there.
            density = self.kde_estimate(quantity)[0]
            error = asymptotic_error_quantile(
                self.n_sample, density, quantity_param)
        elif quantity_name == 'probability':
            # NOTE(review): this reuses asymptotic_error_quantile for the
            # probability case — presumably intentional; confirm against utils.
            error = asymptotic_error_quantile(
                self.n_sample, quantity, quantity_param)
        else:
            # BUG FIX: raising a plain string is a TypeError in Python 3.
            raise ValueError('Unknown quantity_name: {0}'.format(quantity_name))
        gaussian_quantile = norm.ppf(1. - (1. - ci) / 2.)
        deviation = gaussian_quantile * error
        return [quantity - deviation, quantity + deviation]

    @property
    def boot_cov(self):
        """Coefficient of variation of the bootstrap distribution."""
        if self._cov is None:
            print('Create a bootstrap sample first.')
        return self._cov

    @property
    def boot_mean(self):
        """Mean of the bootstrap distribution of the quantity."""
        if self._mean is None:
            print('Create a bootstrap sample first.')
        return self._mean

    @property
    def boot_var(self):
        """Standard deviation of the bootstrap distribution of the quantity."""
        if self._std is None:
            print('Create a bootstrap sample first.')
        return self._std

    @property
    def kde_estimate(self):
        """A (cached) Gaussian KDE fitted on the selected output sample."""
        if self._gaussian_kde is not None:
            return self._gaussian_kde
        else:
            self._gaussian_kde = gaussian_kde(self.output_sample_id)
            return self._gaussian_kde

    @property
    def bootstrap_sample(self):
        """The computed bootstrap sample.

        Raises
        ------
        AttributeError
            If compute_bootstrap() has not been called yet.
        """
        if self._bootstrap_sample is not None:
            return self._bootstrap_sample
        else:
            raise AttributeError('The boostrap must be computed first')

    @property
    def quantity(self):
        """The computed output quantity (q_func applied to the output sample)."""
        # TODO: don't compute it everytime...
        quantity = self.q_func(self.output_sample_id, axis=0)
        return quantity.item() if quantity.size == 1 else quantity

    @property
    def output_sample_id(self):
        """The output sample of the selected output (column ``output_id``)."""
        if self.output_dim == 1:
            return self.output_sample
        else:
            return self.output_sample[:, self.output_id]

    @property
    def full_dep_params(self):
        """The matrix of parameters for all the pairs.

        Free parameters are filled in first, then any fixed parameters.
        """
        full_params = np.zeros((self.corr_dim, ))
        pair_ids = matrix_to_list(self.families, return_ids=True)[1]
        full_params[pair_ids] = self.dep_param
        if self.fixed_params is not None:
            fixed_params, fixed_pairs = matrix_to_list(
                self.fixed_params, return_ids=True)
            full_params[fixed_pairs] = fixed_params
        return full_params

    @property
    def kendall_tau(self):
        """The Kendall's tau of the dependence parameters."""
        kendalls = []
        for family, id_param in zip(*matrix_to_list(self.families, return_ids=True)):
            kendall = Conversion(family).to_kendall(
                self.full_dep_params[id_param])
            kendalls.append(kendall)
        return kendalls

    @property
    def full_kendall_tau(self):
        """The Kendall's tau of all the pairs, including fixed ones."""
        kendalls = []
        for family, id_param in zip(*matrix_to_list(self.families, return_ids=True, op_char='>=')):
            kendall = Conversion(family).to_kendall(
                self.full_dep_params[id_param])
            kendalls.append(kendall)
        return kendalls
|
NazBen/impact-of-dependence
|
depimpact/result.py
|
Python
|
mit
| 32,314
|
[
"Gaussian"
] |
88c77e5852a9850c54cb3ba9c219b429fd82df577624b1c8d7fbe7134f469c0b
|
import numpy as np
from numpy.testing import assert_equal, assert_raises, assert_almost_equal
from skimage.measure import LineModel, CircleModel, EllipseModel, ransac
from skimage.transform import AffineTransform
def test_line_model_invalid_input():
    """estimate() must reject data that is not an (N, 2) array."""
    bad_data = np.empty((5, 3))
    assert_raises(ValueError, LineModel().estimate, bad_data)
def test_line_model_predict():
    """predict_x should invert predict_y for a fixed parameter set."""
    model = LineModel()
    model._params = (10, 1)
    xs = np.arange(-10, 10)
    ys = model.predict_y(xs)
    assert_almost_equal(xs, model.predict_x(ys))
def test_line_model_estimate():
    """Estimation on noisy data recovers the original line parameters."""
    # Generate clean reference data.
    reference = LineModel()
    reference._params = (10, 1)
    xs = np.arange(-100, 100)
    clean = np.column_stack([xs, reference.predict_y(xs)])
    # Perturb it with Gaussian noise.
    np.random.seed(1234)
    noisy = clean + np.random.normal(size=clean.shape)
    # Fit a new model on the noisy data.
    fitted = LineModel()
    fitted.estimate(noisy)
    # Estimated parameters should be close to the reference ones.
    assert_almost_equal(reference._params, fitted._params, 1)
def test_line_model_residuals():
    """Residuals equal the point-to-line distance."""
    model = LineModel()
    res = lambda pt: abs(model.residuals(np.array([pt])))
    model._params = (0, 0)
    assert_equal(res([0, 0]), 0)
    assert_equal(res([0, 10]), 0)
    assert_equal(res([10, 0]), 10)
    model._params = (5, np.pi / 4)
    assert_equal(res([0, 0]), 5)
    assert_almost_equal(res([np.sqrt(50), 0]), 0)
def test_line_model_under_determined():
    """A single point cannot determine a line."""
    assert_raises(ValueError, LineModel().estimate, np.empty((1, 2)))
def test_circle_model_invalid_input():
    """estimate() must reject data that is not an (N, 2) array."""
    bad_data = np.empty((5, 3))
    assert_raises(ValueError, CircleModel().estimate, bad_data)
def test_circle_model_predict():
    """predict_xy at multiples of pi/2 hits the four axis points."""
    model = CircleModel()
    radius = 5
    model._params = (0, 0, radius)
    angles = np.arange(0, 2 * np.pi, np.pi / 2)
    expected = np.array(((5, 0), (0, 5), (-5, 0), (0, -5)))
    assert_almost_equal(expected, model.predict_xy(angles))
def test_circle_model_estimate():
    """Estimation on noisy data recovers the original circle parameters."""
    # Generate clean reference data.
    reference = CircleModel()
    reference._params = (10, 12, 3)
    angles = np.linspace(0, 2 * np.pi, 1000)
    clean = reference.predict_xy(angles)
    # Perturb it with Gaussian noise.
    np.random.seed(1234)
    noisy = clean + np.random.normal(size=clean.shape)
    # Fit a new model on the noisy data.
    fitted = CircleModel()
    fitted.estimate(noisy)
    # Estimated parameters should be close to the reference ones.
    assert_almost_equal(reference._params, fitted._params, 1)
def test_circle_model_residuals():
    """Residuals equal the radial distance from the circle."""
    model = CircleModel()
    model._params = (0, 0, 5)
    res = lambda pt: abs(model.residuals(np.array([pt])))
    assert_almost_equal(res([5, 0]), 0)
    assert_almost_equal(res([6, 6]), np.sqrt(2 * 6 ** 2) - 5)
    assert_almost_equal(res([10, 0]), 5)
def test_ellipse_model_invalid_input():
    """estimate() must reject data that is not an (N, 2) array."""
    bad_data = np.empty((5, 3))
    assert_raises(ValueError, EllipseModel().estimate, bad_data)
def test_ellipse_model_predict():
    """predict_xy at multiples of pi/2 hits the axis extremes of the ellipse."""
    model = EllipseModel()
    # FIX: removed the unused local ``r = 5`` (copy-paste leftover from the
    # circle test); the semi-axes are given directly in _params below.
    model._params = (0, 0, 5, 10, 0)
    t = np.arange(0, 2 * np.pi, np.pi / 2)
    xy = np.array(((5, 0), (0, 10), (-5, 0), (0, -10)))
    assert_almost_equal(xy, model.predict_xy(t))
def test_ellipse_model_estimate():
    """Estimation on noisy data recovers the original ellipse parameters."""
    # Generate clean reference data.
    reference = EllipseModel()
    reference._params = (10, 20, 15, 25, 0)
    angles = np.linspace(0, 2 * np.pi, 100)
    clean = reference.predict_xy(angles)
    # Perturb it with Gaussian noise.
    np.random.seed(1234)
    noisy = clean + np.random.normal(size=clean.shape)
    # Fit a new model on the noisy data.
    fitted = EllipseModel()
    fitted.estimate(noisy)
    # Estimated parameters should be close to the reference ones.
    assert_almost_equal(reference._params, fitted._params, 0)
def test_ellipse_model_residuals():
    """Residuals equal the distance from the ellipse boundary."""
    model = EllipseModel()
    # x semi-axis 10, y semi-axis 5, no rotation.
    model._params = (0, 0, 10, 5, 0)
    res = lambda pt: abs(model.residuals(np.array([pt])))
    assert_almost_equal(res([10, 0]), 0)
    assert_almost_equal(res([0, 5]), 0)
    assert_almost_equal(res([0, 10]), 5)
def test_ransac_shape():
    """RANSAC recovers exact circle parameters despite gross outliers."""
    np.random.seed(1)
    # Generate clean reference data.
    reference = CircleModel()
    reference._params = (10, 12, 3)
    data = reference.predict_xy(np.linspace(0, 2 * np.pi, 1000))
    # Corrupt a few points.
    outliers = (10, 30, 200)
    data[outliers[0], :] = (1000, 1000)
    data[outliers[1], :] = (-50, 50)
    data[outliers[2], :] = (-100, -10)
    # Fit on the corrupted data.
    model_est, inliers = ransac(data, CircleModel, 3, 5)
    # Parameters must match exactly and outliers must be rejected.
    assert_equal(reference._params, model_est._params)
    for outlier in outliers:
        assert outlier not in inliers
def test_ransac_geometric():
    """RANSAC recovers an affine transform despite gross outliers."""
    np.random.seed(1)
    # Generate clean reference correspondences.
    src = 100 * np.random.random((50, 2))
    reference = AffineTransform(scale=(0.5, 0.3), rotation=1,
                                translation=(10, 20))
    dst = reference(src)
    # Corrupt a few correspondences.
    faulty = (0, 5, 20)
    dst[faulty[0]] = (10000, 10000)
    dst[faulty[1]] = (-100, 100)
    dst[faulty[2]] = (50, 50)
    # Fit on the corrupted data.
    model_est, inliers = ransac((src, dst), AffineTransform, 2, 20)
    # Matrix must match and exactly the faulty points must be outliers.
    assert_almost_equal(reference._matrix, model_est._matrix)
    assert np.all(np.nonzero(inliers == False)[0] == faulty)
def test_ransac_is_data_valid():
    """If is_data_valid always rejects samples, ransac returns (None, None)."""
    np.random.seed(1)

    def is_data_valid(data):
        return data.shape[0] > 2

    model, inliers = ransac(np.empty((10, 2)), LineModel, 2, np.inf,
                            is_data_valid=is_data_valid)
    assert_equal(model, None)
    assert_equal(inliers, None)
def test_ransac_is_model_valid():
    """If is_model_valid always rejects models, ransac returns (None, None)."""
    np.random.seed(1)
    is_model_valid = lambda model, data: False
    model, inliers = ransac(np.empty((10, 2)), LineModel, 2, np.inf,
                            is_model_valid=is_model_valid)
    assert_equal(model, None)
    assert_equal(inliers, None)
if __name__ == "__main__":
    # Run this module's tests when executed directly.
    np.testing.run_module_suite()
|
almarklein/scikit-image
|
skimage/measure/tests/test_fit.py
|
Python
|
bsd-3-clause
| 6,311
|
[
"Gaussian"
] |
93c6e700c8f357ef140cca9fab43c50f3d82e74a9b66118864e349c205e852c6
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packaging script for mongodb-chemistry: installs the ``mchem`` package."""
from setuptools import setup
setup(
    name='mongodb-chemistry',
    version='0.0.1',
    author='Matt Swain',
    author_email='m.swain@me.com',
    license='MIT',
    url='https://github.com/mcs07/mongodb-chemistry',
    packages=['mchem'],
    description='Proof of concept for a MongoDB chemical database',
    keywords='chemistry cheminformatics rdkit',
    zip_safe=False,
    # Two console scripts: the MongoDB CLI and a PostgreSQL comparison CLI.
    entry_points={'console_scripts': ['mchem = mchem.cli:cli', 'pgchem = mchem.postgres:cli']},
    # NOTE(review): rdkit is used by the package (see keywords) but is not pip
    # installable at this time, hence it is absent from install_requires.
    install_requires=['Click', 'pymongo'],
    classifiers=[
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: Healthcare Industry',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
        'Topic :: Scientific/Engineering :: Chemistry',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
|
mcs07/mongodb-chemistry
|
setup.py
|
Python
|
mit
| 1,197
|
[
"RDKit"
] |
319dfb4adbb580c1c1535ab567acc80a91ccd22b32b7b96283d9fe1db61a12fb
|
# DFF -- An Open Source Digital Forensics Framework
# Copyright (C) 2009-2010 ArxSys
# This program is free software, distributed under the terms of
# the GNU General Public License Version 2. See the LICENSE file
# at the top of the source tree.
#
# See http://www.digital-forensic.org for more information about this
# project. Please do not directly contact any of the maintainers of
# DFF for assistance; the project provides a web site, mailing lists
# and IRC channels for your use.
#
# Author(s):
# Francois Percot <percot@gmail.com>
#
import os
# Form Custom implementation of MAINWINDOW
from PyQt4.QtGui import QAction, QApplication, QDockWidget, QFileDialog, QIcon, QMainWindow, QMessageBox
from PyQt4.QtCore import QEvent, Qt, SIGNAL, QModelIndex, QSettings
# Import the template generate by QtDesigner
from _mainWindow import Ui_MainWindow
# CORE
from api.env import *
from api.taskmanager import *
from api.loader import *
from api.type import *
from api.vfs import *
# CONFIGURATION
from configuration.configureDFF import DFF_ConfigureDFF
from configuration.Config import DFF_Conf
from configuration.Translator import DFF_Translator
# ENV
from widget.info import Info
# IDE
from ide.DFF_Ide import DFF_Ide
#from ide.toolBar import DFF_ToolBar_Ide
# INTERPRETER
from widget.shell import Shell
from widget.interpreter import Interpreter
from widget.stdio import IO
# SCHEDULER
from api.taskmanager.taskmanager import *
# UTILS
from utils.utils import DFF_Utils
from api.gui.dialog.selectnodes import SelectNodes
from api.gui.dialog.applymodule import ApplyModule
from api.gui.widget.nodetree import NodeTree
from api.gui.dialog.property import Property
# Wrapper VFS
from wrapper.connectorCallback import ConnectorCallback
# The MAIN QWindow for DFF application a
class DFF_MainWindow(QMainWindow, Ui_MainWindow):
    """Main application window for DFF.

    Combines the QtDesigner-generated Ui_MainWindow layout with the runtime
    wiring: dock widgets, toolbars, dialogs, the VFS callback connector and
    the task manager.
    """
    def __init__(self, app):
        """Build the whole UI: widgets, dialogs, signal wiring, settings.

        ``app`` is the owning QApplication; it is kept for later use.
        """
        super(DFF_MainWindow, self).__init__()
        self.setupUi(self)
        self.app = app
        self.DFF_CONFIG = DFF_Conf()
        self.taskmanager = TaskManager()
        # QMenu List
        self.DFF_QMenu = {}
        # Custom MainWindow icon
        self.setWindowIcon(QIcon(":newlogo.jpg"))
        # Init VFS
        self.DFF_Callback = ConnectorCallback(self)
        self.env = env.env()
        # NOTE(review): a second TaskManager is created here in addition to
        # self.taskmanager above -- presumably TaskManager is a singleton;
        # confirm before relying on either attribute exclusively.
        self.tm = TaskManager()
        self.loader = loader.loader()
        # Init DFF_QDialogs
        self.QApplyModule = ApplyModule(self)
        self.QConfigureDFF = DFF_ConfigureDFF(self)
        self.QPropertyDialog = Property(self)
        self.QSelectNodes = SelectNodes(self)
        self.initDockWidgets()
        # Init Callbacks
        self.setupCallback()
        # Init ToolBar
        self.initToolBars()
        self.readSettings()
    ###############
    ## DOCK WIDGETS ##
    ##############
    def initDockWidgets(self):
        """Init Dock in application and init DockWidgets"""
        #self.setCorner(Qt.TopLeftCorner, Qt.LeftDockWidgetArea)
        self.setCorner(Qt.BottomLeftCorner, Qt.BottomDockWidgetArea)
        self.setCorner(Qt.TopRightCorner, Qt.TopDockWidgetArea)
        self.setCorner(Qt.BottomRightCorner, Qt.RightDockWidgetArea)
        # Instantiating NodeTree populates NodeTree.instance (used below).
        NodeTree(self)
        self.dockWidget = {}
        self.listDockWidget = []
        self.dockNodeTree = NodeTree.instance
        self.dockInfo = Info(self)
        # Shell and interpreter docks are created lazily (see addShell).
        self.dockShell = None
        self.dockInterpreter = None
        self.dockIO = IO(self)
        self.dockWidget["Interpreter"] = None
        self.dockWidget["IDE"] = None
        self.dockWidget["Resultat"] = None
        self.dockWidget["I/O"] = self.dockIO
        self.dockWidget["Info"] = self.dockInfo
        self.addNewDockWidgetTab(Qt.BottomDockWidgetArea, self.dockWidget["I/O"])
        self.addNewDockWidgetTab(Qt.BottomDockWidgetArea, self.dockWidget["Info"])
        self.dockWidget["NodeTree"] = NodeTree.instance
        self.setCentralWidget(self.dockWidget["NodeTree"])
        dock = self.dockNodeTree.addList()
        self.dockNodeTree.setChild(dock.widget)
    def addNewDockWidgetTab(self, dockArea, dockWidget):
        """Add *dockWidget* to *dockArea*, tabifying it with an existing dock
        already occupying that area; otherwise register it as a new dock."""
        if dockWidget is None :
            return
        for i in range(0, len(self.listDockWidget)) :
            area = self.dockWidgetArea(self.listDockWidget[i])
            if area == dockArea :
                self.addDockWidget(dockArea, dockWidget)
                self.tabifyDockWidget(self.listDockWidget[i], dockWidget)
                return
        self.listDockWidget.append(dockWidget)
        self.addDockWidget(dockArea, dockWidget)
    def addResultatDockWidget(self, dockWidget):
        """Record *dockWidget* as the NodeTree dock if none is set yet."""
        if self.dockWidget["NodeTree"] is None :
            self.dockWidget["NodeTree"] = dockWidget
    ################
    ## ADD DOCKWIDGET ##
    ################
    def addShell(self):
        """Create the shell dock on first use, then make sure it is shown."""
        if self.dockShell is None :
            self.dockShell = Shell(self)
            self.addNewDockWidgetTab(Qt.RightDockWidgetArea, self.dockShell)
        if not self.dockShell.isVisible() :
            self.dockShell.show()
    def addInterpreter(self):
        """Create the Python interpreter dock on first use, then show it."""
        if self.dockInterpreter is None :
            self.dockInterpreter = Interpreter(self)
            self.addNewDockWidgetTab(Qt.RightDockWidgetArea, self.dockInterpreter)
        if not self.dockInterpreter.isVisible() :
            self.dockInterpreter.show()
    #####################
    ## INIT AND CONNECT ACTION ##
    #####################
    def setupCallback(self):
        """ Init Actions """
        # MENU FILE
        self.connect(self.actionNew_Dump, SIGNAL("triggered()"), self.openAddDump)
        # MENU
        self.connect(self.actionLoad, SIGNAL("triggered()"), self.openLoadDriver)
        # MENU ABOUT
        self.connect(self.actionAbout, SIGNAL("triggered()"), self.openAbout)
        # DOCKWIDGET
        # TOOLBAR
        self.connect(self.actionApplyModule, SIGNAL("triggered()"), self.openApplyModule)
        self.connect(self.actionShell, SIGNAL("triggered()"), self.addShell)
        self.connect(self.actionInterpreter, SIGNAL("triggered()"), self.addInterpreter)
        self.connect(self.actionList_Files, SIGNAL("triggered()"), self.dockNodeTree.addList)
    #############
    ## INIT TOOLBAR ##
    #############
    def initToolBars(self):
        """ Init Toolbar"""
        self.toolBarMain.addAction(self.actionNew_Dump)
        self.toolBarMain.addSeparator()
        self.toolBarMain.addAction(self.actionApplyModule)
        self.toolBarMain.addAction(self.actionShell)
        self.toolBarMain.addAction(self.actionInterpreter)
        self.toolBarMain.addAction(self.actionList_Files)
        self.addToolBar(Qt.TopToolBarArea, self.ideActions.maintoolbar)
    #####################
    ## CALLBACK FOR ALL ACTIONS #
    #####################
    ## MENU
    #### NEW DUMP
    def openAddDump(self):
        """ Open a Dialog for select a file and add in VFS """
        sFileName = QFileDialog.getOpenFileNames(self, QApplication.translate("MainWindow", "Add Dumps", None, QApplication.UnicodeUTF8), os.path.expanduser('~'))
        for name in sFileName:
            arg = self.env.libenv.argument("gui_input")
            # thisown = 0: hand ownership of the SWIG wrapper to the C++ side.
            arg.thisown = 0
            arg.add_node("parent", self.dockNodeTree.treeItemModel.rootItem.node)
            arg.add_path("path", str(name))
            exec_type = ["thread", "gui"]
            self.taskmanager.add("local", arg, exec_type)
    ## MENU
    #### ABOUT
    def openAbout(self):
        """ Open a About Dialog """
        QMessageBox.information(self, QApplication.translate("MainWindow", "About", None, QApplication.UnicodeUTF8), QApplication.translate("MainWindow", "<b>Digital Forensics Framework</b> (version 0.5)<br><br> If you have any troubles, please visit our <a href=\"http://wiki.digital-forensic.org\"> support page</a><br>IRC channel: freenode #digital-forensic<br>More information: <a href=\"ht\
tp://www.digital-forensic.org\"> digital-forensic </a><br><br>Software developed by <a href=\"http://arxsys.fr\"> ArxSys</a>", None, QApplication.UnicodeUTF8))
    #### APPLY MODULE
    def openApplyModule(self, nameModule = None, typeModule = None, nodesSelected = None):
        """Run the 'apply module' dialog, pre-filled with the given module
        name/type and selected nodes; refuse to open it twice."""
        if(self.QApplyModule.isVisible()):
            QMessageBox.critical(self, "Erreur", u"This box is already open")
        else:
            self.QApplyModule.initAllInformations(nameModule, typeModule, nodesSelected)
            iReturn = self.QApplyModule.exec_()
            if iReturn :
                type = self.QApplyModule.currentType()
                script = self.QApplyModule.currentModuleName()
                arg = self.QApplyModule.getDFFArguments()
                self.QApplyModule.deleteAllArguments()
    def openLoadDriver(self):
        """Let the user pick a driver/module file and load it via the loader."""
        sFileName = QFileDialog.getOpenFileName(self, QApplication.translate("MainWindow", "Add Dump", None, QApplication.UnicodeUTF8), "/home", "Modules(*.so *.py *.dll *.mod);; driver(*.so *.dll);; script(*.py)")
        if (sFileName) :
            self.loader.do_load(str(sFileName))
    def closeEvent(self, e):
        """Persist window geometry and dock layout on close."""
        settings = QSettings("ArxSys", "DFF-0.5")
        settings.setValue("geometry", self.saveGeometry())
        settings.setValue("windowState", self.saveState())
    def readSettings(self):
        """Restore window geometry and dock layout saved by closeEvent."""
        settings = QSettings("ArxSys", "DFF-0.5")
        self.restoreGeometry(settings.value("geometry").toByteArray())
        self.restoreState(settings.value("windowState").toByteArray())
|
elthariel/dff
|
ui/gui/mainWindow.py
|
Python
|
gpl-2.0
| 9,488
|
[
"VisIt"
] |
180b5bc5613e0950285a4e6e215e012679d61afb808b881711a0de2de3c7a764
|
# coding=utf-8
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for StatementVisitor."""
import ast
import re
import subprocess
import textwrap
import unittest
from grumpy.compiler import block
from grumpy.compiler import shard_test
from grumpy.compiler import stmt
from grumpy.compiler import util
class StatementVisitorTest(unittest.TestCase):
  """Tests for stmt.StatementVisitor.

  Most cases compile and execute a small Python snippet through the external
  grumprun binary (via _GrumpRun) and compare (returncode, stdout); the
  _ParseAndVisit cases only check compile-time diagnostics, and the
  _write_except_dispatcher cases inspect the generated Go source directly.
  """
  def testAssertNoMsg(self):
    self.assertEqual((0, 'AssertionError()\n'), _GrumpRun(textwrap.dedent("""\
        try:
          assert False
        except AssertionError as e:
          print repr(e)""")))
  def testAssertMsg(self):
    want = (0, "AssertionError('foo',)\n")
    self.assertEqual(want, _GrumpRun(textwrap.dedent("""\
        try:
          assert False, 'foo'
        except AssertionError as e:
          print repr(e)""")))
  def testAssignAttribute(self):
    self.assertEqual((0, '123\n'), _GrumpRun(textwrap.dedent("""\
        e = Exception()
        e.foo = 123
        print e.foo""")))
  def testAssignName(self):
    self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
        foo = 'bar'
        print foo""")))
  def testAssignMultiple(self):
    self.assertEqual((0, 'baz baz\n'), _GrumpRun(textwrap.dedent("""\
        foo = bar = 'baz'
        print foo, bar""")))
  def testAssignSubscript(self):
    self.assertEqual((0, "{'bar': None}\n"), _GrumpRun(textwrap.dedent("""\
        foo = {}
        foo['bar'] = None
        print foo""")))
  def testAssignTuple(self):
    self.assertEqual((0, 'a b\n'), _GrumpRun(textwrap.dedent("""\
        baz = ('a', 'b')
        foo, bar = baz
        print foo, bar""")))
  def testAugAssign(self):
    self.assertEqual((0, '42\n'), _GrumpRun(textwrap.dedent("""\
        foo = 41
        foo += 1
        print foo""")))
  def testAugAssignBitAnd(self):
    self.assertEqual((0, '3\n'), _GrumpRun(textwrap.dedent("""\
        foo = 7
        foo &= 3
        print foo""")))
  def testAugAssignUnsupportedOp(self):
    expected = 'augmented assignment op not implemented'
    self.assertRaisesRegexp(util.ParseError, expected,
                            _ParseAndVisit, 'foo **= bar')
  def testClassDef(self):
    self.assertEqual((0, "<type 'type'>\n"), _GrumpRun(textwrap.dedent("""\
        class Foo(object):
          pass
        print type(Foo)""")))
  def testClassDefWithVar(self):
    self.assertEqual((0, 'abc\n'), _GrumpRun(textwrap.dedent("""\
        class Foo(object):
          bar = 'abc'
        print Foo.bar""")))
  def testDeleteAttribute(self):
    self.assertEqual((0, 'False\n'), _GrumpRun(textwrap.dedent("""\
        class Foo(object):
          bar = 42
        del Foo.bar
        print hasattr(Foo, 'bar')""")))
  def testDeleteClassLocal(self):
    self.assertEqual((0, 'False\n'), _GrumpRun(textwrap.dedent("""\
        class Foo(object):
          bar = 'baz'
          del bar
        print hasattr(Foo, 'bar')""")))
  def testDeleteGlobal(self):
    self.assertEqual((0, 'False\n'), _GrumpRun(textwrap.dedent("""\
        foo = 42
        del foo
        print 'foo' in globals()""")))
  def testDeleteLocal(self):
    self.assertEqual((0, 'ok\n'), _GrumpRun(textwrap.dedent("""\
        def foo():
          bar = 123
          del bar
          try:
            print bar
            raise AssertionError
          except UnboundLocalError:
            print 'ok'
        foo()""")))
  def testDeleteNonexistentLocal(self):
    self.assertRaisesRegexp(
        util.ParseError, 'cannot delete nonexistent local',
        _ParseAndVisit, 'def foo():\n  del bar')
  def testDeleteSubscript(self):
    self.assertEqual((0, '{}\n'), _GrumpRun(textwrap.dedent("""\
        foo = {'bar': 'baz'}
        del foo['bar']
        print foo""")))
  def testExprCall(self):
    self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
        def foo():
          print 'bar'
        foo()""")))
  def testExprNameGlobal(self):
    self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
        foo = 42
        foo""")))
  def testExprNameLocal(self):
    self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
        foo = 42
        def bar():
          foo
        bar()""")))
  def testFor(self):
    self.assertEqual((0, '1\n2\n3\n'), _GrumpRun(textwrap.dedent("""\
        for i in (1, 2, 3):
          print i""")))
  def testForBreak(self):
    self.assertEqual((0, '1\n'), _GrumpRun(textwrap.dedent("""\
        for i in (1, 2, 3):
          print i
          break""")))
  def testForContinue(self):
    self.assertEqual((0, '1\n2\n3\n'), _GrumpRun(textwrap.dedent("""\
        for i in (1, 2, 3):
          print i
          continue
          raise AssertionError""")))
  def testForElse(self):
    self.assertEqual((0, 'foo\nbar\n'), _GrumpRun(textwrap.dedent("""\
        for i in (1,):
          print 'foo'
        else:
          print 'bar'""")))
  def testFunctionDef(self):
    self.assertEqual((0, 'bar baz\n'), _GrumpRun(textwrap.dedent("""\
        def foo(a, b):
          print a, b
        foo('bar', 'baz')""")))
  def testFunctionDefGenerator(self):
    self.assertEqual((0, "['foo', 'bar']\n"), _GrumpRun(textwrap.dedent("""\
        def gen():
          yield 'foo'
          yield 'bar'
        print list(gen())""")))
  def testFunctionDefGeneratorReturnValue(self):
    self.assertRaisesRegexp(
        util.ParseError, 'returning a value in a generator function',
        _ParseAndVisit, 'def foo():\n  yield 1\n  return 2')
  def testFunctionDefLocal(self):
    self.assertEqual((0, 'baz\n'), _GrumpRun(textwrap.dedent("""\
        def foo():
          def bar():
            print 'baz'
          bar()
        foo()""")))
  def testIf(self):
    self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
        if 123:
          print 'foo'
        if '':
          print 'bar'""")))
  def testIfElif(self):
    self.assertEqual((0, 'foo\nbar\n'), _GrumpRun(textwrap.dedent("""\
        if True:
          print 'foo'
        elif False:
          print 'bar'
        if False:
          print 'foo'
        elif True:
          print 'bar'""")))
  def testIfElse(self):
    self.assertEqual((0, 'foo\nbar\n'), _GrumpRun(textwrap.dedent("""\
        if True:
          print 'foo'
        else:
          print 'bar'
        if False:
          print 'foo'
        else:
          print 'bar'""")))
  def testImport(self):
    self.assertEqual((0, "<type 'dict'>\n"), _GrumpRun(textwrap.dedent("""\
        import sys
        print type(sys.modules)""")))
  def testImportConflictingPackage(self):
    self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
        import time
        from __go__.time import Now""")))
  def testImportNative(self):
    self.assertEqual((0, '1 1000000000\n'), _GrumpRun(textwrap.dedent("""\
        from __go__.time import Nanosecond, Second
        print Nanosecond, Second""")))
  def testImportGrump(self):
    self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
        from __go__.grumpy import Assert
        Assert(__frame__(), True, 'bad')""")))
  def testImportNativeModuleRaises(self):
    regexp = r'for native imports use "from __go__\.xyz import \.\.\." syntax'
    self.assertRaisesRegexp(util.ParseError, regexp, _ParseAndVisit,
                            'import __go__.foo')
  def testImportNativeType(self):
    self.assertEqual((0, "<type 'Duration'>\n"), _GrumpRun(textwrap.dedent("""\
        from __go__.time import type_Duration as Duration
        print Duration""")))
  def testPrint(self):
    self.assertEqual((0, 'abc 123\nfoo bar\n'), _GrumpRun(textwrap.dedent("""\
        print 'abc',
        print '123'
        print 'foo', 'bar'""")))
  def testRaiseExitStatus(self):
    self.assertEqual(1, _GrumpRun('raise Exception')[0])
  def testRaiseInstance(self):
    self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
        try:
          raise RuntimeError('foo')
          print 'bad'
        except RuntimeError as e:
          print e""")))
  def testRaiseTypeAndArg(self):
    self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
        try:
          raise KeyError('foo')
          print 'bad'
        except KeyError as e:
          print e""")))
  def testRaiseAgain(self):
    self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
        try:
          try:
            raise AssertionError('foo')
          except AssertionError:
            raise
        except Exception as e:
          print e""")))
  def testRaiseTraceback(self):
    self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
        import sys
        try:
          try:
            raise Exception
          except:
            e, _, tb = sys.exc_info()
            raise e, None, tb
        except:
          e2, _, tb2 = sys.exc_info()
          assert e is e2
          assert tb is tb2""")))
  def testReturn(self):
    self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
        def foo():
          return 'bar'
        print foo()""")))
  def testTryBareExcept(self):
    self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
        try:
          raise AssertionError
        except:
          pass""")))
  def testTryElse(self):
    self.assertEqual((0, 'foo baz\n'), _GrumpRun(textwrap.dedent("""\
        try:
          print 'foo',
        except:
          print 'bar'
        else:
          print 'baz'""")))
  def testTryMultipleExcept(self):
    self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
        try:
          raise AssertionError
        except RuntimeError:
          print 'foo'
        except AssertionError:
          print 'bar'
        except:
          print 'baz'""")))
  def testTryFinally(self):
    result = _GrumpRun(textwrap.dedent("""\
        try:
          print 'foo',
        finally:
          print 'bar'
        try:
          print 'foo',
          raise Exception
        finally:
          print 'bar'"""))
    self.assertEqual(1, result[0])
    # Some platforms show "exit status 1" message so don't test strict equality.
    self.assertIn('foo bar\nfoo bar\nException\n', result[1])
  def testWhile(self):
    self.assertEqual((0, '2\n1\n'), _GrumpRun(textwrap.dedent("""\
        i = 2
        while i:
          print i
          i -= 1""")))
  def testWhileElse(self):
    self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
        while False:
          print 'foo'
        else:
          print 'bar'""")))
  def testWith(self):
    self.assertEqual((0, 'enter\n1\nexit\nenter\n2\nexit\n3\n'),
                     _GrumpRun(textwrap.dedent("""\
        class ContextManager(object):
          def __enter__(self):
            print "enter"
          def __exit__(self, exc_type, value, traceback):
            print "exit"
        a = ContextManager()
        with a:
          print 1
        try:
          with a:
            print 2
            raise RuntimeError
        except RuntimeError:
          print 3
        """)))
  def testWithAs(self):
    self.assertEqual((0, '1 2 3\n'),
                     _GrumpRun(textwrap.dedent("""\
        class ContextManager(object):
          def __enter__(self):
            return (1, (2, 3))
          def __exit__(self, *args):
            pass
        with ContextManager() as [x, (y, z)]:
          print x, y, z
        """)))
  def testWriteExceptDispatcherBareExcept(self):
    visitor = stmt.StatementVisitor(_MakeModuleBlock())
    handlers = [ast.ExceptHandler(type=ast.Name(id='foo')),
                ast.ExceptHandler(type=None)]
    self.assertEqual(visitor._write_except_dispatcher( # pylint: disable=protected-access
        'exc', 'tb', handlers), [1, 2])
    expected = re.compile(r'ResolveGlobal\(.*foo.*\bIsInstance\(.*'
                          r'goto Label1.*goto Label2', re.DOTALL)
    self.assertRegexpMatches(visitor.writer.out.getvalue(), expected)
  def testWriteExceptDispatcherBareExceptionNotLast(self):
    visitor = stmt.StatementVisitor(_MakeModuleBlock())
    handlers = [ast.ExceptHandler(type=None),
                ast.ExceptHandler(type=ast.Name(id='foo'))]
    self.assertRaisesRegexp(util.ParseError, r"default 'except:' must be last",
                            visitor._write_except_dispatcher, # pylint: disable=protected-access
                            'exc', 'tb', handlers)
  def testWriteExceptDispatcherMultipleExcept(self):
    visitor = stmt.StatementVisitor(_MakeModuleBlock())
    handlers = [ast.ExceptHandler(type=ast.Name(id='foo')),
                ast.ExceptHandler(type=ast.Name(id='bar'))]
    self.assertEqual(visitor._write_except_dispatcher( # pylint: disable=protected-access
        'exc', 'tb', handlers), [1, 2])
    expected = re.compile(
        r'ResolveGlobal\(.*foo.*\bif .*\bIsInstance\(.*\{.*goto Label1.*'
        r'ResolveGlobal\(.*bar.*\bif .*\bIsInstance\(.*\{.*goto Label2.*'
        r'\bRaise\(exc\.ToObject\(\), nil, tb\.ToObject\(\)\)', re.DOTALL)
    self.assertRegexpMatches(visitor.writer.out.getvalue(), expected)
def _MakeModuleBlock():
return block.ModuleBlock('__main__', 'grumpy', 'grumpy/lib', '<test>', [])
def _ParseAndVisit(source):
b = block.ModuleBlock('__main__', 'grumpy', 'grumpy/lib', '<test>',
source.split('\n'))
visitor = stmt.StatementVisitor(b)
visitor.visit(ast.parse(source))
return visitor
def _GrumpRun(cmd):
  """Feed *cmd* to the grumprun binary; return (returncode, merged output)."""
  proc = subprocess.Popen(['grumprun'], stdin=subprocess.PIPE,
                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
  output, _ = proc.communicate(cmd)
  return proc.returncode, output
if __name__ == '__main__':
  # shard_test wraps unittest.main so test cases can be split across shards.
  shard_test.main()
|
AlexEKoren/grumpy
|
compiler/stmt_test.py
|
Python
|
apache-2.0
| 14,166
|
[
"VisIt"
] |
9e03c61f7bffafe69adc4a6c4517d31e87f81cfc0b4a6ae7cfc5de46253182a1
|
import math
import numpy
from chainer import cuda
from chainer.functions.connection import deconvolution_2d
from chainer import initializers
from chainer import link
class Deconvolution2D(link.Link):
    """Two dimensional deconvolution function.
    This link wraps the :func:`~chainer.functions.deconvolution_2d` function
    and holds the filter weight and bias vector as parameters.
    Args:
        in_channels (int): Number of channels of input arrays.
        out_channels (int): Number of channels of output arrays.
        ksize (int or pair of ints): Size of filters (a.k.a. kernels).
            ``ksize=k`` and ``ksize=(k, k)`` are equivalent.
        stride (int or pair of ints): Stride of filter applications.
            ``stride=s`` and ``stride=(s, s)`` are equivalent.
        pad (int or pair of ints): Spatial padding width for input arrays.
            ``pad=p`` and ``pad=(p, p)`` are equivalent.
        wscale (float): Scaling factor of the initial weight.
        bias (float): Initial bias value.
        nobias (bool): If ``True``, then this function does not use the bias
            term.
        outsize (tuple): Expected output size of deconvolutional operation.
            It should be pair of height and width :math:`(out_H, out_W)`.
            Default value is ``None`` and the outsize is estimated by
            input size, stride and pad.
        use_cudnn (bool): If ``True``, then this function uses cuDNN if
            available.
        initialW (4-D array): Initial weight value. If ``None``, the default
            initializer scaled by ``wscale`` is used instead.
            May also be a callable that takes ``numpy.ndarray`` or
            ``cupy.ndarray`` and edits its value.
        initial_bias (1-D array): Initial bias value. If ``None``, the bias
            vector is filled with the scalar ``bias`` value.
            May also be a callable that takes ``numpy.ndarray`` or
            ``cupy.ndarray`` and edits its value.
    The filter weight has four dimensions :math:`(c_I, c_O, k_H, k_W)`
    which indicate the number of input channels, output channels,
    height and width of the kernels, respectively.
    The filter weight is initialized with i.i.d. Gaussian random samples, each
    of which has zero mean and deviation :math:`\\sqrt{1/(c_I k_H k_W)}` by
    default. The deviation is scaled by ``wscale`` if specified.
    The bias vector is of size :math:`c_O`.
    Its elements are initialized by ``bias`` argument.
    If ``nobias`` argument is set to True, then this function does not hold
    the bias parameter.
    .. seealso::
       See :func:`chainer.functions.deconvolution_2d` for the definition of
       two-dimensional convolution.
    """
    def __init__(self, in_channels, out_channels, ksize, stride=1, pad=0,
                 wscale=1, bias=0, nobias=False, outsize=None, use_cudnn=True,
                 initialW=None, initial_bias=None):
        # Normalize scalar hyperparameters into (h, w) pairs.
        kh, kw = _pair(ksize)
        self.stride = _pair(stride)
        self.pad = _pair(pad)
        self.outsize = (None, None) if outsize is None else outsize
        self.use_cudnn = use_cudnn
        # Weight layout is (in_channels, out_channels, kh, kw).
        W_shape = (in_channels, out_channels, kh, kw)
        super(Deconvolution2D, self).__init__(W=W_shape)
        if isinstance(initialW, (numpy.ndarray, cuda.ndarray)):
            assert initialW.shape == (in_channels, out_channels, kh, kw)
        # For backward compatibility, the scale of weights is proportional to
        # the square root of wscale.
        initializers.init_weight(self.W.data, initialW,
                                 scale=math.sqrt(wscale))
        if nobias:
            self.b = None
        else:
            self.add_param('b', out_channels)
            if isinstance(initial_bias, (numpy.ndarray, cuda.ndarray)):
                assert initial_bias.shape == (out_channels,)
            # Fall back to the scalar ``bias`` when no explicit value given.
            if initial_bias is None:
                initial_bias = bias
            initializers.init_weight(self.b.data, initial_bias)
    def __call__(self, x):
        """Apply the deconvolution to input variable ``x``."""
        return deconvolution_2d.deconvolution_2d(
            x, self.W, self.b, self.stride, self.pad,
            self.outsize, self.use_cudnn)
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
|
benob/chainer
|
chainer/links/connection/deconvolution_2d.py
|
Python
|
mit
| 4,249
|
[
"Gaussian"
] |
92703b7a188aa1c93ada9dd0c546724c30e327dca1dc3d213baf261c4d16942e
|
""".. module:: Test_DAG
Test cases for DIRAC.Core.Utilities.DAG module.
"""
import unittest
# sut
from DIRAC.Core.Utilities.DAG import DAG, makeFrozenSet
__RCSID__ = "$Id $"
########################################################################
class DAGTestCase( unittest.TestCase ):
  """Shared base class for DIRAC.Core.Utilities.DAG test cases."""
class DAGSimple(DAGTestCase):
  """Unit tests for the makeFrozenSet helper."""
  def test_makeFrozenSet(self):
    """ test makeFrozenSet

    makeFrozenSet turns a dict into a frozenset of (key, value) pairs,
    recursively freezing list/set/dict values into frozensets so the result
    is hashable.
    """
    res = makeFrozenSet({'a':'b'})
    self.assertEqual(res, frozenset({('a','b')}))
    # dict with lists in
    dList1 = {'a':[]}
    res = makeFrozenSet(dList1)
    self.assertEqual(res, frozenset({('a',frozenset([]))}))
    dList2 = {'a':[0, 1]}
    res = makeFrozenSet(dList2)
    self.assertEqual(res, frozenset({('a',frozenset([0, 1]))}))
    dList3 = {'a':[0, 1], 'b':0}
    res = makeFrozenSet(dList3)
    self.assertEqual( res, frozenset( { ('a',frozenset([0, 1])), ('b', 0) } ) )
    # dict with sets in
    dSet1 = {'a':set()}
    res = makeFrozenSet(dSet1)
    self.assertEqual( res, frozenset( { ('a', frozenset([])) } ) )
    #dict with dicts in
    dDict1 = {'a': {'a':'b'}}
    res = makeFrozenSet(dDict1)
    self.assertEqual( res, frozenset( { ('a', frozenset( {('a', 'b')} ) ) } ) )
    # #dicts with sets, list, and dicts in
    dAll = {'a': {'a':'b'}, 'c':[0,1], 'd':set()}
    res = makeFrozenSet(dAll)
    self.assertEqual( res, frozenset( { ('a', frozenset( {('a', 'b')} ) ), ('c', frozenset([0, 1])), ('d', frozenset([])) } ) )
class DAGFull(DAGTestCase):
  """End-to-end tests of the DAG container: node/edge insertion, topological
  listing, index-node discovery, and unhashable nodes (dict/list/set) which
  the DAG stores as frozensets."""
  def test_getList(self):
    """ test dag to list

    getList returns the linear chain of nodes only while the graph is an
    unbranched path; mixed node types (str, dict, list) are preserved.
    """
    dag = DAG()
    dag.addNode('A')
    l = dag.getList()
    self.assertEqual(l, ['A'])
    dag.addNode('C')
    dag.addEdge('A', 'C')
    l = dag.getList()
    self.assertEqual(l, ['A', 'C'])
    dag.addNode('B')
    dag.addEdge('C', 'B')
    l = dag.getList()
    self.assertEqual(l, ['A', 'C', 'B'])
    d = dict(zip('ab', range(2)))
    dag.addNode(d)
    dag.addEdge('B', d)
    l = dag.getList()
    self.assertEqual(l, ['A', 'C', 'B', d])
    l1 = list(range(2))
    dag.addNode(l1)
    dag.addEdge(d, l1)
    l = dag.getList()
    self.assertEqual(l, ['A', 'C', 'B', d, l1])
    dag.addNode('E')
    dag.addEdge(l1, 'E')
    l = dag.getList()
    self.assertEqual(l, ['A', 'C', 'B', d, l1, 'E'])
    dag1 = DAG()
    dag1.addNode(d)
    dag1.addNode(l1)
    dag1.addEdge(d, l1)
    l = dag1.getList()
    self.assertEqual(l, [d, l1])
  def test_full(self):
    """ test dag creation and more
    """
    dag = DAG()
    i_n = dag.getIndexNodes()
    self.assertEqual(i_n, [])
    dag.addNode('A')
    self.assertEqual(dag.graph, {'A': set()})
    # re-adding an existing node is a no-op
    dag.addNode('A')
    self.assertEqual(dag.graph, {'A': set()})
    dag.addNode('B')
    self.assertEqual(dag.graph, {'A': set(), 'B': set()})
    l = dag.getList()
    self.assertEqual(l, [])
    dag.addEdge('A', 'B')
    self.assertEqual(dag.graph, {'A': {'B'}, 'B': set()})
    l = dag.getList()
    self.assertEqual(l, ['A', 'B'])
    # duplicate edge is a no-op
    dag.addEdge('A', 'B')
    self.assertEqual(dag.graph, {'A': {'B'}, 'B': set()})
    # edge to a node that was never added is ignored
    dag.addEdge('A', 'C')
    self.assertEqual(dag.graph, {'A': {'B'}, 'B': set()})
    dag.addNode('C')
    dag.addEdge('A', 'C')
    self.assertEqual(dag.graph, {'A': {'B', 'C'}, 'B': set(), 'C': set()})
    l = dag.getList()
    self.assertEqual(l, ['A'])
    dag.addEdge('C', 'A') #this would be cyclic, so it should not change the graph
    self.assertEqual(dag.graph, {'A': {'B', 'C'}, 'B': set(), 'C': set()})
    dag.addNode('D')
    i_n = dag.getIndexNodes()
    self.assertEqual(i_n, ['A', 'D'])
    dag.addNode('E')
    i_n = dag.getIndexNodes()
    self.assertEqual(sorted(i_n), sorted(['A', 'D', 'E']))
    dag.addEdge('A', 'D')
    dag.addEdge('D', 'E')
    self.assertEqual(dag.graph, {'A': {'B', 'C', 'D'}, 'B': set(), 'C': set(), 'D': {'E'}, 'E': set()} )
    i_n = dag.getIndexNodes()
    self.assertEqual(i_n, ['A'])
    dag.addEdge('E', 'A')
    self.assertEqual(dag.graph, {'A': {'B', 'C', 'D'}, 'B': set(), 'C': set(), 'D': {'E'}, 'E': {'A'}} )
    #now an object
    class forTest(object):
      pass
    ft = forTest()
    dag.addNode(ft)
    self.assertEqual(dag.graph, {'A': {'B', 'C', 'D'}, 'B': set(), 'C': set(), 'D': {'E'}, 'E': {'A'}, ft: set()} )
    dag.addEdge('B', ft)
    self.assertEqual( dag.graph,
                      {'A': {'B', 'C', 'D'}, 'B': {ft}, 'C': set(), 'D': {'E'}, 'E': {'A'}, ft: set()}
                    )
    #now sets, dicts and lists as nodes
    d = dict(zip('ab', range(2)))
    dag.addNode(d)
    self.assertEqual( dag.graph,
                      { 'A': {'B', 'C', 'D'},
                        'B': {ft},
                        'C': set(),
                        'D': {'E'},
                        'E': {'A'},
                        ft: set(),
                        frozenset({('a',0), ('b',1)}): set()})
    dag.addEdge(ft, d)
    self.assertEqual( dag.graph,
                      { 'A': {'B', 'C', 'D'},
                        'B': {ft},
                        'C': set(),
                        'D': {'E'},
                        'E': {'A'},
                        ft: set([frozenset({('a',0), ('b',1)})]),
                        frozenset({('a',0), ('b',1)}): set()
                      }
                    )
    l = list(range(2))
    dag.addNode(l)
    self.assertEqual( dag.graph,
                      { 'A': {'B', 'C', 'D'},
                        'B': {ft},
                        'C': set(),
                        'D': {'E'},
                        'E': {'A'},
                        ft: set([frozenset({('a',0), ('b',1)})]), #ft -> d
                        frozenset({('a',0), ('b',1)}): set(), #d
                        frozenset({0,1}): set() #l
                      }
                    )
    dag.addEdge(d, l)
    self.assertEqual( dag.graph,
                      { 'A': {'B', 'C', 'D'},
                        'B': {ft},
                        'C': set(),
                        'D': {'E'},
                        'E': {'A'},
                        ft: set([frozenset({('a',0), ('b',1)})]), #ft -> d
                        frozenset({('a',0), ('b',1)}): set([frozenset({0,1})]), #d->l
                        frozenset({0,1}): set() #l
                      }
                    )
    # prune the graph by hand to a small known state before continuing
    del dag.graph['E']
    del dag.graph['D']
    del dag.graph[ft]
    del dag.graph[frozenset({('a',0), ('b',1)})]
    dag.graph['A'] = {'B', 'C'}
    self.assertEqual( dag.graph,
                      { 'A': {'B', 'C'},
                        'B': {ft},
                        'C': set(),
                        frozenset({0,1}): set(), #l
                      }
                    )
    i_n = dag.getIndexNodes()
    self.assertEqual(i_n, ['A', l])
    d1 = {'a':'b'}
    dag.addNode(d1)
    self.assertEqual( dag.graph,
                      { 'A': {'B', 'C'},
                        'B': {ft},
                        'C': set(),
                        frozenset({0,1}): set(), #l
                        frozenset({('a','b')}): set(), #d1
                      }
                    )
    l1 = ['a', 'b']
    dag.addNode(l1)
    self.assertEqual( dag.graph,
                      { 'A': {'B', 'C'},
                        'B': {ft},
                        'C': set(),
                        frozenset({0,1}): set(), #l
                        frozenset({('a', 'b')}): set(), #d1
                        frozenset({'a', 'b'}): set() #l1
                      }
                    )
    i_n = dag.getIndexNodes()
    self.assertEqual(sorted(i_n), sorted(['A', l, d1, l1]))
    s1 = set()
    dag.addNode(s1)
    self.assertEqual( dag.graph,
                      { 'A': {'B', 'C'},
                        'B': {ft},
                        'C': set(),
                        frozenset({0,1}): set(), #l
                        frozenset({('a', 'b')}): set(), #d1
                        frozenset({'a', 'b'}): set(), #l1
                        frozenset({}): set() # s1
                      }
                    )
    #dict with frozenset in
    dFSet1 = {'a':frozenset()}
    dag.addNode(dFSet1)
    self.assertEqual( dag.graph,
                      { 'A': {'B', 'C'},
                        'B': {ft},
                        'C': set(),
                        frozenset({0,1}): set(), #l
                        frozenset({('a', 'b')}): set(), #d1
                        frozenset({'a', 'b'}): set(), #l1
                        frozenset({}): set(), # s1
                        frozenset([('a', frozenset([]))]): set() #dFSet1
                      }
                    )
if __name__ == '__main__':
  # Assemble all three test case classes into one suite and run it verbosely.
  suite = unittest.defaultTestLoader.loadTestsFromTestCase( DAGTestCase )
  suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( DAGSimple ) )
  suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( DAGFull ) )
  testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
|
Andrew-McNab-UK/DIRAC
|
Core/Utilities/test/Test_DAG.py
|
Python
|
gpl-3.0
| 8,935
|
[
"DIRAC"
] |
76020908d77c203f0e9b488078d7467c01f1aa0c3ee8bca07e330a0f8d7ccb1b
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from urllib import quote
from Plugins.Extensions.OpenWebif.local import tstrings
##################################################
## MODULE CONSTANTS
## Short aliases for Cheetah's NameMapper lookup helpers; the generated
## respond() body below calls these on every placeholder expansion.
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
# Alias so templates can read the current time without an attribute lookup.
currentTime=time.time
# Provenance recorded by the Cheetah compiler at generation time.
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1453357629.867029
__CHEETAH_genTimestamp__ = 'Thu Jan 21 15:27:09 2016'
__CHEETAH_src__ = '/home/babel/Build/Test/OpenPLi5/openpli5.0/build/tmp/work/tmnanoseplus-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+186ea358f6-r0/git/plugin/controllers/views/ajax/timers.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Jan 21 15:27:08 2016'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
# Refuse to run against a Cheetah runtime older than the one this template
# was compiled with; stale compiled templates must be regenerated instead.
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
      'This template was compiled with Cheetah version'
      ' %s. Templates compiled before version %s must be recompiled.'%(
         __CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class timers(Template):
    """Cheetah-compiled template for the OpenWebif AJAX timer-list page.

    Autogenerated from ajax/timers.tmpl -- do not hand-edit the rendering
    logic. The searchList supplied by the controller is expected to provide
    ``timers`` (an iterable of timer records with begin/end/name/state/...
    attributes) and ``tstrings`` (a localisation string dictionary).
    """
    ##################################################
    ## CHEETAH GENERATED METHODS
    def __init__(self, *args, **KWs):
        """Forward only the Cheetah-recognised keyword args to the base init."""
        super(timers, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)
    def respond(self, trans=None):
        """Render the timer-list HTML into *trans* and return it as a string.

        When no transaction is supplied a DummyTransaction buffers the
        output and the rendered markup is returned; otherwise the return
        value is the empty string (output went to the real transaction).
        """
        ## CHEETAH: main method generated for this template
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter
        ########################################
        ## START - generated method body
        # Switch to the WebSafe filter (HTML-escapes placeholder values) for
        # the duration of this render; the original filter is restored at END.
        _orig_filter_39185864 = _filter
        filterName = u'WebSafe'
        if self._CHEETAH__filters.has_key("WebSafe"):
            _filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
        else:
            _filter = self._CHEETAH__currentFilter = \
                self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
        # Static page chrome: toolbar with add / cleanup / refresh buttons.
        write(u'''<div id="content_main" style="min-height: 500px;">
\t<div id="tvcontentmain">
\t\t<div id="toolbar-header">
\t\t\t<span id="toolbar">
\t\t\t\t<span id="timerbuttons">
\t\t\t\t\t<button id="timerbutton0" onclick="addTimer(); return false">''')
        _v = VFFSL(SL,"tstrings",True)['add_timer'] # u"$tstrings['add_timer']" on line 9, col 67
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['add_timer']")) # from line 9, col 67.
        write(u'''</button>
\t\t\t\t\t<button id="timerbutton1" onclick="cleanupTimer(); return false">''')
        _v = VFFSL(SL,"tstrings",True)['cleanup_timer'] # u"$tstrings['cleanup_timer']" on line 10, col 71
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['cleanup_timer']")) # from line 10, col 71.
        write(u'''</button>
\t\t\t\t\t<button id="timerbutton2" onclick="lastcontenturl=\'\';load_maincontent(\'ajax/timers\'); return false;">''')
        _v = VFFSL(SL,"tstrings",True)['refresh_timer'] # u"$tstrings['refresh_timer']" on line 11, col 107
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['refresh_timer']")) # from line 11, col 107.
        write(u'''</button>
\t\t\t\t</span>
\t\t\t</span>
\t\t</div>
\t\t<div id="timers">
''')
        # One .tm_row block per timer; the begin-end pair doubles as the DOM id.
        for timer in VFFSL(SL,"timers",True): # generated from line 17, col 3
            write(u'''\t\t\t<div class="tm_row" id="''')
            _v = VFFSL(SL,"timer.begin",True) # u'$timer.begin' on line 18, col 28
            if _v is not None: write(_filter(_v, rawExpr=u'$timer.begin')) # from line 18, col 28.
            write(u'''-''')
            _v = VFFSL(SL,"timer.end",True) # u'$timer.end' on line 18, col 41
            if _v is not None: write(_filter(_v, rawExpr=u'$timer.end')) # from line 18, col 41.
            write(u'''">
\t\t\t\t<div class="tm_text">
\t\t\t\t\t<div class="tm_buttons">
\t\t\t\t\t\t<div>
''')
            # URL-encode the service reference / name for use in onClick handlers.
            sref = quote(VFFSL(SL,"timer.serviceref",True), safe=' ~@#$&()*!+=:;,.?/\'')
            tname = quote(VFFSL(SL,"timer.name",True))
            # Edit / delete / enable-disable action icons for this timer.
            write(u'''\t\t\t\t\t\t<a href=\'#\' onClick="editTimer(\'''')
            _v = VFFSL(SL,"sref",True) # u'$sref' on line 24, col 39
            if _v is not None: write(_filter(_v, rawExpr=u'$sref')) # from line 24, col 39.
            write(u"""', '""")
            _v = VFFSL(SL,"timer.begin",True) # u'$timer.begin' on line 24, col 48
            if _v is not None: write(_filter(_v, rawExpr=u'$timer.begin')) # from line 24, col 48.
            write(u"""', '""")
            _v = VFFSL(SL,"timer.end",True) # u'$timer.end' on line 24, col 64
            if _v is not None: write(_filter(_v, rawExpr=u'$timer.end')) # from line 24, col 64.
            write(u'''\');" title="''')
            _v = VFFSL(SL,"tstrings",True)['edit_timer'] # u"$tstrings['edit_timer']" on line 24, col 86
            if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['edit_timer']")) # from line 24, col 86.
            write(u'''"><div class="ow_i ow_i_edit"></div></a>
\t\t\t\t\t\t<a href=\'#\' onClick="deleteTimer(\'''')
            _v = VFFSL(SL,"sref",True) # u'$sref' on line 25, col 41
            if _v is not None: write(_filter(_v, rawExpr=u'$sref')) # from line 25, col 41.
            write(u"""', '""")
            _v = VFFSL(SL,"timer.begin",True) # u'$timer.begin' on line 25, col 50
            if _v is not None: write(_filter(_v, rawExpr=u'$timer.begin')) # from line 25, col 50.
            write(u"""', '""")
            _v = VFFSL(SL,"timer.end",True) # u'$timer.end' on line 25, col 66
            if _v is not None: write(_filter(_v, rawExpr=u'$timer.end')) # from line 25, col 66.
            write(u"""', '""")
            _v = VFFSL(SL,"tname",True) # u'$tname' on line 25, col 80
            if _v is not None: write(_filter(_v, rawExpr=u'$tname')) # from line 25, col 80.
            write(u'''\');" title="''')
            _v = VFFSL(SL,"tstrings",True)['delete_timer'] # u"$tstrings['delete_timer']" on line 25, col 98
            if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['delete_timer']")) # from line 25, col 98.
            write(u'''"><div class="ow_i ow_i_delete"></div></a>
\t\t\t\t\t\t<a href=\'#\' onClick="toggleTimerStatus(\'''')
            _v = VFFSL(SL,"sref",True) # u'$sref' on line 26, col 47
            if _v is not None: write(_filter(_v, rawExpr=u'$sref')) # from line 26, col 47.
            write(u"""', '""")
            _v = VFFSL(SL,"timer.begin",True) # u'$timer.begin' on line 26, col 56
            if _v is not None: write(_filter(_v, rawExpr=u'$timer.begin')) # from line 26, col 56.
            write(u"""', '""")
            _v = VFFSL(SL,"timer.end",True) # u'$timer.end' on line 26, col 72
            if _v is not None: write(_filter(_v, rawExpr=u'$timer.end')) # from line 26, col 72.
            write(u'''\');"
''')
            # The toggle icon reflects the timer's current disabled flag.
            if VFFSL(SL,"timer.disabled",True): # generated from line 27, col 7
                write(u'''\t\t\t\t\t\ttitle="''')
                _v = VFFSL(SL,"tstrings",True)['enable_timer'] # u"$tstrings['enable_timer']" on line 28, col 14
                if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['enable_timer']")) # from line 28, col 14.
                write(u'''"><div id="img-''')
                _v = VFFSL(SL,"timer.begin",True) # u'$timer.begin' on line 28, col 54
                if _v is not None: write(_filter(_v, rawExpr=u'$timer.begin')) # from line 28, col 54.
                write(u'''-''')
                _v = VFFSL(SL,"timer.end",True) # u'$timer.end' on line 28, col 67
                if _v is not None: write(_filter(_v, rawExpr=u'$timer.end')) # from line 28, col 67.
                write(u'''" class="ow_i ow_i_disabled">
''')
            else: # generated from line 29, col 7
                write(u'''\t\t\t\t\t\ttitle="''')
                _v = VFFSL(SL,"tstrings",True)['disable_timer'] # u"$tstrings['disable_timer']" on line 30, col 14
                if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['disable_timer']")) # from line 30, col 14.
                write(u'''"><div id="img-''')
                _v = VFFSL(SL,"timer.begin",True) # u'$timer.begin' on line 30, col 55
                if _v is not None: write(_filter(_v, rawExpr=u'$timer.begin')) # from line 30, col 55.
                write(u'''-''')
                _v = VFFSL(SL,"timer.end",True) # u'$timer.end' on line 30, col 68
                if _v is not None: write(_filter(_v, rawExpr=u'$timer.end')) # from line 30, col 68.
                write(u'''" class="ow_i ow_i_enabled">
''')
            write(u'''\t\t\t\t\t\t</div></a>
\t\t\t\t\t\t</div>
\t\t\t\t\t</div>
\t\t\t\t\t<div>
\t\t\t\t\t\t<div style="color: #176093; font-weight: bold;">
\t\t\t\t\t\t\t''')
            _v = VFFSL(SL,"timer.name",True) # u'$timer.name' on line 37, col 8
            if _v is not None: write(_filter(_v, rawExpr=u'$timer.name')) # from line 37, col 8.
            write(u'''
\t\t\t\t\t\t</div>
\t\t\t\t\t\t''')
            _v = VFFSL(SL,"timer.realbegin",True) # u'$timer.realbegin' on line 39, col 7
            if _v is not None: write(_filter(_v, rawExpr=u'$timer.realbegin')) # from line 39, col 7.
            write(u''' - ''')
            _v = VFFSL(SL,"timer.realend",True) # u'$timer.realend' on line 39, col 26
            if _v is not None: write(_filter(_v, rawExpr=u'$timer.realend')) # from line 39, col 26.
            write(u'''
''')
            # Repeating timers: decode the repeat bitmask (bit 0 = Monday) into
            # a localised, comma-separated day list.
            if VFFSL(SL,"timer.repeated",True) != 0: # generated from line 40, col 7
                write(u'''\t\t\t\t\t\t\t<div>
\t\t\t\t\t\t\t\t''')
                _v = VFFSL(SL,"tstrings",True)['every_timer'] # u"$tstrings['every_timer']" on line 42, col 9
                if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['every_timer']")) # from line 42, col 9.
                write(u'''
''')
                flags = VFFSL(SL,"timer.repeated",True)
                timerDays = []
                for day in [VFFSL(SL,"tstrings",True)['monday'],VFFSL(SL,"tstrings",True)['tuesday'],VFFSL(SL,"tstrings",True)['wednesday'],VFFSL(SL,"tstrings",True)['thursday'],VFFSL(SL,"tstrings",True)['friday'],VFFSL(SL,"tstrings",True)['saturday'],VFFSL(SL,"tstrings",True)['sunday']]: # generated from line 45, col 8
                    if VFFSL(SL,"flags",True)&1: # generated from line 46, col 9
                        write(u'''\t\t\t\t\t\t\t\t\t''')
                        _v = VFN(VFFSL(SL,"timerDays",True),"append",False)(VFFSL(SL,"day",True)) # u'$timerDays.append($day)' on line 47, col 10
                        if _v is not None: write(_filter(_v, rawExpr=u'$timerDays.append($day)')) # from line 47, col 10.
                        write(u'''
''')
                    flags = VFFSL(SL,"flags",True) >> 1
                _v = ', '.join(VFFSL(SL,"timerDays",True))
                if _v is not None: write(_filter(_v))
                write(u'''\t\t\t\t\t\t\t</div>
''')
            write(u'''\t\t\t\t\t\t<div style="font-weight: bold;">
\t\t\t\t\t\t\t''')
            _v = VFFSL(SL,"timer.servicename",True) # u'$timer.servicename' on line 55, col 8
            if _v is not None: write(_filter(_v, rawExpr=u'$timer.servicename')) # from line 55, col 8.
            write(u'''
\t\t\t\t\t\t</div>
\t\t\t\t\t</div>
\t\t\t\t\t<div>
\t\t\t\t\t\t''')
            _v = VFFSL(SL,"timer.description",True) # u'$timer.description' on line 59, col 7
            if _v is not None: write(_filter(_v, rawExpr=u'$timer.description')) # from line 59, col 7.
            write(u'''
\t\t\t\t\t</div>
\t\t\t\t\t<div>
\t\t\t\t\t\t<span style="color: #7F8181; font-weight: bold;">
''')
            # Timer state label: 0 = waiting, 2 = running, 3 = finished
            # (state 1 has no label in the template).
            if VFFSL(SL,"timer.state",True) == 0: # generated from line 63, col 8
                write(u'''\t\t\t\t\t\t\t\t''')
                _v = VFFSL(SL,"tstrings",True)['waiting'] # u"$tstrings['waiting']" on line 64, col 9
                if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['waiting']")) # from line 64, col 9.
                write(u'''
''')
            elif VFFSL(SL,"timer.state",True) == 2: # generated from line 65, col 8
                write(u'''\t\t\t\t\t\t\t\t''')
                _v = VFFSL(SL,"tstrings",True)['running'] # u"$tstrings['running']" on line 66, col 9
                if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['running']")) # from line 66, col 9.
                write(u'''
''')
            elif VFFSL(SL,"timer.state",True) == 3: # generated from line 67, col 8
                write(u'''\t\t\t\t\t\t\t\t''')
                _v = VFFSL(SL,"tstrings",True)['finished'] # u"$tstrings['finished']" on line 68, col 9
                if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['finished']")) # from line 68, col 9.
                write(u'''
''')
            write(u'''\t\t\t\t\t\t</span>
\t\t\t\t\t</div>
\t\t\t\t</div>
\t\t\t\t<div style="clear: both;"></div>
\t\t\t</div>
''')
        # Closing markup plus the page's inline bootstrap script.
        write(u'''\t\t</div>
\t</div>
</div>
<script type="text/javascript">
var reloadTimers = true;
$(function() {
\t$(\'#timerbuttons\').buttonset();
\tif(!timeredit_initialized)
\t\t$(\'#editTimerForm\').load(\'ajax/edittimer\');
});
</script>
''')
        # Restore whatever filter was active before this render.
        _filter = self._CHEETAH__currentFilter = _orig_filter_39185864
        ########################################
        ## END - generated method body
        return _dummyTrans and trans.response().getvalue() or ""
    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES
    # Class-level defaults consumed by the Cheetah Template runtime.
    _CHEETAH__instanceInitialized = False
    _CHEETAH_version = __CHEETAH_version__
    _CHEETAH_versionTuple = __CHEETAH_versionTuple__
    _CHEETAH_genTime = __CHEETAH_genTime__
    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
    _CHEETAH_src = __CHEETAH_src__
    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
    # Tells the runtime which method renders this template.
    _mainCheetahMethod_for_timers= 'respond'
## END CLASS DEFINITION
# Inject Cheetah's runtime plumbing (caching, filters, error catching) into
# the generated class exactly once.
if not hasattr(timers, '_initCheetahAttributes'):
    templateAPIClass = getattr(timers, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(timers)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
# Allow the compiled template to be rendered directly from the shell.
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=timers()).run()
|
MOA-2011/e2openplugin-OpenWebif
|
plugin/controllers/views/ajax/timers.py
|
Python
|
gpl-2.0
| 15,817
|
[
"VisIt"
] |
21400610e64287e2435239e8b6e0c57e572d7bbe4626e52389fe66f291b1f0f3
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides conversion between the Atomic Simulation Environment
Atoms object and pymatgen Structure objects.
"""
# Module authorship metadata.
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 8, 2012"
from pymatgen.core.structure import Molecule, Structure
# ASE is an optional dependency: record its availability so the adaptor can
# raise a helpful ImportError at call time instead of failing at import time.
try:
    from ase import Atoms
    ase_loaded = True
except ImportError:
    ase_loaded = False
class AseAtomsAdaptor:
    """
    Adaptor serves as a bridge between ASE Atoms and pymatgen objects.
    """

    @staticmethod
    def get_atoms(structure, **kwargs):
        """
        Returns ASE Atoms object from pymatgen structure or molecule.

        Args:
            structure: pymatgen.core.structure.Structure or pymatgen.core.structure.Molecule
            **kwargs: other keyword args to pass into the ASE Atoms constructor

        Returns:
            ASE Atoms object
        """
        # Disordered sites have no single chemical symbol, so ASE cannot
        # represent them.
        if not structure.is_ordered:
            raise ValueError("ASE Atoms only supports ordered structures")
        if not ase_loaded:
            raise ImportError(
                "AseAtomsAdaptor requires ase package.\n" "Use `pip install ase` or `conda install ase -c conda-forge`"
            )
        species = []
        cart_coords = []
        for site in structure:
            species.append(str(site.specie.symbol))
            cart_coords.append(site.coords)
        # A Structure carries a lattice (periodic); a Molecule does not.
        periodic = hasattr(structure, "lattice")
        cell = structure.lattice.matrix if periodic else None
        pbc = True if periodic else None
        return Atoms(symbols=species, positions=cart_coords, pbc=pbc, cell=cell, **kwargs)

    @staticmethod
    def get_structure(atoms, cls=None):
        """
        Returns pymatgen structure from ASE Atoms.

        Args:
            atoms: ASE Atoms object
            cls: The Structure class to instantiate (defaults to pymatgen structure)

        Returns:
            Equivalent pymatgen.core.structure.Structure
        """
        species = atoms.get_chemical_symbols()
        cart_coords = atoms.get_positions()
        cell = atoms.get_cell()
        target_cls = Structure if cls is None else cls
        return target_cls(cell, species, cart_coords, coords_are_cartesian=True)

    @staticmethod
    def get_molecule(atoms, cls=None):
        """
        Returns pymatgen molecule from ASE Atoms.

        Args:
            atoms: ASE Atoms object
            cls: The Molecule class to instantiate (defaults to pymatgen molecule)

        Returns:
            Equivalent pymatgen.core.structure.Molecule
        """
        species = atoms.get_chemical_symbols()
        cart_coords = atoms.get_positions()
        target_cls = Molecule if cls is None else cls
        return target_cls(species, cart_coords)
|
gmatteo/pymatgen
|
pymatgen/io/ase.py
|
Python
|
mit
| 2,883
|
[
"ASE",
"pymatgen"
] |
78aa46bb0ae56537598c5db84b4e2a2a93bd0c2c3d668bdfa2fee5ac5d637ae6
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to Rollback Config back to Lenovo Switches
#
# Lenovo Networking
#
# Standard Ansible module metadata consumed by ansible-doc tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
# Fix: the option was documented as `rcserverip`, but the module's
# argument_spec (and all EXAMPLES) use `serverip` -- main() reads
# module.params['serverip']. Document the name callers must actually use.
DOCUMENTATION = '''
---
module: cnos_rollback
author: "Dave Kasberg (@dkasberg)"
short_description: Roll back the running or startup configuration from a remote server on devices running Lenovo CNOS
description:
    - This module allows you to work with switch configurations. It provides a way to roll back configurations
      of a switch from a remote server. This is achieved by using startup or running configurations of the target
      device that were previously backed up to a remote server using FTP, SFTP, TFTP, or SCP.
      The first step is to create a directory from where the remote server can be reached. The next step is to
      provide the full file path of the backup configuration’s location. Authentication details required by the
      remote server must be provided as well.
      By default, this method overwrites the switch’s configuration file with the newly downloaded file.
      This module uses SSH to manage network device configuration.
      The results of the operation will be placed in a directory named 'results'
      that must be created by the user in their local directory to where the playbook is run.
      For more information about this module from Lenovo and customizing it usage for your
      use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_rollback.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
    configType:
        description:
            - This refers to the type of configuration which will be used for the rolling back process.
              The choices are the running or startup configurations. There is no default value, so it will result
              in an error if the input is incorrect.
        required: Yes
        default: Null
        choices: [running-config, startup-config]
    protocol:
        description:
            - This refers to the protocol used by the network device to interact with the remote server from where to
              download the backup configuration. The choices are FTP, SFTP, TFTP, or SCP. Any other protocols will result
              in error. If this parameter is not specified, there is no default value to be used.
        required: Yes
        default: Null
        choices: [SFTP, SCP, FTP, TFTP]
    serverip:
        description:
            - This specifies the IP Address of the remote server from where the backup configuration will be downloaded.
        required: Yes
        default: Null
    rcpath:
        description:
            - This specifies the full file path of the configuration file located on the remote server. In case the relative
              path is used as the variable value, the root folder for the user of the server needs to be specified.
        required: Yes
        default: Null
    serverusername:
        description:
            - Specify the username for the server relating to the protocol used.
        required: Yes
        default: Null
    serverpassword:
        description:
            - Specify the password for the server relating to the protocol used.
        required: Yes
        default: Null
'''
# Fix: every example invoked the module as "cnos_rolback" (missing an 'l');
# the module's real name is cnos_rollback, so the examples would fail as-is.
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_rollback. These are written in the main.yml file of the tasks directory.
---
- name: Test Rollback of config - Running config
  cnos_rollback:
    host: "{{ inventory_hostname }}"
    username: "{{ hostvars[inventory_hostname]['username'] }}"
    password: "{{ hostvars[inventory_hostname]['password'] }}"
    deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
    enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
    outputfile: "./results/test_rollback_{{ inventory_hostname }}_output.txt"
    configType: running-config
    protocol: "sftp"
    serverip: "10.241.106.118"
    rcpath: "/root/cnos/G8272-running-config.txt"
    serverusername: "root"
    serverpassword: "root123"

- name: Test Rollback of config - Startup config
  cnos_rollback:
    host: "{{ inventory_hostname }}"
    username: "{{ hostvars[inventory_hostname]['username'] }}"
    password: "{{ hostvars[inventory_hostname]['password'] }}"
    deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
    enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
    outputfile: "./results/test_rollback_{{ inventory_hostname }}_output.txt"
    configType: startup-config
    protocol: "sftp"
    serverip: "10.241.106.118"
    rcpath: "/root/cnos/G8272-startup-config.txt"
    serverusername: "root"
    serverpassword: "root123"

- name: Test Rollback of config - Running config - TFTP
  cnos_rollback:
    host: "{{ inventory_hostname }}"
    username: "{{ hostvars[inventory_hostname]['username'] }}"
    password: "{{ hostvars[inventory_hostname]['password'] }}"
    deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
    enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
    outputfile: "./results/test_rollback_{{ inventory_hostname }}_output.txt"
    configType: running-config
    protocol: "tftp"
    serverip: "10.241.106.118"
    rcpath: "/anil/G8272-running-config.txt"
    serverusername: "root"
    serverpassword: "root123"

- name: Test Rollback of config - Startup config - TFTP
  cnos_rollback:
    host: "{{ inventory_hostname }}"
    username: "{{ hostvars[inventory_hostname]['username'] }}"
    password: "{{ hostvars[inventory_hostname]['password'] }}"
    deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
    enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
    outputfile: "./results/test_rollback_{{ inventory_hostname }}_output.txt"
    configType: startup-config
    protocol: "tftp"
    serverip: "10.241.106.118"
    rcpath: "/anil/G8272-startup-config.txt"
    serverusername: "root"
    serverpassword: "root123"
'''
# Fix: typo "tranferred" -> "transferred" in the documented success message.
RETURN = '''
return value: |
  On successful execution, the method returns a message in JSON format
  [Config file transferred to Device]
  Upon any failure, the method returns an error display string.
'''
import sys
import paramiko
import time
import argparse
import socket
import array
import json
import time
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
    """Entry point for the cnos_rollback Ansible module.

    Reads the module parameters, opens an interactive SSH shell to the
    target CNOS switch, asks the switch to pull the requested backup
    configuration (running-config or startup-config) from the remote
    server, appends the session transcript to ``outputfile``, and reports
    success or failure back to Ansible.
    """
    module = AnsibleModule(
        argument_spec=dict(
            outputfile=dict(required=True),
            host=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            enablePassword=dict(required=False, no_log=True),
            deviceType=dict(required=True),
            configType=dict(required=True),
            protocol=dict(required=True),
            serverip=dict(required=True),
            rcpath=dict(required=True),
            serverusername=dict(required=False),
            serverpassword=dict(required=False, no_log=True),),
        supports_check_mode=False)
    username = module.params['username']
    password = module.params['password']
    enablePassword = module.params['enablePassword']
    outputfile = module.params['outputfile']
    host = module.params['host']
    deviceType = module.params['deviceType']
    configType = module.params['configType']
    protocol = module.params['protocol'].lower()
    rcserverip = module.params['serverip']
    rcpath = module.params['rcpath']
    serveruser = module.params['serverusername']
    serverpwd = module.params['serverpassword']
    output = ""
    timeout = 90          # seconds allowed for SFTP/SCP/FTP transfers
    tftptimeout = 450     # TFTP is slower; allow a longer window
    # Create instance of SSHClient object
    remote_conn_pre = paramiko.SSHClient()
    # Automatically add untrusted hosts (make sure okay for security policy in your environment)
    remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # initiate SSH connection with the switch
    remote_conn_pre.connect(host, username=username, password=password)
    time.sleep(2)
    # Use invoke_shell to establish an 'interactive session'
    remote_conn = remote_conn_pre.invoke_shell()
    time.sleep(2)
    # Enable and enter configure terminal then send command
    output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
    output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
    # Make terminal length = 0 so output is not paginated
    output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
    # Dispatch to the cnos helper matching the config type and protocol
    if configType == 'running-config':
        if protocol in ('tftp', 'ftp'):
            transfer_status = cnos.doRunningConfigRollback(protocol, tftptimeout, rcserverip, rcpath, serveruser, serverpwd, remote_conn)
        elif protocol in ('sftp', 'scp'):
            transfer_status = cnos.doSecureRunningConfigRollback(protocol, timeout, rcserverip, rcpath, serveruser, serverpwd, remote_conn)
        else:
            transfer_status = "Invalid Protocol option"
    elif configType == 'startup-config':
        if protocol in ('tftp', 'ftp'):
            transfer_status = cnos.doStartUpConfigRollback(protocol, tftptimeout, rcserverip, rcpath, serveruser, serverpwd, remote_conn)
        elif protocol in ('sftp', 'scp'):
            transfer_status = cnos.doSecureStartUpConfigRollback(protocol, timeout, rcserverip, rcpath, serveruser, serverpwd, remote_conn)
        else:
            transfer_status = "Invalid Protocol option"
    else:
        transfer_status = "Invalid configType Option"
    output = output + "\n Config Transfer status \n" + transfer_status
    # Append the transcript to the results file. A context manager guarantees
    # the handle is closed (the original leaked it on write errors) and we no
    # longer shadow the `file` builtin.
    with open(outputfile, "a") as transcript:
        transcript.write(output)
    # need to add logic to check when changes occur or not
    errorMsg = cnos.checkOutputForError(output)
    if errorMsg is None:
        # Typo fix: "tranferred" -> "transferred" (matches RETURN docs intent)
        module.exit_json(changed=True, msg="Config file transferred to Device")
    else:
        module.fail_json(msg=errorMsg)


if __name__ == '__main__':
    main()
|
j00bar/ansible
|
lib/ansible/modules/network/lenovo/cnos_rollback.py
|
Python
|
gpl-3.0
| 11,138
|
[
"VisIt"
] |
66f078cd7a607af7479e0544c667d65172187d9eb3ba9a54cd5696acd667a52b
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.